author    Michaël Zasso <targos@protonmail.com>  2018-09-21 09:14:51 +0200
committer Michaël Zasso <targos@protonmail.com>  2018-09-22 18:29:25 +0200
commit    0e7ddbd3d7e9439c67573b854c49cf82c398ae82 (patch)
tree      2afe372acde921cb57ddb3444ff00c5adef8848c /deps
parent    13245dc50da4cb7443c39ef6c68d419d5e6336d4 (diff)
download  android-node-v8-0e7ddbd3d7e9439c67573b854c49cf82c398ae82.tar.gz
          android-node-v8-0e7ddbd3d7e9439c67573b854c49cf82c398ae82.tar.bz2
          android-node-v8-0e7ddbd3d7e9439c67573b854c49cf82c398ae82.zip
deps: update V8 to 7.0.276.20
PR-URL: https://github.com/nodejs/node/pull/22754
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.gitignore2
-rw-r--r--deps/v8/.vpython2
-rw-r--r--deps/v8/AUTHORS2
-rw-r--r--deps/v8/BUILD.gn204
-rw-r--r--deps/v8/ChangeLog1515
-rw-r--r--deps/v8/DEPS62
-rw-r--r--deps/v8/include/OWNERS2
-rw-r--r--deps/v8/include/libplatform/libplatform.h5
-rw-r--r--deps/v8/include/v8-inspector.h2
-rw-r--r--deps/v8/include/v8-profiler.h20
-rw-r--r--deps/v8/include/v8-version.h8
-rw-r--r--deps/v8/include/v8.h422
-rw-r--r--deps/v8/include/v8config.h2
-rw-r--r--deps/v8/infra/config/cq.cfg4
-rw-r--r--deps/v8/infra/mb/mb_config.pyl41
-rw-r--r--deps/v8/infra/testing/PRESUBMIT.py2
-rw-r--r--deps/v8/infra/testing/builders.pyl20
-rw-r--r--deps/v8/src/DEPS2
-rw-r--r--deps/v8/src/accessors.cc16
-rw-r--r--deps/v8/src/accessors.h4
-rw-r--r--deps/v8/src/api-arguments-inl.h24
-rw-r--r--deps/v8/src/api-arguments.cc54
-rw-r--r--deps/v8/src/api-arguments.h58
-rw-r--r--deps/v8/src/api-inl.h140
-rw-r--r--deps/v8/src/api-natives.cc2
-rw-r--r--deps/v8/src/api-natives.h1
-rw-r--r--deps/v8/src/api.cc920
-rw-r--r--deps/v8/src/api.h138
-rw-r--r--deps/v8/src/arguments-inl.h28
-rw-r--r--deps/v8/src/arguments.h20
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h108
-rw-r--r--deps/v8/src/arm/assembler-arm.cc144
-rw-r--r--deps/v8/src/arm/assembler-arm.h31
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc9
-rw-r--r--deps/v8/src/arm/codegen-arm.cc4
-rw-r--r--deps/v8/src/arm/constants-arm.h35
-rw-r--r--deps/v8/src/arm/disasm-arm.cc33
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc24
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc119
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h47
-rw-r--r--deps/v8/src/arm/simulator-arm.cc148
-rw-r--r--deps/v8/src/arm/simulator-arm.h23
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h35
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc53
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h30
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc4
-rw-r--r--deps/v8/src/arm64/constants-arm64.h40
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h10
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc2
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc18
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc24
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h27
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc24
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h18
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc244
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h57
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc4
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h2
-rw-r--r--deps/v8/src/asmjs/asm-js.cc43
-rw-r--r--deps/v8/src/assembler-arch-inl.h30
-rw-r--r--deps/v8/src/assembler.cc530
-rw-r--r--deps/v8/src/assembler.h423
-rw-r--r--deps/v8/src/assert-scope.h6
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc1
-rw-r--r--deps/v8/src/ast/ast.h103
-rw-r--r--deps/v8/src/ast/prettyprinter.cc31
-rw-r--r--deps/v8/src/ast/prettyprinter.h10
-rw-r--r--deps/v8/src/ast/scopes.cc25
-rw-r--r--deps/v8/src/ast/scopes.h10
-rw-r--r--deps/v8/src/async-hooks-wrapper.cc56
-rw-r--r--deps/v8/src/async-hooks-wrapper.h3
-rw-r--r--deps/v8/src/bailout-reason.cc5
-rw-r--r--deps/v8/src/bailout-reason.h7
-rw-r--r--deps/v8/src/base/compiler-specific.h11
-rw-r--r--deps/v8/src/base/macros.h38
-rw-r--r--deps/v8/src/base/optional.h30
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc10
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc9
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc6
-rw-r--r--deps/v8/src/base/template-utils.h10
-rw-r--r--deps/v8/src/basic-block-profiler.cc59
-rw-r--r--deps/v8/src/basic-block-profiler.h11
-rw-r--r--deps/v8/src/bootstrapper.cc378
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc6
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc16
-rw-r--r--deps/v8/src/builtins/array-copywithin.tq93
-rw-r--r--deps/v8/src/builtins/array-foreach.tq53
-rw-r--r--deps/v8/src/builtins/array-reverse.tq190
-rw-r--r--deps/v8/src/builtins/array-sort.tq8
-rw-r--r--deps/v8/src/builtins/array.tq33
-rw-r--r--deps/v8/src/builtins/base.tq158
-rw-r--r--deps/v8/src/builtins/builtins-api.cc5
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc446
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.h11
-rw-r--r--deps/v8/src/builtins/builtins-array.cc198
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc26
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc34
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc25
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc37
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc4
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc236
-rw-r--r--deps/v8/src/builtins/builtins-collections.cc2
-rw-r--r--deps/v8/src/builtins/builtins-console.cc5
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc9
-rw-r--r--deps/v8/src/builtins/builtins-constructor.h1
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc137
-rw-r--r--deps/v8/src/builtins/builtins-date.cc134
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h120
-rw-r--r--deps/v8/src/builtins/builtins-descriptors.h2
-rw-r--r--deps/v8/src/builtins/builtins-error.cc6
-rw-r--r--deps/v8/src/builtins/builtins-function-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-function.cc4
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc15
-rw-r--r--deps/v8/src/builtins/builtins-global.cc2
-rw-r--r--deps/v8/src/builtins/builtins-ic-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc52
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc80
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc799
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc65
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h7
-rw-r--r--deps/v8/src/builtins/builtins-json.cc6
-rw-r--r--deps/v8/src/builtins/builtins-number.cc13
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc23
-rw-r--r--deps/v8/src/builtins/builtins-object.cc8
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc17
-rw-r--r--deps/v8/src/builtins/builtins-promise.cc2
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.h1
-rw-r--r--deps/v8/src/builtins/builtins-reflect-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc3
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc855
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h51
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc4
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc5
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc43
-rw-r--r--deps/v8/src/builtins/builtins-string.cc20
-rw-r--r--deps/v8/src/builtins/builtins-symbol.cc22
-rw-r--r--deps/v8/src/builtins/builtins-trace.cc14
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc180
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-typed-array.cc7
-rw-r--r--deps/v8/src/builtins/builtins-utils-inl.h35
-rw-r--r--deps/v8/src/builtins/builtins-utils.h36
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins.cc87
-rw-r--r--deps/v8/src/builtins/builtins.h17
-rw-r--r--deps/v8/src/builtins/constants-table-builder.cc4
-rw-r--r--deps/v8/src/builtins/data-view.tq204
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.cc4
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc50
-rw-r--r--deps/v8/src/builtins/mips/OWNERS5
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc11
-rw-r--r--deps/v8/src/builtins/mips64/OWNERS5
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc11
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc10
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc6
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc63
-rw-r--r--deps/v8/src/builtins/typed-array.tq41
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc4
-rw-r--r--deps/v8/src/code-factory.h1
-rw-r--r--deps/v8/src/code-stub-assembler.cc1011
-rw-r--r--deps/v8/src/code-stub-assembler.h239
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc31
-rw-r--r--deps/v8/src/compiler.cc50
-rw-r--r--deps/v8/src/compiler.h12
-rw-r--r--deps/v8/src/compiler/access-builder.cc13
-rw-r--r--deps/v8/src/compiler/access-info.cc22
-rw-r--r--deps/v8/src/compiler/access-info.h6
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc136
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h547
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc31
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc234
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc44
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h2
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc2
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc135
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc22
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc26
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h7
-rw-r--r--deps/v8/src/compiler/code-assembler.cc55
-rw-r--r--deps/v8/src/compiler/code-assembler.h34
-rw-r--r--deps/v8/src/compiler/code-generator.cc32
-rw-r--r--deps/v8/src/compiler/code-generator.h7
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc4
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h6
-rw-r--r--deps/v8/src/compiler/common-operator.h2
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc249
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h28
-rw-r--r--deps/v8/src/compiler/constant-folding-reducer.cc4
-rw-r--r--deps/v8/src/compiler/constant-folding-reducer.h6
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc501
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h2
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc3
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc22
-rw-r--r--deps/v8/src/compiler/graph-assembler.h10
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h1
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc208
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h711
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc35
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc384
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc114
-rw-r--r--deps/v8/src/compiler/instruction-selector.h2
-rw-r--r--deps/v8/src/compiler/instruction.h2
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc136
-rw-r--r--deps/v8/src/compiler/int64-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc689
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h7
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc3
-rw-r--r--deps/v8/src/compiler/js-context-specialization.h5
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc287
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h28
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc19
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc1410
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h250
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc85
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.h38
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h2
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc78
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h12
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc80
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h7
-rw-r--r--deps/v8/src/compiler/js-operator.cc46
-rw-r--r--deps/v8/src/compiler/js-operator.h35
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc60
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h6
-rw-r--r--deps/v8/src/compiler/linkage.cc2
-rw-r--r--deps/v8/src/compiler/load-elimination.cc63
-rw-r--r--deps/v8/src/compiler/load-elimination.h1
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc3
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc5
-rw-r--r--deps/v8/src/compiler/machine-graph.cc1
-rw-r--r--deps/v8/src/compiler/machine-graph.h2
-rw-r--r--deps/v8/src/compiler/machine-operator.cc377
-rw-r--r--deps/v8/src/compiler/machine-operator.h70
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc2
-rw-r--r--deps/v8/src/compiler/mips/OWNERS5
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc17
-rw-r--r--deps/v8/src/compiler/mips64/OWNERS5
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc20
-rw-r--r--deps/v8/src/compiler/node-matchers.h2
-rw-r--r--deps/v8/src/compiler/node-properties.cc2
-rw-r--r--deps/v8/src/compiler/opcodes.h221
-rw-r--r--deps/v8/src/compiler/operation-typer.cc22
-rw-r--r--deps/v8/src/compiler/operation-typer.h5
-rw-r--r--deps/v8/src/compiler/operator-properties.cc2
-rw-r--r--deps/v8/src/compiler/pipeline.cc80
-rw-r--r--deps/v8/src/compiler/pipeline.h2
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc40
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h4
-rw-r--r--deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc2
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc19
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc3
-rw-r--r--deps/v8/src/compiler/property-access-builder.h6
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h8
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc1
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc20
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc96
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h4
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc66
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h10
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc2
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h6
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc53
-rw-r--r--deps/v8/src/compiler/simplified-operator.h30
-rw-r--r--deps/v8/src/compiler/type-narrowing-reducer.cc2
-rw-r--r--deps/v8/src/compiler/type-narrowing-reducer.h2
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc6
-rw-r--r--deps/v8/src/compiler/typed-optimization.h6
-rw-r--r--deps/v8/src/compiler/typer.cc323
-rw-r--r--deps/v8/src/compiler/typer.h6
-rw-r--r--deps/v8/src/compiler/types.cc38
-rw-r--r--deps/v8/src/compiler/types.h9
-rw-r--r--deps/v8/src/compiler/verifier.cc18
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc830
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h59
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc10
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h2
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc2
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc92
-rw-r--r--deps/v8/src/contexts-inl.h17
-rw-r--r--deps/v8/src/contexts.cc6
-rw-r--r--deps/v8/src/contexts.h126
-rw-r--r--deps/v8/src/counters.h6
-rw-r--r--deps/v8/src/d8-posix.cc2
-rw-r--r--deps/v8/src/d8.cc70
-rw-r--r--deps/v8/src/d8.h62
-rw-r--r--deps/v8/src/debug/arm/OWNERS1
-rw-r--r--deps/v8/src/debug/arm64/OWNERS1
-rw-r--r--deps/v8/src/debug/debug-coverage.cc10
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc217
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.cc3
-rw-r--r--deps/v8/src/debug/debug-scopes.cc1
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc5
-rw-r--r--deps/v8/src/debug/debug.cc38
-rw-r--r--deps/v8/src/debug/debug.h28
-rw-r--r--deps/v8/src/debug/liveedit.cc197
-rw-r--r--deps/v8/src/debug/mips/OWNERS5
-rw-r--r--deps/v8/src/debug/mips64/OWNERS5
-rw-r--r--deps/v8/src/deoptimizer.cc30
-rw-r--r--deps/v8/src/deoptimizer.h1
-rw-r--r--deps/v8/src/disasm.h33
-rw-r--r--deps/v8/src/disassembler.cc79
-rw-r--r--deps/v8/src/disassembler.h2
-rw-r--r--deps/v8/src/double.h24
-rw-r--r--deps/v8/src/elements-inl.h38
-rw-r--r--deps/v8/src/elements-kind.cc45
-rw-r--r--deps/v8/src/elements-kind.h30
-rw-r--r--deps/v8/src/elements.cc111
-rw-r--r--deps/v8/src/elements.h23
-rw-r--r--deps/v8/src/execution.cc2
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc13
-rw-r--r--deps/v8/src/external-reference-table.cc12
-rw-r--r--deps/v8/src/external-reference-table.h19
-rw-r--r--deps/v8/src/external-reference.cc37
-rw-r--r--deps/v8/src/external-reference.h8
-rw-r--r--deps/v8/src/feedback-vector-inl.h4
-rw-r--r--deps/v8/src/feedback-vector.cc140
-rw-r--r--deps/v8/src/feedback-vector.h27
-rw-r--r--deps/v8/src/field-index-inl.h26
-rw-r--r--deps/v8/src/field-index.h1
-rw-r--r--deps/v8/src/field-type.cc17
-rw-r--r--deps/v8/src/field-type.h9
-rw-r--r--deps/v8/src/flag-definitions.h92
-rw-r--r--deps/v8/src/frames-inl.h26
-rw-r--r--deps/v8/src/frames.cc72
-rw-r--r--deps/v8/src/futex-emulation.cc8
-rw-r--r--deps/v8/src/futex-emulation.h7
-rw-r--r--deps/v8/src/gdb-jit.cc145
-rw-r--r--deps/v8/src/global-handles.cc24
-rw-r--r--deps/v8/src/global-handles.h5
-rw-r--r--deps/v8/src/globals.h35
-rw-r--r--deps/v8/src/handler-table.cc30
-rw-r--r--deps/v8/src/handles-inl.h73
-rw-r--r--deps/v8/src/handles.cc9
-rw-r--r--deps/v8/src/handles.h115
-rw-r--r--deps/v8/src/heap-symbols.h15
-rw-r--r--deps/v8/src/heap/array-buffer-collector.cc1
-rw-r--r--deps/v8/src/heap/array-buffer-collector.h2
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h1
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc11
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h2
-rw-r--r--deps/v8/src/heap/code-stats.cc2
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc22
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc19
-rw-r--r--deps/v8/src/heap/embedder-tracing.h21
-rw-r--r--deps/v8/src/heap/factory-inl.h12
-rw-r--r--deps/v8/src/heap/factory.cc287
-rw-r--r--deps/v8/src/heap/factory.h40
-rw-r--r--deps/v8/src/heap/gc-tracer.cc2
-rw-r--r--deps/v8/src/heap/heap-controller.cc75
-rw-r--r--deps/v8/src/heap/heap-controller.h71
-rw-r--r--deps/v8/src/heap/heap-inl.h84
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h157
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h51
-rw-r--r--deps/v8/src/heap/heap.cc448
-rw-r--r--deps/v8/src/heap/heap.h69
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h16
-rw-r--r--deps/v8/src/heap/incremental-marking.cc19
-rw-r--r--deps/v8/src/heap/incremental-marking.h8
-rw-r--r--deps/v8/src/heap/invalidated-slots-inl.h6
-rw-r--r--deps/v8/src/heap/invalidated-slots.cc9
-rw-r--r--deps/v8/src/heap/invalidated-slots.h1
-rw-r--r--deps/v8/src/heap/local-allocator-inl.h109
-rw-r--r--deps/v8/src/heap/local-allocator.h98
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h26
-rw-r--r--deps/v8/src/heap/mark-compact.cc94
-rw-r--r--deps/v8/src/heap/mark-compact.h11
-rw-r--r--deps/v8/src/heap/object-stats.cc75
-rw-r--r--deps/v8/src/heap/object-stats.h8
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h1
-rw-r--r--deps/v8/src/heap/objects-visiting.h79
-rw-r--r--deps/v8/src/heap/remembered-set.h5
-rw-r--r--deps/v8/src/heap/scavenger-inl.h5
-rw-r--r--deps/v8/src/heap/scavenger.cc6
-rw-r--r--deps/v8/src/heap/scavenger.h4
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc59
-rw-r--r--deps/v8/src/heap/spaces-inl.h7
-rw-r--r--deps/v8/src/heap/spaces.cc49
-rw-r--r--deps/v8/src/heap/spaces.h30
-rw-r--r--deps/v8/src/heap/store-buffer-inl.h36
-rw-r--r--deps/v8/src/heap/store-buffer.cc71
-rw-r--r--deps/v8/src/heap/store-buffer.h86
-rw-r--r--deps/v8/src/heap/sweeper.cc4
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h31
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc76
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h61
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc37
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc4
-rw-r--r--deps/v8/src/ia32/constants-ia32.h4
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc2
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc53
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc33
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc255
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h89
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc335
-rw-r--r--deps/v8/src/ic/accessor-assembler.h20
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h4
-rw-r--r--deps/v8/src/ic/handler-configuration.cc63
-rw-r--r--deps/v8/src/ic/handler-configuration.h16
-rw-r--r--deps/v8/src/ic/ic-inl.h8
-rw-r--r--deps/v8/src/ic/ic.cc298
-rw-r--r--deps/v8/src/ic/ic.h10
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc97
-rw-r--r--deps/v8/src/ic/keyed-store-generic.h7
-rw-r--r--deps/v8/src/inspector/DEPS3
-rw-r--r--deps/v8/src/inspector/injected-script-source.js10
-rw-r--r--deps/v8/src/inspector/injected-script.cc5
-rw-r--r--deps/v8/src/inspector/string-16.cc7
-rw-r--r--deps/v8/src/inspector/string-16.h6
-rw-r--r--deps/v8/src/inspector/string-util.cc10
-rw-r--r--deps/v8/src/inspector/string-util.h4
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc14
-rw-r--r--deps/v8/src/inspector/v8-console.cc26
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc29
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc15
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc4
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc11
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc5
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc11
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc16
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc20
-rw-r--r--deps/v8/src/inspector/v8-regex.cc2
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc12
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc10
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-value-utils.cc9
-rw-r--r--deps/v8/src/inspector/wasm-translation.cc3
-rw-r--r--deps/v8/src/interface-descriptors.cc63
-rw-r--r--deps/v8/src/interface-descriptors.h38
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc7
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc198
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h11
-rw-r--r--deps/v8/src/interpreter/bytecodes.h2
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h1
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc50
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h8
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc61
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc19
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h3
-rw-r--r--deps/v8/src/interpreter/setup-interpreter-internal.cc6
-rw-r--r--deps/v8/src/intl.cc56
-rw-r--r--deps/v8/src/intl.h24
-rw-r--r--deps/v8/src/isolate-inl.h15
-rw-r--r--deps/v8/src/isolate.cc142
-rw-r--r--deps/v8/src/isolate.h129
-rw-r--r--deps/v8/src/js/array.js210
-rw-r--r--deps/v8/src/js/intl.js894
-rw-r--r--deps/v8/src/js/macros.py6
-rw-r--r--deps/v8/src/js/typedarray.js6
-rw-r--r--deps/v8/src/json-stringifier.cc122
-rw-r--r--deps/v8/src/json-stringifier.h111
-rw-r--r--deps/v8/src/keys.cc7
-rw-r--r--deps/v8/src/keys.h4
-rw-r--r--deps/v8/src/label.h8
-rw-r--r--deps/v8/src/layout-descriptor-inl.h12
-rw-r--r--deps/v8/src/layout-descriptor.cc2
-rw-r--r--deps/v8/src/layout-descriptor.h7
-rw-r--r--deps/v8/src/libplatform/default-platform.cc3
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc58
-rw-r--r--deps/v8/src/libsampler/sampler.cc7
-rw-r--r--deps/v8/src/log-utils.cc149
-rw-r--r--deps/v8/src/log-utils.h42
-rw-r--r--deps/v8/src/log.cc49
-rw-r--r--deps/v8/src/log.h2
-rw-r--r--deps/v8/src/lookup-inl.h144
-rw-r--r--deps/v8/src/lookup.cc4
-rw-r--r--deps/v8/src/lookup.h125
-rw-r--r--deps/v8/src/map-updater.cc28
-rw-r--r--deps/v8/src/map-updater.h14
-rw-r--r--deps/v8/src/maybe-handles-inl.h86
-rw-r--r--deps/v8/src/maybe-handles.h120
-rw-r--r--deps/v8/src/messages.cc20
-rw-r--r--deps/v8/src/messages.h11
-rw-r--r--deps/v8/src/mips/OWNERS5
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h39
-rw-r--r--deps/v8/src/mips/assembler-mips.cc214
-rw-r--r--deps/v8/src/mips/assembler-mips.h27
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc4
-rw-r--r--deps/v8/src/mips/codegen-mips.cc4
-rw-r--r--deps/v8/src/mips/constants-mips.h37
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc10
-rw-r--r--deps/v8/src/mips/disasm-mips.cc15
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc24
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc208
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h28
-rw-r--r--deps/v8/src/mips/simulator-mips.cc68
-rw-r--r--deps/v8/src/mips/simulator-mips.h2
-rw-r--r--deps/v8/src/mips64/OWNERS5
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h22
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc212
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h26
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc2
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc4
-rw-r--r--deps/v8/src/mips64/constants-mips64.h33
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc8
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc28
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc24
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc142
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h28
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc66
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h2
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h16
-rw-r--r--deps/v8/src/objects-body-descriptors.h37
-rw-r--r--deps/v8/src/objects-debug.cc146
-rw-r--r--deps/v8/src/objects-definitions.h4
-rw-r--r--deps/v8/src/objects-inl.h389
-rw-r--r--deps/v8/src/objects-printer.cc127
-rw-r--r--deps/v8/src/objects.cc1246
-rw-r--r--deps/v8/src/objects.h503
-rw-r--r--deps/v8/src/objects/api-callbacks.h3
-rw-r--r--deps/v8/src/objects/arguments-inl.h5
-rw-r--r--deps/v8/src/objects/arguments.h1
-rw-r--r--deps/v8/src/objects/bigint.cc17
-rw-r--r--deps/v8/src/objects/code-inl.h32
-rw-r--r--deps/v8/src/objects/code.h43
-rw-r--r--deps/v8/src/objects/compilation-cache-inl.h5
-rw-r--r--deps/v8/src/objects/data-handler-inl.h15
-rw-r--r--deps/v8/src/objects/data-handler.h4
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h2
-rw-r--r--deps/v8/src/objects/debug-objects.h10
-rw-r--r--deps/v8/src/objects/descriptor-array.h4
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h76
-rw-r--r--deps/v8/src/objects/fixed-array.h157
-rw-r--r--deps/v8/src/objects/frame-array.h4
-rw-r--r--deps/v8/src/objects/hash-table-inl.h6
-rw-r--r--deps/v8/src/objects/hash-table.h2
-rw-r--r--deps/v8/src/objects/intl-objects-inl.h2
-rw-r--r--deps/v8/src/objects/intl-objects.cc1851
-rw-r--r--deps/v8/src/objects/intl-objects.h275
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h210
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc310
-rw-r--r--deps/v8/src/objects/js-array-buffer.h230
-rw-r--r--deps/v8/src/objects/js-array-inl.h190
-rw-r--r--deps/v8/src/objects/js-array.h231
-rw-r--r--deps/v8/src/objects/js-collator-inl.h43
-rw-r--r--deps/v8/src/objects/js-collator.cc541
-rw-r--r--deps/v8/src/objects/js-collator.h92
-rw-r--r--deps/v8/src/objects/js-collection-inl.h2
-rw-r--r--deps/v8/src/objects/js-collection.h1
-rw-r--r--deps/v8/src/objects/js-generator-inl.h52
-rw-r--r--deps/v8/src/objects/js-generator.h111
-rw-r--r--deps/v8/src/objects/js-list-format-inl.h55
-rw-r--r--deps/v8/src/objects/js-list-format.cc401
-rw-r--r--deps/v8/src/objects/js-list-format.h121
-rw-r--r--deps/v8/src/objects/js-locale-inl.h3
-rw-r--r--deps/v8/src/objects/js-locale.cc49
-rw-r--r--deps/v8/src/objects/js-locale.h5
-rw-r--r--deps/v8/src/objects/js-plural-rules-inl.h36
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc326
-rw-r--r--deps/v8/src/objects/js-plural-rules.h69
-rw-r--r--deps/v8/src/objects/js-promise-inl.h4
-rw-r--r--deps/v8/src/objects/js-proxy-inl.h30
-rw-r--r--deps/v8/src/objects/js-proxy.h155
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h1
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-relative-time-format-inl.h27
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc47
-rw-r--r--deps/v8/src/objects/js-relative-time-format.h38
-rw-r--r--deps/v8/src/objects/map-inl.h45
-rw-r--r--deps/v8/src/objects/map.h127
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h13
-rw-r--r--deps/v8/src/objects/maybe-object.h14
-rw-r--r--deps/v8/src/objects/microtask-inl.h2
-rw-r--r--deps/v8/src/objects/module-inl.h2
-rw-r--r--deps/v8/src/objects/module.cc26
-rw-r--r--deps/v8/src/objects/module.h4
-rw-r--r--deps/v8/src/objects/object-macros-undef.h1
-rw-r--r--deps/v8/src/objects/object-macros.h140
-rw-r--r--deps/v8/src/objects/ordered-hash-table-inl.h4
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc6
-rw-r--r--deps/v8/src/objects/property-descriptor-object.h1
-rw-r--r--deps/v8/src/objects/prototype-info-inl.h4
-rw-r--r--deps/v8/src/objects/prototype-info.h15
-rw-r--r--deps/v8/src/objects/regexp-match-info.h1
-rw-r--r--deps/v8/src/objects/script.h2
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h148
-rw-r--r--deps/v8/src/objects/shared-function-info.h84
-rw-r--r--deps/v8/src/objects/string-inl.h24
-rw-r--r--deps/v8/src/objects/string.h20
-rw-r--r--deps/v8/src/optimized-compilation-info.cc4
-rw-r--r--deps/v8/src/optimized-compilation-info.h9
-rw-r--r--deps/v8/src/parsing/expression-classifier.h56
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc44
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h13
-rw-r--r--deps/v8/src/parsing/parse-info.cc34
-rw-r--r--deps/v8/src/parsing/parse-info.h36
-rw-r--r--deps/v8/src/parsing/parser-base.h259
-rw-r--r--deps/v8/src/parsing/parser.cc103
-rw-r--r--deps/v8/src/parsing/parser.h20
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc3
-rw-r--r--deps/v8/src/parsing/preparse-data.cc44
-rw-r--r--deps/v8/src/parsing/preparse-data.h84
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.cc7
-rw-r--r--deps/v8/src/parsing/preparsed-scope-data.h6
-rw-r--r--deps/v8/src/parsing/preparser-logger.h35
-rw-r--r--deps/v8/src/parsing/preparser.cc1
-rw-r--r--deps/v8/src/parsing/preparser.h49
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc201
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h1
-rw-r--r--deps/v8/src/parsing/scanner-inl.h43
-rw-r--r--deps/v8/src/parsing/scanner.cc402
-rw-r--r--deps/v8/src/parsing/scanner.h343
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h22
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc4
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h5
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc12
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc3
-rw-r--r--deps/v8/src/ppc/constants-ppc.h23
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc17
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc24
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc108
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h52
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc32
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc4
-rw-r--r--deps/v8/src/profiler/heap-profiler.h2
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator-inl.h25
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc133
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h37
-rw-r--r--deps/v8/src/profiler/profile-generator.cc4
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc11
-rw-r--r--deps/v8/src/profiler/tick-sample.cc6
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.cc9
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.h4
-rw-r--r--deps/v8/src/property-descriptor.cc17
-rw-r--r--deps/v8/src/property.cc50
-rw-r--r--deps/v8/src/property.h41
-rw-r--r--deps/v8/src/prototype-inl.h145
-rw-r--r--deps/v8/src/prototype.h133
-rw-r--r--deps/v8/src/regexp/arm/OWNERS1
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc2
-rw-r--r--deps/v8/src/regexp/arm64/OWNERS1
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc12
-rw-r--r--deps/v8/src/regexp/mips/OWNERS5
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc2
-rw-r--r--deps/v8/src/regexp/mips64/OWNERS5
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc2
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc2
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc5
-rw-r--r--deps/v8/src/regexp/regexp-utils.h4
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc4
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc9
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h5
-rw-r--r--deps/v8/src/register-configuration.cc59
-rw-r--r--deps/v8/src/register-configuration.h3
-rw-r--r--deps/v8/src/reloc-info.cc540
-rw-r--r--deps/v8/src/reloc-info.h455
-rw-r--r--deps/v8/src/roots.h14
-rw-r--r--deps/v8/src/runtime/runtime-array.cc26
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc53
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc5
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc16
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc19
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc7
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc6
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc2
-rw-r--r--deps/v8/src/runtime/runtime-function.cc5
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc7
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc7
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc10
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc5
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc589
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc104
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc1
-rw-r--r--deps/v8/src/runtime/runtime-module.cc5
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc9
-rw-r--r--deps/v8/src/runtime/runtime-object.cc61
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc7
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc2
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc112
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc11
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc8
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc7
-rw-r--r--deps/v8/src/runtime/runtime-test.cc100
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc9
-rw-r--r--deps/v8/src/runtime/runtime-utils.h4
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc17
-rw-r--r--deps/v8/src/runtime/runtime.cc2
-rw-r--r--deps/v8/src/runtime/runtime.h49
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h18
-rw-r--r--deps/v8/src/s390/assembler-s390.cc4
-rw-r--r--deps/v8/src/s390/assembler-s390.h3
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc2
-rw-r--r--deps/v8/src/s390/codegen-s390.cc3
-rw-r--r--deps/v8/src/s390/constants-s390.h21
-rw-r--r--deps/v8/src/s390/disasm-s390.cc10
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc24
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc113
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h44
-rw-r--r--deps/v8/src/safepoint-table.cc98
-rw-r--r--deps/v8/src/safepoint-table.h41
-rw-r--r--deps/v8/src/simulator-base.h1
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc6
-rw-r--r--deps/v8/src/snapshot/code-serializer.h1
-rw-r--r--deps/v8/src/snapshot/deserializer.cc36
-rw-r--r--deps/v8/src/snapshot/deserializer.h3
-rw-r--r--deps/v8/src/snapshot/macros.h4
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc40
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc7
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/serializer-common.h8
-rw-r--r--deps/v8/src/snapshot/serializer.cc96
-rw-r--r--deps/v8/src/snapshot/serializer.h7
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc62
-rw-r--r--deps/v8/src/snapshot/snapshot.h44
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc20
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h8
-rw-r--r--deps/v8/src/string-builder-inl.h (renamed from deps/v8/src/string-builder.h)203
-rw-r--r--deps/v8/src/string-builder.cc191
-rw-r--r--deps/v8/src/string-hasher-inl.h4
-rw-r--r--deps/v8/src/string-stream.cc1
-rw-r--r--deps/v8/src/third_party/utf8-decoder/utf8-decoder.h1
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc2
-rw-r--r--deps/v8/src/torque/Torque.g4315
-rw-r--r--deps/v8/src/torque/Torque.interp249
-rw-r--r--deps/v8/src/torque/Torque.tokens154
-rw-r--r--deps/v8/src/torque/TorqueBaseListener.h373
-rw-r--r--deps/v8/src/torque/TorqueBaseVisitor.h389
-rw-r--r--deps/v8/src/torque/TorqueLexer.cpp988
-rw-r--r--deps/v8/src/torque/TorqueLexer.h138
-rw-r--r--deps/v8/src/torque/TorqueLexer.interp264
-rw-r--r--deps/v8/src/torque/TorqueLexer.tokens154
-rw-r--r--deps/v8/src/torque/TorqueListener.h351
-rw-r--r--deps/v8/src/torque/TorqueParser.cpp8370
-rw-r--r--deps/v8/src/torque/TorqueParser.h1635
-rw-r--r--deps/v8/src/torque/TorqueVisitor.h249
-rw-r--r--deps/v8/src/torque/ast-generator.cc833
-rw-r--r--deps/v8/src/torque/ast-generator.h201
-rw-r--r--deps/v8/src/torque/ast.h499
-rw-r--r--deps/v8/src/torque/contextual.h27
-rw-r--r--deps/v8/src/torque/declarable.cc2
-rw-r--r--deps/v8/src/torque/declarable.h34
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc35
-rw-r--r--deps/v8/src/torque/declaration-visitor.h2
-rw-r--r--deps/v8/src/torque/declarations.cc26
-rw-r--r--deps/v8/src/torque/declarations.h3
-rw-r--r--deps/v8/src/torque/earley-parser.cc303
-rw-r--r--deps/v8/src/torque/earley-parser.h473
-rw-r--r--deps/v8/src/torque/file-visitor.h2
-rw-r--r--deps/v8/src/torque/global-context.h13
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc428
-rw-r--r--deps/v8/src/torque/implementation-visitor.h27
-rw-r--r--deps/v8/src/torque/scope.h4
-rw-r--r--deps/v8/src/torque/source-positions.cc17
-rw-r--r--deps/v8/src/torque/source-positions.h55
-rw-r--r--deps/v8/src/torque/torque-parser.cc1280
-rw-r--r--deps/v8/src/torque/torque-parser.h23
-rw-r--r--deps/v8/src/torque/torque.cc78
-rw-r--r--deps/v8/src/torque/type-oracle.cc15
-rw-r--r--deps/v8/src/torque/types.cc39
-rw-r--r--deps/v8/src/torque/types.h25
-rw-r--r--deps/v8/src/torque/utils.cc60
-rw-r--r--deps/v8/src/torque/utils.h3
-rw-r--r--deps/v8/src/transitions.cc22
-rw-r--r--deps/v8/src/transitions.h6
-rw-r--r--deps/v8/src/trap-handler/handler-outside.cc1
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h7
-rw-r--r--deps/v8/src/turbo-assembler.cc16
-rw-r--r--deps/v8/src/turbo-assembler.h35
-rw-r--r--deps/v8/src/unoptimized-compilation-info.cc5
-rw-r--r--deps/v8/src/unoptimized-compilation-info.h2
-rw-r--r--deps/v8/src/uri.cc1
-rw-r--r--deps/v8/src/uri.h1
-rw-r--r--deps/v8/src/utils.cc85
-rw-r--r--deps/v8/src/utils.h120
-rw-r--r--deps/v8/src/v8.cc5
-rw-r--r--deps/v8/src/v8memory.h167
-rw-r--r--deps/v8/src/value-serializer.cc39
-rw-r--r--deps/v8/src/value-serializer.h1
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h13
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h11
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h27
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h11
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc6
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h20
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc213
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h5
-rw-r--r--deps/v8/src/wasm/baseline/mips/OWNERS5
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h263
-rw-r--r--deps/v8/src/wasm/baseline/mips64/OWNERS5
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h221
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h17
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h17
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h74
-rw-r--r--deps/v8/src/wasm/decoder.h2
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h67
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc171
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h42
-rw-r--r--deps/v8/src/wasm/function-compiler.cc105
-rw-r--r--deps/v8/src/wasm/function-compiler.h70
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc24
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h96
-rw-r--r--deps/v8/src/wasm/memory-tracing.cc24
-rw-r--r--deps/v8/src/wasm/memory-tracing.h5
-rw-r--r--deps/v8/src/wasm/module-compiler.cc502
-rw-r--r--deps/v8/src/wasm/module-compiler.h36
-rw-r--r--deps/v8/src/wasm/module-decoder.cc175
-rw-r--r--deps/v8/src/wasm/module-decoder.h43
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc5
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc220
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h80
-rw-r--r--deps/v8/src/wasm/wasm-constants.h6
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc38
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc194
-rw-r--r--deps/v8/src/wasm/wasm-engine.h78
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc6
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h3
-rw-r--r--deps/v8/src/wasm/wasm-feature-flags.h26
-rw-r--r--deps/v8/src/wasm/wasm-features.cc40
-rw-r--r--deps/v8/src/wasm/wasm-features.h67
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc158
-rw-r--r--deps/v8/src/wasm/wasm-js.cc324
-rw-r--r--deps/v8/src/wasm/wasm-limits.h15
-rw-r--r--deps/v8/src/wasm/wasm-linkage.h88
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc150
-rw-r--r--deps/v8/src/wasm/wasm-memory.h50
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h1
-rw-r--r--deps/v8/src/wasm/wasm-module.cc7
-rw-r--r--deps/v8/src/wasm/wasm-module.h6
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h42
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc164
-rw-r--r--deps/v8/src/wasm/wasm-objects.h51
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc2
-rw-r--r--deps/v8/src/wasm/wasm-result.cc2
-rw-r--r--deps/v8/src/wasm/wasm-result.h13
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc61
-rw-r--r--deps/v8/src/wasm/wasm-serialization.h27
-rw-r--r--deps/v8/src/wasm/wasm-text.cc3
-rw-r--r--deps/v8/src/wasm/wasm-tier.h23
-rw-r--r--deps/v8/src/wasm/wasm-value.h2
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h47
-rw-r--r--deps/v8/src/x64/assembler-x64.cc220
-rw-r--r--deps/v8/src/x64/assembler-x64.h112
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc4
-rw-r--r--deps/v8/src/x64/codegen-x64.cc2
-rw-r--r--deps/v8/src/x64/disasm-x64.cc108
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc24
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc91
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h37
-rw-r--r--deps/v8/src/x64/sse-instr.h22
-rw-r--r--deps/v8/src/zone/zone-chunk-list.h38
-rw-r--r--deps/v8/test/BUILD.gn11
-rw-r--r--deps/v8/test/cctest/BUILD.gn12
-rw-r--r--deps/v8/test/cctest/OWNERS5
-rw-r--r--deps/v8/test/cctest/cctest.h27
-rw-r--r--deps/v8/test/cctest/cctest.status59
-rw-r--r--deps/v8/test/cctest/compiler/code-assembler-tester.h4
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc5
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h1
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc48
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc166
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc38
-rw-r--r--deps/v8/test/cctest/compiler/test-run-retpoline.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-variables.cc2
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h4
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc18
-rw-r--r--deps/v8/test/cctest/heap/test-external-string-tracker.cc226
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc213
-rw-r--r--deps/v8/test/cctest/heap/test-invalidated-slots.cc186
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc51
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden161
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden18
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden408
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden109
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden423
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden119
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc27
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.cc1
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc265
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc17
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc1
-rw-r--r--deps/v8/test/cctest/interpreter/test-source-positions.cc2
-rw-r--r--deps/v8/test/cctest/parsing/test-parse-decision.cc2
-rw-r--r--deps/v8/test/cctest/parsing/test-preparser.cc2
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner-streams.cc139
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner.cc8
-rw-r--r--deps/v8/test/cctest/test-accessors.cc4
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc12
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc35
-rw-r--r--deps/v8/test/cctest/test-api.cc658
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc8
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc164
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc50
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc50
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc1
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc56
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-ia32.cc1
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips64.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc1
-rw-r--r--deps/v8/test/cctest/test-compiler.cc9
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc2
-rw-r--r--deps/v8/test/cctest/test-debug.cc17
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc2
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc3
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc22
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc7
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc5
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc8
-rw-r--r--deps/v8/test/cctest/test-elements-kind.cc1
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc2
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc13
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc2
-rw-r--r--deps/v8/test/cctest/test-fuzz-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc2
-rw-r--r--deps/v8/test/cctest/test-global-object.cc1
-rw-r--r--deps/v8/test/cctest/test-hashcode.cc19
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc157
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc4
-rw-r--r--deps/v8/test/cctest/test-isolate-independent-builtins.cc1
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc56
-rw-r--r--deps/v8/test/cctest/test-lockers.cc1
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc2
-rw-r--r--deps/v8/test/cctest/test-log.cc543
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc1
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc5
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc3
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc1
-rw-r--r--deps/v8/test/cctest/test-object.cc2
-rw-r--r--deps/v8/test/cctest/test-parsing.cc63
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc2
-rw-r--r--deps/v8/test/cctest/test-regexp.cc2
-rw-r--r--deps/v8/test/cctest/test-roots.cc34
-rw-r--r--deps/v8/test/cctest/test-serialize.cc35
-rw-r--r--deps/v8/test/cctest/test-strings.cc44
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc2
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc16
-rw-r--r--deps/v8/test/cctest/test-typedarrays.cc3
-rw-r--r--deps/v8/test/cctest/test-types.cc2
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc6
-rw-r--r--deps/v8/test/cctest/test-utils.cc1
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc37
-rw-r--r--deps/v8/test/cctest/types-fuzz.h4
-rw-r--r--deps/v8/test/cctest/wasm/test-c-wasm-entry.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc199
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc284
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc24
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc68
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc88
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc58
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc164
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc10
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc435
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc809
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc16
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc10
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc12
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc74
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc198
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc10
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc8
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc54
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h58
-rw-r--r--deps/v8/test/common/assembler-tester.h46
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h2
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc11
-rw-r--r--deps/v8/test/debugger/debug/debug-liveedit-arrow-function-at-start.js13
-rw-r--r--deps/v8/test/debugger/debug/wasm/frame-inspection.js1
-rw-r--r--deps/v8/test/debugger/debugger.status5
-rw-r--r--deps/v8/test/fuzzer/multi-return.cc5
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc14
-rw-r--r--deps/v8/test/fuzzer/wasm-async.cc4
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc21
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc49
-rw-r--r--deps/v8/test/fuzzer/wasm.cc8
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt5
-rw-r--r--deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt12
-rw-r--r--deps/v8/test/inspector/inspector-test.cc158
-rw-r--r--deps/v8/test/inspector/isolate-data.cc21
-rw-r--r--deps/v8/test/intl/collator/default-locale.js8
-rw-r--r--deps/v8/test/intl/collator/property-override.js2
-rw-r--r--deps/v8/test/intl/date-format/timezone.js35
-rw-r--r--deps/v8/test/intl/general/getCanonicalLocales.js26
-rw-r--r--deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js27
-rw-r--r--deps/v8/test/intl/general/language_tags_with_preferred_values.js11
-rw-r--r--deps/v8/test/intl/list-format/constructor.js108
-rw-r--r--deps/v8/test/intl/list-format/format-en.js119
-rw-r--r--deps/v8/test/intl/list-format/format-to-parts.js92
-rw-r--r--deps/v8/test/intl/list-format/format.js63
-rw-r--r--deps/v8/test/intl/list-format/formatToParts-zh.js157
-rw-r--r--deps/v8/test/intl/list-format/resolved-options.js155
-rw-r--r--deps/v8/test/intl/locale/locale-constructor.js5
-rw-r--r--deps/v8/test/intl/locale/maximize_minimize.js138
-rw-r--r--deps/v8/test/intl/locale/regress-8032.js (renamed from deps/v8/src/torque/TorqueBaseListener.cpp)4
-rw-r--r--deps/v8/test/intl/number-format/options.js13
-rw-r--r--deps/v8/test/intl/regress-8030.js21
-rw-r--r--deps/v8/test/intl/regress-8031.js22
-rw-r--r--deps/v8/test/intl/regress-8725514.js10
-rw-r--r--deps/v8/test/intl/regress-875643.js (renamed from deps/v8/src/torque/TorqueVisitor.cpp)4
-rw-r--r--deps/v8/test/intl/relative-time-format/format-en.js502
-rw-r--r--deps/v8/test/intl/relative-time-format/format-to-parts-en.js68
-rw-r--r--deps/v8/test/intl/relative-time-format/format-to-parts.js82
-rw-r--r--deps/v8/test/intl/relative-time-format/format.js82
-rw-r--r--deps/v8/test/js-perf-test/Array/copy-within.js43
-rw-r--r--deps/v8/test/js-perf-test/Array/every.js10
-rw-r--r--deps/v8/test/js-perf-test/Array/filter.js20
-rw-r--r--deps/v8/test/js-perf-test/Array/find-index.js23
-rw-r--r--deps/v8/test/js-perf-test/Array/find.js14
-rw-r--r--deps/v8/test/js-perf-test/Array/for-each.js26
-rw-r--r--deps/v8/test/js-perf-test/Array/map.js18
-rw-r--r--deps/v8/test/js-perf-test/Array/reduce-right.js22
-rw-r--r--deps/v8/test/js-perf-test/Array/reduce.js13
-rw-r--r--deps/v8/test/js-perf-test/Array/run.js62
-rw-r--r--deps/v8/test/js-perf-test/Array/slice.js7
-rw-r--r--deps/v8/test/js-perf-test/Array/some.js10
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLarge/run.js153
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js161
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmall/run.js154
-rw-r--r--deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallHoley/run.js159
-rw-r--r--deps/v8/test/js-perf-test/ArraySort/sort-base.js6
-rw-r--r--deps/v8/test/js-perf-test/ArraySort/sort-lengths.js14
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json96
-rw-r--r--deps/v8/test/js-perf-test/Numbers/run.js20
-rw-r--r--deps/v8/test/js-perf-test/Numbers/toNumber.js25
-rw-r--r--deps/v8/test/js-perf-test/Parsing/arrowfunctions.js60
-rw-r--r--deps/v8/test/js-perf-test/Parsing/comments.js16
-rw-r--r--deps/v8/test/js-perf-test/Parsing/run.js11
-rw-r--r--deps/v8/test/js-perf-test/Parsing/strings.js37
-rw-r--r--deps/v8/test/js-perf-test/StringIterators/string-iterator.js55
-rw-r--r--deps/v8/test/js-perf-test/TurboFan/run.js29
-rw-r--r--deps/v8/test/js-perf-test/TurboFan/typedLowering.js13
-rw-r--r--deps/v8/test/js-perf-test/base.js20
-rw-r--r--deps/v8/test/message/asm-linking-bogus-heap.out2
-rw-r--r--deps/v8/test/message/fail/non-alphanum.out2
-rw-r--r--deps/v8/test/message/wasm-trace-memory-interpreted.out18
-rw-r--r--deps/v8/test/message/wasm-trace-memory-liftoff.out18
-rw-r--r--deps/v8/test/message/wasm-trace-memory.out18
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js1
-rw-r--r--deps/v8/test/mjsunit/array-iterator-prototype-next.js20
-rw-r--r--deps/v8/test/mjsunit/array-lastindexof.js73
-rw-r--r--deps/v8/test/mjsunit/array-prototype-every.js20
-rw-r--r--deps/v8/test/mjsunit/array-prototype-filter.js22
-rw-r--r--deps/v8/test/mjsunit/array-prototype-find.js20
-rw-r--r--deps/v8/test/mjsunit/array-prototype-findindex.js20
-rw-r--r--deps/v8/test/mjsunit/array-prototype-foreach.js26
-rw-r--r--deps/v8/test/mjsunit/array-prototype-includes.js36
-rw-r--r--deps/v8/test/mjsunit/array-prototype-indexof.js36
-rw-r--r--deps/v8/test/mjsunit/array-prototype-lastindexof.js20
-rw-r--r--deps/v8/test/mjsunit/array-prototype-map.js21
-rw-r--r--deps/v8/test/mjsunit/array-prototype-pop.js29
-rw-r--r--deps/v8/test/mjsunit/array-prototype-reduce.js24
-rw-r--r--deps/v8/test/mjsunit/array-prototype-slice.js21
-rw-r--r--deps/v8/test/mjsunit/array-prototype-some.js20
-rw-r--r--deps/v8/test/mjsunit/array-reverse.js70
-rw-r--r--deps/v8/test/mjsunit/asm/asm-heap.js99
-rw-r--r--deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/array-slice-clone.js365
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-deopt.js58
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-get.js63
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-set.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js103
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js104
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/number-constructor-deopt.js32
-rw-r--r--deps/v8/test/mjsunit/empirical_max_arraybuffer.js101
-rw-r--r--deps/v8/test/mjsunit/es6/array-fill-receiver.js118
-rw-r--r--deps/v8/test/mjsunit/es6/array-fill.js97
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator.js13
-rw-r--r--deps/v8/test/mjsunit/es6/math-log2-log10.js4
-rw-r--r--deps/v8/test/mjsunit/es6/promise-all-overflow-1.js2
-rw-r--r--deps/v8/test/mjsunit/es6/promise-all-overflow-2.js2
-rw-r--r--deps/v8/test/mjsunit/es6/promise-all.js2
-rw-r--r--deps/v8/test/mjsunit/es6/proxies.js2
-rw-r--r--deps/v8/test/mjsunit/es9/object-spread-ic.js101
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-866229.js20
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-866282.js17
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-866357.js17
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-866727.js19
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-866861.js11
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-867958.js13
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-869342.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/async-generators-resume-return.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/async-generators-return.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/async-generators-yield.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js16
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/turbo.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/global-configurable.js (renamed from deps/v8/src/torque/TorqueListener.cpp)5
-rw-r--r--deps/v8/test/mjsunit/harmony/global-writable.js (renamed from deps/v8/src/torque/TorqueBaseVisitor.cpp)5
-rw-r--r--deps/v8/test/mjsunit/harmony/global.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-namespace.js19
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-special-object.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/promise-prototype-finally.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js110
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status50
-rw-r--r--deps/v8/test/mjsunit/regress/regress-356053.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-357103.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-430201.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-430201b.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4325.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5085.js66
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6700.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-685.js43
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7716.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-797581.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8033.js45
-rw-r--r--deps/v8/test/mjsunit/regress/regress-865310.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-869735.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-875493.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-380671.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-759327.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-807096.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-830565.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-865312.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-865892.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-866315.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-871886.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-set-flags-stress-compact.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8070.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-776677.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-8059.js42
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-873600.js50
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-875556.js19
-rw-r--r--deps/v8/test/mjsunit/sparse-array-reverse.js131
-rw-r--r--deps/v8/test/mjsunit/string-pad.js155
-rw-r--r--deps/v8/test/mjsunit/test-async.js117
-rw-r--r--deps/v8/test/mjsunit/testcfg.py57
-rw-r--r--deps/v8/test/mjsunit/tools/csvparser.js4
-rw-r--r--deps/v8/test/mjsunit/tools/profviz.js2
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-imports.js181
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-memory.js212
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js356
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics.js75
-rw-r--r--deps/v8/test/mjsunit/wasm/bounds-check-64bit.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/empirical_max_memory.js85
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js274
-rw-r--r--deps/v8/test/mjsunit/wasm/function-names.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js28
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter-mixed.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_1gb_oob.js99
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_2gb_oob.js99
-rw-r--r--deps/v8/test/mjsunit/wasm/memory_4gb_oob.js97
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/origin-trial-flags.js35
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-interpreter.js63
-rw-r--r--deps/v8/test/mjsunit/wasm/worker-module.js2
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc2
-rw-r--r--deps/v8/test/test262/test262.status22
-rw-r--r--deps/v8/test/test262/testcfg.py1
-rw-r--r--deps/v8/test/torque/test-torque.tq171
-rw-r--r--deps/v8/test/unittests/BUILD.gn42
-rw-r--r--deps/v8/test/unittests/api/remote-object-unittest.cc2
-rw-r--r--deps/v8/test/unittests/asmjs/asm-types-unittest.cc4
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc78
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc78
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc62
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc66
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc66
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc68
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc68
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc62
-rw-r--r--deps/v8/test/unittests/base/functional-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc38
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc77
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h6
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/load-elimination-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/mips/OWNERS5
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc56
-rw-r--r--deps/v8/test/unittests/compiler/mips64/OWNERS5
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/counters-unittest.cc129
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc57
-rw-r--r--deps/v8/test/unittests/heap/heap-controller-unittest.cc80
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc64
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc3
-rw-r--r--deps/v8/test/unittests/object-unittest.cc1
-rw-r--r--deps/v8/test/unittests/parser/preparser-unittest.cc2
-rw-r--r--deps/v8/test/unittests/test-helpers.cc3
-rw-r--r--deps/v8/test/unittests/test-utils.cc18
-rw-r--r--deps/v8/test/unittests/test-utils.h7
-rw-r--r--deps/v8/test/unittests/torque/earley-parser-unittest.cc84
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc17
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc71
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc228
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc38
-rw-r--r--deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc72
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/webkit/webkit.status4
-rw-r--r--deps/v8/third_party/antlr4/BUILD.gn342
-rw-r--r--deps/v8/third_party/antlr4/LICENSE.txt52
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/README.md59
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/VERSION1
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.h171
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h121
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.cpp32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.h28
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.cpp143
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.h69
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.cpp59
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.h60
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.cpp32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.h43
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.cpp407
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.h207
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.h38
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.cpp170
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.h162
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.cpp41
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.h77
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.cpp77
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.h78
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp19
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.h37
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp355
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.h478
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp101
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.h89
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.cpp61
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.h109
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.cpp51
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.h33
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.cpp17
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.h24
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.cpp12
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.h222
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp17
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.h46
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.cpp274
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.h198
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.cpp82
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.h58
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp37
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.h31
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.cpp95
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.h94
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.cpp33
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.h41
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.cpp637
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.h489
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.cpp340
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.h186
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.cpp138
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.h149
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.cpp63
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.h45
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.cpp67
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.h100
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.cpp163
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.h171
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.cpp133
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.h143
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp23
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.h32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.cpp62
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.h159
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.h93
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenFactory.h34
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.h87
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.h138
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp463
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.h307
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp215
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.h125
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp257
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.h117
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.cpp108
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.h198
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.h23
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-common.h134
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-runtime.h164
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.cpp212
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.h115
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.cpp118
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.h148
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp224
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.h114
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp61
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h51
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp813
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.h88
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp622
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.h61
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp69
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.h93
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.cpp69
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.h134
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNType.h20
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp13
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h22
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.cpp41
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.h35
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp18
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h70
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp84
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h45
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.cpp33
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.h31
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h20
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.h19
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.cpp12
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.h24
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.h21
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp17
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h49
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp19
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h70
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp28
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.h232
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.cpp17
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.h28
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp30
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h28
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp35
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.h40
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp16
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.h44
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp173
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.h118
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp90
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.h50
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp683
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h223
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.h68
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp116
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h124
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionType.h55
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp49
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.h68
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp58
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.h92
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp58
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h85
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp49
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.h63
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp40
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.h61
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp40
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h61
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp50
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h64
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp40
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.h59
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp49
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.h58
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp18
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h43
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.h22
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Makefile67
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp30
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.h26
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h20
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.cpp101
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.h103
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp1508
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h942
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h25
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h21
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp35
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h30
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp22
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h64
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp41
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.h41
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.cpp694
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.h290
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.cpp217
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.h442
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp221
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h74
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.cpp33
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.h30
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.cpp12
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.h24
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.h23
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.cpp38
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.h44
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.cpp400
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.h229
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.cpp31
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.h31
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp80
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h39
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.h20
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp13
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h35
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp17
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.h21
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.cpp10
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.h20
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.cpp42
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.h77
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp25
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.h26
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.cpp125
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.h93
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp68
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.h32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.cpp95
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.h142
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp19
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h23
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp119
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.cpp88
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.h85
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.cpp516
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.h198
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.cpp127
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.h76
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.cpp3
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.h21
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.cpp12
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.h121
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.cpp44
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.h101
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/BitSet.h75
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.cpp237
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.h87
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Declarations.h165
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.cpp36
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.h35
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.cpp232
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.h111
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h127
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.h19
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp21
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h34
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp71
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h53
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.cpp14
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.h107
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.h38
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeProperty.h44
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h58
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp50
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.h32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp50
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h32
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.cpp241
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.h84
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp8
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.h44
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp65
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h141
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp61
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h111
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp388
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h196
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp57
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h130
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp33
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h87
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp24
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h53
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp31
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h84
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp148
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.h85
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp26
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h40
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp177
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.g464
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h59
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.tokens12
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp16
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h24
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp22
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h27
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp33
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h26
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp22
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h26
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp36
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h26
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp23
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h23
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp23
-rw-r--r--deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h23
-rw-r--r--deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h1
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template104
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template25
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template1
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Maybe_h.template54
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template51
-rw-r--r--deps/v8/third_party/v8/builtins/LICENSE254
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq1808
-rw-r--r--deps/v8/tools/BUILD.gn17
-rw-r--r--deps/v8/tools/callstats.py.vpython43
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/failure_output.txt4
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py29
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie_test.py2
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_config.py10
-rw-r--r--deps/v8/tools/csvparser.js3
-rwxr-xr-xdeps/v8/tools/eval_gc_time.sh1
-rw-r--r--deps/v8/tools/gcmole/BUILD.gn1
-rw-r--r--deps/v8/tools/gdbinit20
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py156
-rwxr-xr-xdeps/v8/tools/grokdump.py5
-rw-r--r--deps/v8/tools/heap-stats/categories.js21
-rw-r--r--deps/v8/tools/heap-stats/trace-file-reader.js13
-rw-r--r--deps/v8/tools/ic-processor.js5
-rw-r--r--deps/v8/tools/map-processor.js10
-rwxr-xr-xdeps/v8/tools/node/node_common.py5
-rw-r--r--deps/v8/tools/predictable_wrapper.py4
-rw-r--r--deps/v8/tools/profile.js27
-rw-r--r--deps/v8/tools/profview/index.html2
-rw-r--r--deps/v8/tools/profview/profview.css23
-rw-r--r--deps/v8/tools/profview/profview.js300
-rwxr-xr-xdeps/v8/tools/release/create_release.py3
-rwxr-xr-xdeps/v8/tools/release/filter_build_files.py2
-rw-r--r--deps/v8/tools/release/git_recipes.py6
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py2
-rwxr-xr-xdeps/v8/tools/run_perf.py264
-rw-r--r--deps/v8/tools/testrunner/base_runner.py23
-rw-r--r--deps/v8/tools/testrunner/local/android.py207
-rw-r--r--deps/v8/tools/testrunner/local/command.py113
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py2
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py11
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py6
-rw-r--r--deps/v8/tools/testrunner/testproc/filter.py20
-rw-r--r--deps/v8/tools/tickprocessor.js2
-rw-r--r--deps/v8/tools/torque/vim-torque/README.md33
-rw-r--r--deps/v8/tools/torque/vim-torque/ftdetect/torque.vim1
-rw-r--r--deps/v8/tools/torque/vim-torque/syntax/torque.vim84
-rwxr-xr-xdeps/v8/tools/try_perf.py3
-rwxr-xr-xdeps/v8/tools/unittests/run_perf_test.py12
-rw-r--r--deps/v8/tools/unittests/testdata/test1.json1
-rw-r--r--deps/v8/tools/unittests/testdata/test2.json1
-rw-r--r--deps/v8/tools/unittests/testdata/test3.json1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json1
-rw-r--r--deps/v8/tools/v8heapconst.py372
-rw-r--r--deps/v8/tools/whitespace.txt5
1600 files changed, 52278 insertions, 73115 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index b76fda4dc4..511e24d90c 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -36,6 +36,7 @@
/_*
/build
/buildtools
+/check-header-includes
/hydrogen.cfg
/obj
/out
@@ -68,6 +69,7 @@
!/third_party/googletest/src/googletest/include/gtest
/third_party/googletest/src/googletest/include/gtest/*
!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
+!/third_party/v8
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
diff --git a/deps/v8/.vpython b/deps/v8/.vpython
index 6a9ce3f693..398cef1ad5 100644
--- a/deps/v8/.vpython
+++ b/deps/v8/.vpython
@@ -1,7 +1,7 @@
# This is a vpython "spec" file.
#
# It describes patterns for python wheel dependencies of the python scripts in
-# the chromium repo, particularly for dependencies that have compiled components
+# the V8 repo, particularly for dependencies that have compiled components
# (since pure-python dependencies can be easily vendored into third_party).
#
# When vpython is invoked, it finds this file and builds a python VirtualEnv,
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 6179e2230d..898bc8feae 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -17,6 +17,7 @@ Opera Software ASA <*@opera.com>
Intel Corporation <*@intel.com>
MIPS Technologies, Inc. <*@mips.com>
Imagination Technologies, LLC <*@imgtec.com>
+Wave Computing, Inc. <*@wavecomp.com>
Loongson Technology Corporation Limited <*@loongson.cn>
Code Aurora Forum <*@codeaurora.org>
Home Jinni Inc. <*@homejinni.com>
@@ -100,6 +101,7 @@ Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
Kevin Gibbons <bakkot@gmail.com>
Kris Selden <kris.selden@gmail.com>
+Kyounga Ra <kyounga@alticast.com>
Loo Rong Jie <loorongjie@gmail.com>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 443694d880..c6a58776cd 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -25,10 +25,6 @@ declare_args() {
# Dynamically set an additional dependency from v8/custom_deps.
v8_custom_deps = ""
- # Turns on deprecation warnings for HeapObject::GetIsolate,
- # HeapObject::GetHeap, Handle(T* obj) and handle(T* obj).
- v8_deprecate_get_isolate = false
-
# Turns on all V8 debug features. Enables running V8 in a pseudo debug mode
# within a release Chrome.
v8_enable_debugging_features = is_debug
@@ -43,10 +39,10 @@ declare_args() {
v8_enable_verify_predictable = false
# Enable compiler warnings when using V8_DEPRECATED apis.
- v8_deprecation_warnings = false
+ v8_deprecation_warnings = true
# Enable compiler warnings when using V8_DEPRECATE_SOON apis.
- v8_imminent_deprecation_warnings = false
+ v8_imminent_deprecation_warnings = true
# Embeds the given script into the snapshot.
v8_embed_script = ""
@@ -77,10 +73,11 @@ declare_args() {
# Enable embedded builtins.
# TODO(jgruber,v8:6666): Support ia32 and maybe MSVC.
- # TODO(jgruber,v8:6666): Enable for remaining architectures once performance
- # regressions are addressed.
- v8_enable_embedded_builtins =
- v8_use_snapshot && v8_current_cpu == "x64" && (!is_win || is_clang)
+ v8_enable_embedded_builtins = v8_use_snapshot && v8_current_cpu != "x86" &&
+ !is_aix && (!is_win || is_clang)
+
+ # Enable embedded bytecode handlers.
+ v8_enable_embedded_bytecode_handlers = false
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@@ -162,6 +159,11 @@ declare_args() {
# Enable minor mark compact.
v8_enable_minor_mc = true
+
+ # Check that each header can be included in isolation (requires also
+ # setting the "check_v8_header_includes" gclient variable to run a
+ # specific hook).
+ v8_check_header_includes = false
}
# Derived defaults.
@@ -190,6 +192,13 @@ if (v8_check_microtasks_scopes_consistency == "") {
assert(!v8_enable_embedded_builtins || v8_use_snapshot,
"Embedded builtins only work with snapshots")
+assert(
+ v8_current_cpu != "x86" || !v8_enable_embedded_builtins ||
+ !v8_untrusted_code_mitigations,
+ "Embedded builtins on ia32 and untrusted code mitigations are incompatible")
+
+assert(!v8_enable_embedded_bytecode_handlers || v8_enable_embedded_builtins,
+ "Embedded bytecode handlers only work with embedded builtins")
# Specifies if the target build is a simulator build. Comparing target cpu
# with v8 target cpu to not affect simulator builds for making cross-compile
@@ -260,6 +269,12 @@ config("external_config") {
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h".
}
+ if (v8_deprecation_warnings) {
+ defines += [ "V8_DEPRECATION_WARNINGS" ]
+ }
+ if (v8_imminent_deprecation_warnings) {
+ defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
+ }
include_dirs = [
"include",
"$target_gen_dir/include",
@@ -340,9 +355,6 @@ config("features") {
if (v8_imminent_deprecation_warnings) {
defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
}
- if (v8_deprecate_get_isolate) {
- defines += [ "DEPRECATE_GET_ISOLATE" ]
- }
if (v8_enable_i18n_support) {
defines += [ "V8_INTL_SUPPORT" ]
}
@@ -367,6 +379,9 @@ config("features") {
if (v8_enable_embedded_builtins) {
defines += [ "V8_EMBEDDED_BUILTINS" ]
}
+ if (v8_enable_embedded_bytecode_handlers) {
+ defines += [ "V8_EMBEDDED_BYTECODE_HANDLERS" ]
+ }
if (v8_use_multi_snapshots) {
defines += [ "V8_MULTI_SNAPSHOTS" ]
}
@@ -586,10 +601,6 @@ config("toolchain") {
v8_current_cpu == "mips64el") {
cflags += [ "-Wshorten-64-to-32" ]
}
-
- if (v8_deprecate_get_isolate) {
- cflags += [ "-Wno-error=deprecated" ]
- }
}
if (is_win) {
@@ -846,6 +857,8 @@ action("postmortem-metadata") {
"src/objects/fixed-array.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
+ "src/objects/js-array-buffer-inl.h",
+ "src/objects/js-array-buffer.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/js-regexp-string-iterator-inl.h",
@@ -872,11 +885,13 @@ action("postmortem-metadata") {
torque_files = [
"src/builtins/base.tq",
"src/builtins/array.tq",
+ "src/builtins/array-copywithin.tq",
"src/builtins/array-foreach.tq",
- "src/builtins/array-sort.tq",
+ "src/builtins/array-reverse.tq",
"src/builtins/typed-array.tq",
"src/builtins/data-view.tq",
"test/torque/test-torque.tq",
+ "third_party/v8/builtins/array-sort.tq",
]
torque_modules = [
@@ -1106,6 +1121,7 @@ action("v8_dump_build_config") {
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
"current_cpu=\"$current_cpu\"",
"dcheck_always_on=$dcheck_always_on",
+ "is_android=$is_android",
"is_asan=$is_asan",
"is_cfi=$is_cfi",
"is_component_build=$is_component_build",
@@ -1502,11 +1518,13 @@ v8_source_set("v8_base") {
"src/allocation.cc",
"src/allocation.h",
"src/api-arguments-inl.h",
+ "src/api-arguments.cc",
"src/api-arguments.h",
"src/api-natives.cc",
"src/api-natives.h",
"src/api.cc",
"src/api.h",
+ "src/arguments-inl.h",
"src/arguments.cc",
"src/arguments.h",
"src/asan.h",
@@ -1521,6 +1539,7 @@ v8_source_set("v8_base") {
"src/asmjs/asm-types.h",
"src/asmjs/switch-logic.cc",
"src/asmjs/switch-logic.h",
+ "src/assembler-arch-inl.h",
"src/assembler-arch.h",
"src/assembler-inl.h",
"src/assembler.cc",
@@ -1591,6 +1610,7 @@ v8_source_set("v8_base") {
"src/builtins/builtins-symbol.cc",
"src/builtins/builtins-trace.cc",
"src/builtins/builtins-typed-array.cc",
+ "src/builtins/builtins-utils-inl.h",
"src/builtins/builtins-utils.h",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
@@ -1726,6 +1746,8 @@ v8_source_set("v8_base") {
"src/compiler/js-graph.h",
"src/compiler/js-heap-broker.cc",
"src/compiler/js-heap-broker.h",
+ "src/compiler/js-heap-copy-reducer.cc",
+ "src/compiler/js-heap-copy-reducer.h",
"src/compiler/js-inlining-heuristic.cc",
"src/compiler/js-inlining-heuristic.h",
"src/compiler/js-inlining.cc",
@@ -1892,6 +1914,7 @@ v8_source_set("v8_base") {
"src/dtoa.h",
"src/eh-frame.cc",
"src/eh-frame.h",
+ "src/elements-inl.h",
"src/elements-kind.cc",
"src/elements-kind.h",
"src/elements.cc",
@@ -1967,6 +1990,8 @@ v8_source_set("v8_base") {
"src/heap/heap-controller.cc",
"src/heap/heap-controller.h",
"src/heap/heap-inl.h",
+ "src/heap/heap-write-barrier-inl.h",
+ "src/heap/heap-write-barrier.h",
"src/heap/heap.cc",
"src/heap/heap.h",
"src/heap/incremental-marking-inl.h",
@@ -1979,6 +2004,7 @@ v8_source_set("v8_base") {
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.cc",
"src/heap/item-parallel-job.h",
+ "src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
@@ -2002,6 +2028,7 @@ v8_source_set("v8_base") {
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
+ "src/heap/store-buffer-inl.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/stress-marking-observer.cc",
@@ -2101,6 +2128,7 @@ v8_source_set("v8_base") {
"src/lookup-cache-inl.h",
"src/lookup-cache.cc",
"src/lookup-cache.h",
+ "src/lookup-inl.h",
"src/lookup.cc",
"src/lookup.h",
"src/lsan.h",
@@ -2110,6 +2138,8 @@ v8_source_set("v8_base") {
"src/macro-assembler.h",
"src/map-updater.cc",
"src/map-updater.h",
+ "src/maybe-handles-inl.h",
+ "src/maybe-handles.h",
"src/messages.cc",
"src/messages.h",
"src/msan.h",
@@ -2144,15 +2174,31 @@ v8_source_set("v8_base") {
"src/objects/intl-objects-inl.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
+ "src/objects/js-array-buffer-inl.h",
+ "src/objects/js-array-buffer.cc",
+ "src/objects/js-array-buffer.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
+ "src/objects/js-collator-inl.h",
+ "src/objects/js-collator.cc",
+ "src/objects/js-collator.h",
"src/objects/js-collection-inl.h",
"src/objects/js-collection.h",
+ "src/objects/js-generator-inl.h",
+ "src/objects/js-generator.h",
+ "src/objects/js-list-format-inl.h",
+ "src/objects/js-list-format.cc",
+ "src/objects/js-list-format.h",
"src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
"src/objects/js-locale.h",
+ "src/objects/js-plural-rules-inl.h",
+ "src/objects/js-plural-rules.cc",
+ "src/objects/js-plural-rules.h",
"src/objects/js-promise-inl.h",
"src/objects/js-promise.h",
+ "src/objects/js-proxy-inl.h",
+ "src/objects/js-proxy.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp-string-iterator-inl.h",
"src/objects/js-regexp-string-iterator.h",
@@ -2219,10 +2265,9 @@ v8_source_set("v8_base") {
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
"src/parsing/pattern-rewriter.cc",
- "src/parsing/preparse-data.cc",
- "src/parsing/preparse-data.h",
"src/parsing/preparsed-scope-data.cc",
"src/parsing/preparsed-scope-data.h",
+ "src/parsing/preparser-logger.h",
"src/parsing/preparser.cc",
"src/parsing/preparser.h",
"src/parsing/rewriter.cc",
@@ -2294,6 +2339,8 @@ v8_source_set("v8_base") {
"src/register-configuration.cc",
"src/register-configuration.h",
"src/reglist.h",
+ "src/reloc-info.cc",
+ "src/reloc-info.h",
"src/roots-inl.h",
"src/roots.h",
"src/runtime-profiler.cc",
@@ -2386,8 +2433,8 @@ v8_source_set("v8_base") {
"src/splay-tree.h",
"src/startup-data-util.cc",
"src/startup-data-util.h",
+ "src/string-builder-inl.h",
"src/string-builder.cc",
- "src/string-builder.h",
"src/string-case.cc",
"src/string-case.h",
"src/string-hasher-inl.h",
@@ -2482,6 +2529,9 @@ v8_source_set("v8_base") {
"src/wasm/wasm-engine.h",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
+ "src/wasm/wasm-feature-flags.h",
+ "src/wasm/wasm-features.cc",
+ "src/wasm/wasm-features.h",
"src/wasm/wasm-interpreter.cc",
"src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
@@ -2505,6 +2555,7 @@ v8_source_set("v8_base") {
"src/wasm/wasm-serialization.h",
"src/wasm/wasm-text.cc",
"src/wasm/wasm-text.h",
+ "src/wasm/wasm-tier.h",
"src/wasm/wasm-value.h",
"src/zone/accounting-allocator.cc",
"src/zone/accounting-allocator.h",
@@ -2519,6 +2570,13 @@ v8_source_set("v8_base") {
"src/zone/zone.h",
]
+ if (v8_check_header_includes) {
+ # This file will be generated by tools/generate-header-include-checks.py
+ # if the "check_v8_header_includes" gclient variable is set.
+ import("check-header-includes/sources.gni")
+ sources += check_header_includes_sources
+ }
+
if (use_jumbo_build == true) {
jumbo_excluded_sources = [
# TODO(mostynb@vewd.com): don't exclude these http://crbug.com/752428
@@ -2833,9 +2891,18 @@ v8_source_set("v8_base") {
"src/objects/intl-objects-inl.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
+ "src/objects/js-collator-inl.h",
+ "src/objects/js-collator.cc",
+ "src/objects/js-collator.h",
+ "src/objects/js-list-format-inl.h",
+ "src/objects/js-list-format.cc",
+ "src/objects/js-list-format.h",
"src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
"src/objects/js-locale.h",
+ "src/objects/js-plural-rules-inl.h",
+ "src/objects/js-plural-rules.cc",
+ "src/objects/js-plural-rules.h",
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
@@ -2849,6 +2916,46 @@ v8_source_set("v8_base") {
}
}
+v8_source_set("torque_base") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = [
+ "src/torque/ast.h",
+ "src/torque/contextual.h",
+ "src/torque/declarable.cc",
+ "src/torque/declarable.h",
+ "src/torque/declaration-visitor.cc",
+ "src/torque/declaration-visitor.h",
+ "src/torque/declarations.cc",
+ "src/torque/declarations.h",
+ "src/torque/earley-parser.cc",
+ "src/torque/earley-parser.h",
+ "src/torque/file-visitor.cc",
+ "src/torque/file-visitor.h",
+ "src/torque/global-context.h",
+ "src/torque/implementation-visitor.cc",
+ "src/torque/implementation-visitor.h",
+ "src/torque/scope.cc",
+ "src/torque/scope.h",
+ "src/torque/source-positions.cc",
+ "src/torque/source-positions.h",
+ "src/torque/torque-parser.cc",
+ "src/torque/torque-parser.h",
+ "src/torque/type-oracle.cc",
+ "src/torque/type-oracle.h",
+ "src/torque/types.cc",
+ "src/torque/types.h",
+ "src/torque/utils.cc",
+ "src/torque/utils.h",
+ ]
+
+ deps = [
+ ":v8_libbase",
+ ]
+
+ configs = [ ":internal_config" ]
+}
+
v8_component("v8_libbase") {
sources = [
"src/base/adapters.h",
@@ -3153,65 +3260,16 @@ if (current_toolchain == v8_snapshot_toolchain) {
v8_executable("torque") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- defines = [ "ANTLR4CPP_STATIC" ]
-
- include_dirs = [
- "third_party/antlr4/runtime/Cpp/runtime/src",
- "src/torque",
- ]
-
sources = [
- "src/torque/TorqueBaseVisitor.cpp",
- "src/torque/TorqueBaseVisitor.h",
- "src/torque/TorqueLexer.cpp",
- "src/torque/TorqueLexer.h",
- "src/torque/TorqueParser.cpp",
- "src/torque/TorqueParser.h",
- "src/torque/TorqueVisitor.cpp",
- "src/torque/TorqueVisitor.h",
- "src/torque/ast-generator.cc",
- "src/torque/ast-generator.h",
- "src/torque/ast.h",
- "src/torque/contextual.h",
- "src/torque/declarable.cc",
- "src/torque/declarable.h",
- "src/torque/declaration-visitor.cc",
- "src/torque/declaration-visitor.h",
- "src/torque/declarations.cc",
- "src/torque/declarations.h",
- "src/torque/file-visitor.cc",
- "src/torque/file-visitor.h",
- "src/torque/global-context.h",
- "src/torque/implementation-visitor.cc",
- "src/torque/implementation-visitor.h",
- "src/torque/scope.cc",
- "src/torque/scope.h",
"src/torque/torque.cc",
- "src/torque/type-oracle.h",
- "src/torque/types.cc",
- "src/torque/types.h",
- "src/torque/utils.cc",
- "src/torque/utils.h",
]
deps = [
- ":v8_libbase",
- "third_party/antlr4:antlr4",
+ ":torque_base",
"//build/win:default_exe_manifest",
]
- remove_configs = [
- "//build/config/compiler:no_rtti",
- "//build/config/compiler:no_exceptions",
- ]
-
- configs = [
- "//build/config/compiler:rtti",
- "//build/config/compiler:exceptions",
- "third_party/antlr4:antlr-compatibility",
- ":external_config",
- ":internal_config_base",
- ]
+ configs = [ ":internal_config" ]
}
}
@@ -3316,6 +3374,7 @@ if (is_component_build) {
]
public_deps = [
+ ":torque_base",
":v8_base",
":v8_maybe_snapshot",
]
@@ -3342,6 +3401,7 @@ if (is_component_build) {
testonly = true
public_deps = [
+ ":torque_base",
":v8_base",
":v8_maybe_snapshot",
]
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index fc7a93e2d9..428325ad58 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1518 @@
+2018-08-27: Version 7.0.276
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-27: Version 7.0.275
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-26: Version 7.0.274
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-25: Version 7.0.273
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-25: Version 7.0.272
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-25: Version 7.0.271
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-25: Version 7.0.270
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.269
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.268
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.267
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.266
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.265
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.264
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.263
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.262
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.261
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.260
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-24: Version 7.0.259
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.258
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.257
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.256
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.255
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.254
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.253
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.252
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.251
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.250
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-23: Version 7.0.249
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-22: Version 7.0.248
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-22: Version 7.0.247
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-21: Version 7.0.246
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-21: Version 7.0.245
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-21: Version 7.0.244
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-21: Version 7.0.243
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-17: Version 7.0.242
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-17: Version 7.0.241
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-17: Version 7.0.240
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-17: Version 7.0.239
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-17: Version 7.0.238
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-15: Version 7.0.237
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-14: Version 7.0.236
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-14: Version 7.0.235
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-14: Version 7.0.234
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-14: Version 7.0.233
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-14: Version 7.0.232
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-14: Version 7.0.231
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-12: Version 7.0.230
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-11: Version 7.0.229
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-11: Version 7.0.228
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-11: Version 7.0.227
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-11: Version 7.0.226
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-11: Version 7.0.225
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-11: Version 7.0.224
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.223
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.222
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.221
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.220
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.219
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.218
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.217
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.216
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.215
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.214
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.213
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.212
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.211
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.210
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.209
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-10: Version 7.0.208
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-09: Version 7.0.207
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-09: Version 7.0.206
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-09: Version 7.0.205
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-09: Version 7.0.204
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-09: Version 7.0.203
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-09: Version 7.0.202
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-09: Version 7.0.201
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.200
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.199
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.198
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.197
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.196
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.195
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.194
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.193
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.192
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.191
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.190
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.189
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.188
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.187
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.186
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.185
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.184
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.183
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.182
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.181
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.180
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.179
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.178
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.177
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.176
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-08: Version 7.0.175
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-07: Version 7.0.174
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-07: Version 7.0.173
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-07: Version 7.0.172
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-07: Version 7.0.171
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.170
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.169
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.168
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.167
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.166
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.165
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.164
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.163
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.162
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.161
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.160
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-06: Version 7.0.159
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.158
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.157
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.156
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.155
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.154
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.153
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.152
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.151
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.150
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.149
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-03: Version 7.0.148
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.147
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.146
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.145
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.144
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.143
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.142
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.141
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.140
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.139
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.138
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.137
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.136
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.135
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.134
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.133
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.132
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.131
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.130
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.129
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-02: Version 7.0.128
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.127
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.126
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.125
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.124
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.123
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.122
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.121
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.120
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.119
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.118
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.117
+
+ Performance and stability improvements on all platforms.
+
+
+2018-08-01: Version 7.0.116
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.115
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.114
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.113
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.112
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.111
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.110
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.109
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.108
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.107
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.106
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.105
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.104
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.103
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.102
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.101
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.100
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.99
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.98
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.97
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.96
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-31: Version 7.0.95
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.94
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.93
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.92
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.91
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.90
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.89
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.88
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-30: Version 7.0.87
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-29: Version 7.0.86
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-28: Version 7.0.85
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-28: Version 7.0.84
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-28: Version 7.0.83
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.82
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.81
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.80
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.79
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.78
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.77
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.76
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.75
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-27: Version 7.0.74
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.73
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.72
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.71
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.70
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.69
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.68
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.67
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.66
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.65
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.64
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.63
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.62
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.61
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-26: Version 7.0.60
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.59
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.58
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.57
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.56
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.55
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.54
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.53
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.52
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.51
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.50
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.49
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.48
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.47
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.46
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.45
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.44
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-25: Version 7.0.43
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.42
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.41
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.40
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.39
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.38
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.37
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.36
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.35
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.34
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.33
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.32
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.31
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.30
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.29
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.28
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.27
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.26
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.25
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.24
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.23
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.22
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.21
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.20
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.19
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.18
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.17
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.16
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.15
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.14
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.13
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.12
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.11
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.10
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-24: Version 7.0.9
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-23: Version 7.0.8
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-23: Version 7.0.7
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-23: Version 7.0.6
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-23: Version 7.0.5
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-23: Version 7.0.4
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-23: Version 7.0.3
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-23: Version 7.0.2
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-20: Version 7.0.1
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-19: Version 6.9.454
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-19: Version 6.9.453
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-19: Version 6.9.452
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.451
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.450
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.449
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.448
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.447
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.446
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.445
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.444
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.443
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.442
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.441
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.440
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.439
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.438
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.437
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.436
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.435
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.434
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.433
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.432
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.431
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.430
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-18: Version 6.9.429
+
+ Performance and stability improvements on all platforms.
+
+
+2018-07-17: Version 6.9.428
+
+ Performance and stability improvements on all platforms.
+
+
2018-07-17: Version 6.9.427
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index cb9a7fe536..36d9d0eaeb 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,25 +8,26 @@ vars = {
'download_gcmole': False,
'download_jsfunfuzz': False,
'download_mips_toolchain': False,
+ 'check_v8_header_includes': False,
}
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '7315579e388589b62236ad933f09afd1e838d234',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dd6b994b32b498e9e766ce60c44da0aec3a2a188',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'fb734036f4b5ae6d5afc63cbfc41d3a5d1c29a82',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '1aa405fd859a3bd625b0d61184d6e4a3cf95c0b4',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a9a2bd3ee4f1d313651c5272252aaf2a3e7ed529',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a191af9d025859e8368b8b469120d78006e9f5f6',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '323cf32193caecbf074d1a0cb5b02b905f163e0f',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'd8cf40c4592dcec7fb01fcbdf1f6d4958b3fbf11',
'v8/buildtools':
- Var('chromium_url') + '/chromium/buildtools.git' + '@' + '0dd5c6f980d22be96b728155249df2da355989d9',
+ Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2dff9c9c74e9d732e6fe57c84ef7fd044cc45d96',
'v8/base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '211b3ed9d0481b4caddbee1322321b86a483ca1f',
'v8/third_party/android_ndk': {
- 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '5cd86312e794bdf542a3685c6f10cbb96072990b',
+ 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884',
'condition': 'checkout_android',
},
'v8/third_party/android_tools': {
@@ -34,7 +35,7 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'f5342c4cf3d3e85e43be84c22bdfd8ebff23ec70',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'bc2c0a9307285fa36e03e7cdb6bf8623390ff855',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -42,19 +43,19 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '82277014aeccc89bae4d7a317813affa3f7de0ee',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '3ec92c896bcbddc46e2a073ebfdd25aa1194656e',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'ce468a17c434e4e79724396ee1b51d86bfc8a88b',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'd5266326752f0a1dadbd310932d8f4fd8c3c5e7d',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/third_party/proguard':
- Var('chromium_url') + '/chromium/src/third_party/proguard.git' + '@' + 'eba7a98d98735b2cc65c54d36baa5c9b46fe4f8e',
+ Var('chromium_url') + '/chromium/src/third_party/proguard.git' + '@' + 'a3729bea473bb5ffc5eaf289f5733bc5e2861c07',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '9a518d097dca20b7b00ce3bdfc5d418ccc79893a',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '486c9b53c4d54dd4b95bb6ce0e31160e600dfc11',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
@@ -63,12 +64,22 @@ deps = {
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a6c1d05ac4fed084fa047e4c52ab2a8c9c2a8aef',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
+ 'v8/third_party/qemu': {
+ 'packages': [
+ {
+ 'package': 'fuchsia/qemu/linux-amd64',
+ 'version': '9cc486c5b18a0be515c39a280ca9a309c54cf994'
+ },
+ ],
+ 'condition': 'checkout_fuchsia',
+ 'dep_type': 'cipd',
+ },
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c0b1d892b2bc1291eb287d716ca239c1b03fb215',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'bb4146fb8a9dde405b71914657bb461dc93912ab',
'v8/tools/luci-go':
- Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'abcd908f74fdb155cc8870f5cae48dff1ece7c3c',
+ Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '445d7c4b6a4f10e188edb395b132e3996b127691',
'v8/test/wasm-js':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '2113ea7e106f8a964e0445ba38f289d2aa845edd',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '240ea673de6e75d78ae472f66127301ecab22a99',
}
recursedeps = [
@@ -333,6 +344,13 @@ hooks = [
'condition': 'checkout_win',
'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
},
+ {
+ # Update the Mac toolchain if necessary.
+ 'name': 'mac_toolchain',
+ 'pattern': '.',
+ 'condition': 'checkout_mac',
+ 'action': ['python', 'v8/build/mac_toolchain.py'],
+ },
# Pull binutils for linux, enabled debug fission for faster linking /
# debugging when used with clang on Ubuntu Precise.
# https://code.google.com/p/chromium/issues/detail?id=352046
@@ -354,6 +372,13 @@ hooks = [
'action': ['python', 'v8/tools/clang/scripts/update.py'],
},
{
+ # Update LASTCHANGE.
+ 'name': 'lastchange',
+ 'pattern': '.',
+ 'action': ['python', 'v8/build/util/lastchange.py',
+ '-o', 'v8/build/util/LASTCHANGE'],
+ },
+ {
'name': 'fuchsia_sdk',
'pattern': '.',
'condition': 'checkout_fuchsia',
@@ -385,4 +410,13 @@ hooks = [
'-vpython-tool', 'install',
],
},
+ {
+ 'name': 'check_v8_header_includes',
+ 'pattern': '.',
+ 'condition': 'check_v8_header_includes',
+ 'action': [
+ 'python',
+ 'v8/tools/generate-header-include-checks.py',
+ ],
+ },
]
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index a7ac912c0a..d20fb79fe1 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -7,7 +7,9 @@ yangguo@chromium.org
per-file v8-inspector.h=dgozman@chromium.org
per-file v8-inspector.h=pfeldman@chromium.org
+per-file v8-inspector.h=kozyatinskiy@chromium.org
per-file v8-inspector-protocol.h=dgozman@chromium.org
per-file v8-inspector-protocol.h=pfeldman@chromium.org
+per-file v8-inspector-protocol.h=kozyatinskiy@chromium.org
# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index a381b97f88..2b167cb9e5 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -62,11 +62,6 @@ V8_PLATFORM_EXPORT bool PumpMessageLoop(
v8::Platform* platform, v8::Isolate* isolate,
MessageLoopBehavior behavior = MessageLoopBehavior::kDoNotWait);
-V8_PLATFORM_EXPORT V8_DEPRECATED(
- "This function has become obsolete and is essentially a nop",
- void EnsureEventLoopInitialized(v8::Platform* platform,
- v8::Isolate* isolate));
-
/**
* Runs pending idle tasks for at most |idle_time_in_seconds| seconds.
*
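With the deprecated EnsureEventLoopInitialized() gone, PumpMessageLoop() and RunIdleTasks() are the libplatform entry points left in this hunk. A minimal pump-loop sketch, assuming the platform came from v8::platform::NewDefaultPlatform() and the isolate is live (both names are placeholders):

#include "libplatform/libplatform.h"
#include "v8.h"

void DrainForegroundTasks(v8::Platform* platform, v8::Isolate* isolate) {
  // With the default kDoNotWait behavior this returns false once the
  // foreground task queue for |isolate| is empty.
  while (v8::platform::PumpMessageLoop(platform, isolate)) {
  }
  // Give idle tasks at most 10 ms, per the |idle_time_in_seconds| contract above.
  v8::platform::RunIdleTasks(platform, isolate, 0.010);
}
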
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index ad04d01bd2..e06963949a 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -245,6 +245,8 @@ class V8_EXPORT V8Inspector {
virtual void contextCreated(const V8ContextInfo&) = 0;
virtual void contextDestroyed(v8::Local<v8::Context>) = 0;
virtual void resetContextGroup(int contextGroupId) = 0;
+ virtual v8::MaybeLocal<v8::Context> contextById(int groupId,
+ v8::Maybe<int> contextId) = 0;
// Various instrumentation.
virtual void idleStarted() = 0;
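The new pure-virtual contextById() lets callers map a context group id plus an optional context id back to a v8::Context. A usage sketch, assuming the inspector instance was obtained from v8_inspector::V8Inspector::create() and the ids were previously registered through contextCreated():

#include "v8-inspector.h"
#include "v8.h"

v8::MaybeLocal<v8::Context> FindContext(v8_inspector::V8Inspector* inspector,
                                        int group_id, int context_id) {
  // An empty result simply means no context is known under these ids.
  return inspector->contextById(group_id, v8::Just(context_id));
}
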
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index b486683c27..9981061a44 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -47,24 +47,6 @@ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
namespace v8 {
-/**
- * TracingCpuProfiler monitors tracing being enabled/disabled
- * and emits CpuProfile trace events once v8.cpu_profiler tracing category
- * is enabled. It has no overhead unless the category is enabled.
- */
-class V8_EXPORT TracingCpuProfiler {
- public:
- V8_DEPRECATED(
- "The profiler is created automatically with the isolate.\n"
- "No need to create it explicitly.",
- static std::unique_ptr<TracingCpuProfiler> Create(Isolate*));
-
- virtual ~TracingCpuProfiler() = default;
-
- protected:
- TracingCpuProfiler() = default;
-};
-
// TickSample captures the information collected for each sample.
struct TickSample {
// Internal profiling (with --prof + tools/$OS-tick-processor) wants to
@@ -915,7 +897,7 @@ class V8_EXPORT HeapProfiler {
"Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback));
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use AddBuildEmbedderGraphCallback to provide info about embedder nodes",
void SetBuildEmbedderGraphCallback(
LegacyBuildEmbedderGraphCallback callback));
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 57fbc62964..b4c837cda4 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -8,10 +8,10 @@
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 6
-#define V8_MINOR_VERSION 9
-#define V8_BUILD_NUMBER 427
-#define V8_PATCH_LEVEL 23
+#define V8_MAJOR_VERSION 7
+#define V8_MINOR_VERSION 0
+#define V8_BUILD_NUMBER 276
+#define V8_PATCH_LEVEL 20
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
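The macro bump from 6.9.427.23 to 7.0.276.20 is what the rest of this patch tracks. Embedders that support more than one V8 release commonly gate code on these macros; a generic compile-time sketch (the EMBEDDER_V8_AT_LEAST helper is not part of the patch):

#include "v8-version.h"

#define EMBEDDER_V8_AT_LEAST(major, minor)                       \
  (V8_MAJOR_VERSION > (major) ||                                 \
   (V8_MAJOR_VERSION == (major) && V8_MINOR_VERSION >= (minor)))

#if EMBEDDER_V8_AT_LEAST(7, 0)
// Use the Isolate- and Context-taking overloads that 7.0 makes mandatory below.
#endif
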
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 20a65afcbc..63edc67edf 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -155,7 +155,7 @@ class FunctionCallbackArguments;
class GlobalHandles;
namespace wasm {
-class CompilationResultResolver;
+class NativeModule;
class StreamingDecoder;
} // namespace wasm
@@ -203,7 +203,7 @@ struct SmiTagging<4> {
V8_INLINE static internal::Object* IntToSmi(int value) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
- V8_INLINE static bool IsValidSmi(intptr_t value) {
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
// To be representable as an tagged small integer, the two
// most-significant bits of 'value' must be either 00 or 11 due to
// sign-extension. To check this we add 01 to the two
@@ -233,7 +233,7 @@ struct SmiTagging<8> {
V8_INLINE static internal::Object* IntToSmi(int value) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
- V8_INLINE static bool IsValidSmi(intptr_t value) {
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
// To be representable as a long smi, the value must be a 32-bit integer.
return (value == static_cast<int32_t>(value));
}
@@ -1125,10 +1125,6 @@ class V8_EXPORT PrimitiveArray {
int Length() const;
void Set(Isolate* isolate, int index, Local<Primitive> item);
Local<Primitive> Get(Isolate* isolate, int index);
-
- V8_DEPRECATE_SOON("Use Isolate version",
- void Set(int index, Local<Primitive> item));
- V8_DEPRECATE_SOON("Use Isolate version", Local<Primitive> Get(int index));
};
/**
@@ -1356,23 +1352,15 @@ class V8_EXPORT Script {
/**
* A shorthand for ScriptCompiler::Compile().
*/
- static V8_DEPRECATED("Use maybe version",
- Local<Script> Compile(Local<String> source,
- ScriptOrigin* origin = nullptr));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, Local<String> source,
ScriptOrigin* origin = nullptr);
- static Local<Script> V8_DEPRECATED("Use maybe version",
- Compile(Local<String> source,
- Local<String> file_name));
-
/**
* Runs the script returning the resulting value. It will be run in the
* context in which it was created (ScriptCompiler::CompileBound or
* UnboundScript::BindToCurrentContext()).
*/
- V8_DEPRECATED("Use maybe version", Local<Value> Run());
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Run(Local<Context> context);
/**
@@ -1688,13 +1676,6 @@ class V8_EXPORT ScriptCompiler {
* It is possible to specify multiple context extensions (obj in the above
* example).
*/
- static V8_DEPRECATED("Use maybe version",
- Local<Function> CompileFunctionInContext(
- Isolate* isolate, Source* source,
- Local<Context> context, size_t arguments_count,
- Local<String> arguments[],
- size_t context_extension_count,
- Local<Object> context_extensions[]));
static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
Local<Context> context, Source* source, size_t arguments_count,
Local<String> arguments[], size_t context_extension_count,
@@ -1717,10 +1698,6 @@ class V8_EXPORT ScriptCompiler {
static CachedData* CreateCodeCache(
Local<UnboundModuleScript> unbound_module_script);
- V8_DEPRECATED("Source string is no longer required",
- static CachedData* CreateCodeCache(
- Local<UnboundScript> unbound_script, Local<String> source));
-
/**
* Creates and returns code cache for the specified function that was
* previously produced by CompileFunctionInContext.
@@ -1729,10 +1706,6 @@ class V8_EXPORT ScriptCompiler {
*/
static CachedData* CreateCodeCacheForFunction(Local<Function> function);
- V8_DEPRECATED("Source string is no longer required",
- static CachedData* CreateCodeCacheForFunction(
- Local<Function> function, Local<String> source));
-
private:
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
Isolate* isolate, Source* source, CompileOptions options,
@@ -1747,7 +1720,11 @@ class V8_EXPORT Message {
public:
Local<String> Get() const;
- V8_DEPRECATED("Use maybe version", Local<String> GetSourceLine() const);
+ /**
+ * Return the isolate to which the Message belongs.
+ */
+ Isolate* GetIsolate() const;
+
V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
Local<Context> context) const;
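Message::GetIsolate() removes the need to thread an Isolate* alongside every Message. Combined with the Maybe-returning accessors kept in this class, error reporting can look roughly like this (the TryCatch origin of the message is an assumption):

#include "v8.h"

int LineOfMessage(v8::Local<v8::Message> message) {
  v8::Isolate* isolate = message->GetIsolate();
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  return message->GetLineNumber(context).FromMaybe(0);
}
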
@@ -1773,7 +1750,6 @@ class V8_EXPORT Message {
/**
* Returns the number, 1-based, of the line where the error occurred.
*/
- V8_DEPRECATED("Use maybe version", int GetLineNumber() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetLineNumber(Local<Context> context) const;
/**
@@ -1853,8 +1829,6 @@ class V8_EXPORT StackTrace {
/**
* Returns a StackFrame at a particular index.
*/
- V8_DEPRECATE_SOON("Use Isolate version",
- Local<StackFrame> GetFrame(uint32_t index) const);
Local<StackFrame> GetFrame(Isolate* isolate, uint32_t index) const;
/**
@@ -2563,13 +2537,6 @@ class V8_EXPORT Value : public Data {
V8_DEPRECATE_SOON("Use maybe version",
Local<Int32> ToInt32(Isolate* isolate) const);
- inline V8_DEPRECATE_SOON("Use maybe version",
- Local<Boolean> ToBoolean() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<String> ToString() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<Object> ToObject() const);
- inline V8_DEPRECATE_SOON("Use maybe version",
- Local<Integer> ToInteger() const);
-
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
@@ -2585,14 +2552,7 @@ class V8_EXPORT Value : public Data {
Local<Context> context) const;
V8_WARN_UNUSED_RESULT Maybe<int32_t> Int32Value(Local<Context> context) const;
- V8_DEPRECATE_SOON("Use maybe version", bool BooleanValue() const);
- V8_DEPRECATE_SOON("Use maybe version", double NumberValue() const);
- V8_DEPRECATE_SOON("Use maybe version", int64_t IntegerValue() const);
- V8_DEPRECATE_SOON("Use maybe version", uint32_t Uint32Value() const);
- V8_DEPRECATE_SOON("Use maybe version", int32_t Int32Value() const);
-
/** JS == */
- V8_DEPRECATE_SOON("Use maybe version", bool Equals(Local<Value> that) const);
V8_WARN_UNUSED_RESULT Maybe<bool> Equals(Local<Context> context,
Local<Value> that) const;
bool StrictEquals(Local<Value> that) const;
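The non-Maybe conversions and the context-free Equals() are dropped here, so comparisons and numeric coercions now go through a Context. A short sketch of the surviving spelling:

#include "v8.h"

bool LooselyEqual(v8::Local<v8::Context> context, v8::Local<v8::Value> a,
                  v8::Local<v8::Value> b) {
  // JS ==; Nothing() (treated as false here) means the comparison threw.
  return a->Equals(context, b).FromMaybe(false);
}
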
@@ -2699,8 +2659,6 @@ class V8_EXPORT String : public Name {
* Returns the number of bytes in the UTF-8 encoded
* representation of this string.
*/
- V8_DEPRECATE_SOON("Use Isolate version instead", int Utf8Length() const);
-
int Utf8Length(Isolate* isolate) const;
/**
@@ -2757,23 +2715,12 @@ class V8_EXPORT String : public Name {
// 16-bit character codes.
int Write(Isolate* isolate, uint16_t* buffer, int start = 0, int length = -1,
int options = NO_OPTIONS) const;
- V8_DEPRECATE_SOON("Use Isolate* version",
- int Write(uint16_t* buffer, int start = 0, int length = -1,
- int options = NO_OPTIONS) const);
// One byte characters.
int WriteOneByte(Isolate* isolate, uint8_t* buffer, int start = 0,
int length = -1, int options = NO_OPTIONS) const;
- V8_DEPRECATE_SOON("Use Isolate* version",
- int WriteOneByte(uint8_t* buffer, int start = 0,
- int length = -1, int options = NO_OPTIONS)
- const);
// UTF-8 encoded characters.
int WriteUtf8(Isolate* isolate, char* buffer, int length = -1,
int* nchars_ref = NULL, int options = NO_OPTIONS) const;
- V8_DEPRECATE_SOON("Use Isolate* version",
- int WriteUtf8(char* buffer, int length = -1,
- int* nchars_ref = NULL,
- int options = NO_OPTIONS) const);
/**
* A zero length string.
@@ -2937,9 +2884,6 @@ class V8_EXPORT String : public Name {
*/
static Local<String> Concat(Isolate* isolate, Local<String> left,
Local<String> right);
- static V8_DEPRECATE_SOON("Use Isolate* version",
- Local<String> Concat(Local<String> left,
- Local<String> right));
/**
* Creates a new external string using the data defined in the given
@@ -2995,6 +2939,11 @@ class V8_EXPORT String : public Name {
bool CanMakeExternal();
/**
+ * Returns true if the strings values are equal. Same as JS ==/===.
+ */
+ bool StringEquals(Local<String> str);
+
+ /**
* Converts an object to a UTF-8-encoded character array. Useful if
* you want to print the object. If conversion to a string fails
* (e.g. due to an exception in the toString() method of the object)
@@ -3003,8 +2952,6 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Utf8Value {
public:
- V8_DEPRECATED("Use Isolate version",
- explicit Utf8Value(Local<v8::Value> obj));
Utf8Value(Isolate* isolate, Local<v8::Value> obj);
~Utf8Value();
char* operator*() { return str_; }
@@ -3028,7 +2975,6 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Value {
public:
- V8_DEPRECATED("Use Isolate version", explicit Value(Local<v8::Value> obj));
Value(Isolate* isolate, Local<v8::Value> obj);
~Value();
uint16_t* operator*() { return str_; }
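The String conversions and writers now take an explicit Isolate*, and StringEquals() is newly added earlier in this file's diff. A small sketch of the surviving spellings (the printf formatting is illustrative only):

#include <cstdio>
#include "v8.h"

bool DumpAndCompare(v8::Isolate* isolate, v8::Local<v8::String> a,
                    v8::Local<v8::String> b) {
  v8::String::Utf8Value utf8(isolate, a);   // the one-argument ctor is gone
  std::printf("%d UTF-8 bytes: %s\n", a->Utf8Length(isolate), *utf8);
  return a->StringEquals(b);                // same result as JS ==/=== on strings
}
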
@@ -3048,6 +2994,12 @@ class V8_EXPORT String : public Name {
void VerifyExternalStringResourceBase(ExternalStringResourceBase* v,
Encoding encoding) const;
void VerifyExternalStringResource(ExternalStringResource* val) const;
+ ExternalStringResource* GetExternalStringResourceSlow() const;
+ ExternalStringResourceBase* GetExternalStringResourceBaseSlow(
+ String::Encoding* encoding_out) const;
+ const ExternalOneByteStringResource* GetExternalOneByteStringResourceSlow()
+ const;
+
static void CheckCast(v8::Value* obj);
};
@@ -4204,8 +4156,6 @@ class V8_EXPORT Promise : public Object {
/**
* Create a new resolver, along with an associated promise in pending state.
*/
- static V8_DEPRECATED("Use maybe version",
- Local<Resolver> New(Isolate* isolate));
static V8_WARN_UNUSED_RESULT MaybeLocal<Resolver> New(
Local<Context> context);
@@ -4218,11 +4168,9 @@ class V8_EXPORT Promise : public Object {
* Resolve/reject the associated promise with a given value.
* Ignored if the promise is no longer pending.
*/
- V8_DEPRECATED("Use maybe version", void Resolve(Local<Value> value));
V8_WARN_UNUSED_RESULT Maybe<bool> Resolve(Local<Context> context,
Local<Value> value);
- V8_DEPRECATED("Use maybe version", void Reject(Local<Value> value));
V8_WARN_UNUSED_RESULT Maybe<bool> Reject(Local<Context> context,
Local<Value> value);
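Only the context-taking Resolver::New(), Resolve() and Reject() remain. A minimal sketch that surfaces every failure as an empty result:

#include "v8.h"

v8::MaybeLocal<v8::Promise> MakeResolvedPromise(v8::Local<v8::Context> context,
                                                v8::Local<v8::Value> value) {
  v8::Local<v8::Promise::Resolver> resolver;
  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
    return v8::MaybeLocal<v8::Promise>();
  }
  if (resolver->Resolve(context, value).IsNothing()) {
    return v8::MaybeLocal<v8::Promise>();  // e.g. execution was terminated
  }
  return resolver->GetPromise();
}
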
@@ -4375,9 +4323,9 @@ class V8_EXPORT WasmCompiledModule : public Object {
public:
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
-// The COMMA macro allows us to use ',' inside of the V8_DEPRECATE_SOON macro.
+// The COMMA macro allows us to use ',' inside of the V8_DEPRECATED macro.
#define COMMA ,
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use BufferReference.",
typedef std::pair<const uint8_t * COMMA size_t> CallerOwnedBuffer);
#undef COMMA
@@ -4391,10 +4339,10 @@ class V8_EXPORT WasmCompiledModule : public Object {
BufferReference(const uint8_t* start, size_t size)
: start(start), size(size) {}
// Temporarily allow conversion to and from CallerOwnedBuffer.
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use BufferReference directly.",
inline BufferReference(CallerOwnedBuffer)); // NOLINT(runtime/explicit)
- V8_DEPRECATE_SOON("Use BufferReference directly.",
+ V8_DEPRECATED("Use BufferReference directly.",
inline operator CallerOwnedBuffer());
};
@@ -4411,13 +4359,17 @@ class V8_EXPORT WasmCompiledModule : public Object {
TransferrableModule& operator=(const TransferrableModule& src) = delete;
private:
+ typedef std::shared_ptr<internal::wasm::NativeModule> SharedModule;
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> OwnedBuffer;
friend class WasmCompiledModule;
- TransferrableModule(OwnedBuffer code, OwnedBuffer bytes)
- : compiled_code(std::move(code)), wire_bytes(std::move(bytes)) {}
-
- OwnedBuffer compiled_code = {nullptr, 0};
- OwnedBuffer wire_bytes = {nullptr, 0};
+ explicit TransferrableModule(SharedModule shared_module)
+ : shared_module_(std::move(shared_module)) {}
+ TransferrableModule(OwnedBuffer serialized, OwnedBuffer bytes)
+ : serialized_(std::move(serialized)), wire_bytes_(std::move(bytes)) {}
+
+ SharedModule shared_module_;
+ OwnedBuffer serialized_ = {nullptr, 0};
+ OwnedBuffer wire_bytes_ = {nullptr, 0};
};
/**
@@ -4438,7 +4390,7 @@ class V8_EXPORT WasmCompiledModule : public Object {
* Get the wasm-encoded bytes that were used to compile this module.
*/
BufferReference GetWasmWireBytesRef();
- V8_DEPRECATE_SOON("Use GetWasmWireBytesRef version.",
+ V8_DEPRECATED("Use GetWasmWireBytesRef version.",
Local<String> GetWasmWireBytes());
/**
@@ -4472,7 +4424,7 @@ class V8_EXPORT WasmCompiledModule : public Object {
static void CheckCast(Value* obj);
};
-// TODO(clemensh): Remove after M69 branch.
+// TODO(clemensh): Remove after M70 branch.
WasmCompiledModule::BufferReference::BufferReference(
WasmCompiledModule::CallerOwnedBuffer buf)
: BufferReference(buf.first, buf.second) {}
@@ -4645,17 +4597,22 @@ class V8_EXPORT ArrayBuffer : public Object {
* returns an instance of this class, populated, with a pointer to data
* and byte length.
*
- * The Data pointer of ArrayBuffer::Contents is always allocated with
- * Allocator::Allocate that is set via Isolate::CreateParams.
+ * The Data pointer of ArrayBuffer::Contents must be freed using the provided
+ * deleter, which will call ArrayBuffer::Allocator::Free if the buffer
+ * was allocated with ArrayBuffer::Allocator::Allocate.
*/
class V8_EXPORT Contents { // NOLINT
public:
+ using DeleterCallback = void (*)(void* buffer, size_t length, void* info);
+
Contents()
: data_(nullptr),
byte_length_(0),
allocation_base_(nullptr),
allocation_length_(0),
- allocation_mode_(Allocator::AllocationMode::kNormal) {}
+ allocation_mode_(Allocator::AllocationMode::kNormal),
+ deleter_(nullptr),
+ deleter_data_(nullptr) {}
void* AllocationBase() const { return allocation_base_; }
size_t AllocationLength() const { return allocation_length_; }
@@ -4665,13 +4622,22 @@ class V8_EXPORT ArrayBuffer : public Object {
void* Data() const { return data_; }
size_t ByteLength() const { return byte_length_; }
+ DeleterCallback Deleter() const { return deleter_; }
+ void* DeleterData() const { return deleter_data_; }
private:
+ Contents(void* data, size_t byte_length, void* allocation_base,
+ size_t allocation_length,
+ Allocator::AllocationMode allocation_mode, DeleterCallback deleter,
+ void* deleter_data);
+
void* data_;
size_t byte_length_;
void* allocation_base_;
size_t allocation_length_;
Allocator::AllocationMode allocation_mode_;
+ DeleterCallback deleter_;
+ void* deleter_data_;
friend class ArrayBuffer;
};
@@ -4728,8 +4694,9 @@ class V8_EXPORT ArrayBuffer : public Object {
* had been externalized, it does no longer own the memory block. The caller
* should take steps to free memory when it is no longer needed.
*
- * The memory block is guaranteed to be allocated with |Allocator::Allocate|
- * that has been set via Isolate::CreateParams.
+ * The Data pointer of ArrayBuffer::Contents must be freed using the provided
+ * deleter, which will call ArrayBuffer::Allocator::Free if the buffer
+ * was allocated with ArrayBuffer::Allocator::Allocate.
*/
Contents Externalize();
@@ -4740,8 +4707,6 @@ class V8_EXPORT ArrayBuffer : public Object {
*
* The embedder should make sure to hold a strong reference to the
* ArrayBuffer while accessing this pointer.
- *
- * The memory block is guaranteed to be allocated with |Allocator::Allocate|.
*/
Contents GetContents();
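The rewritten comments change the ownership contract for externalized buffers: the embedder releases the memory through the Contents deleter instead of assuming it came from Allocator::Allocate. A sketch of that flow; the exact arguments passed to the deleter are an assumption read off the DeleterCallback signature:

#include "v8.h"

void ExternalizeAndRelease(v8::Local<v8::ArrayBuffer> buffer) {
  v8::ArrayBuffer::Contents contents = buffer->Externalize();
  // ... hand contents.Data() / contents.ByteLength() to the consumer ...
  // Assumption: the deleter takes the data pointer, its byte length and the
  // opaque DeleterData() cookie, matching DeleterCallback's parameter list.
  contents.Deleter()(contents.Data(), contents.ByteLength(),
                     contents.DeleterData());
}
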
@@ -5048,41 +5013,54 @@ class V8_EXPORT SharedArrayBuffer : public Object {
* |SharedArrayBuffer| returns an instance of this class, populated, with a
* pointer to data and byte length.
*
- * The Data pointer of SharedArrayBuffer::Contents is always allocated with
- * |ArrayBuffer::Allocator::Allocate| by the allocator specified in
- * v8::Isolate::CreateParams::array_buffer_allocator.
+ * The Data pointer of ArrayBuffer::Contents must be freed using the provided
+ * deleter, which will call ArrayBuffer::Allocator::Free if the buffer
+ * was allocated with ArrayBuffer::Allocator::Allocate.
*
* This API is experimental and may change significantly.
*/
class V8_EXPORT Contents { // NOLINT
public:
+ using Allocator = v8::ArrayBuffer::Allocator;
+ using DeleterCallback = void (*)(void* buffer, size_t length, void* info);
+
Contents()
: data_(nullptr),
byte_length_(0),
allocation_base_(nullptr),
allocation_length_(0),
- allocation_mode_(ArrayBuffer::Allocator::AllocationMode::kNormal) {}
+ allocation_mode_(Allocator::AllocationMode::kNormal),
+ deleter_(nullptr),
+ deleter_data_(nullptr) {}
void* AllocationBase() const { return allocation_base_; }
size_t AllocationLength() const { return allocation_length_; }
- ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
+ Allocator::AllocationMode AllocationMode() const {
return allocation_mode_;
}
void* Data() const { return data_; }
size_t ByteLength() const { return byte_length_; }
+ DeleterCallback Deleter() const { return deleter_; }
+ void* DeleterData() const { return deleter_data_; }
private:
+ Contents(void* data, size_t byte_length, void* allocation_base,
+ size_t allocation_length,
+ Allocator::AllocationMode allocation_mode, DeleterCallback deleter,
+ void* deleter_data);
+
void* data_;
size_t byte_length_;
void* allocation_base_;
size_t allocation_length_;
- ArrayBuffer::Allocator::AllocationMode allocation_mode_;
+ Allocator::AllocationMode allocation_mode_;
+ DeleterCallback deleter_;
+ void* deleter_data_;
friend class SharedArrayBuffer;
};
-
/**
* Data length in bytes.
*/
@@ -5239,8 +5217,6 @@ class V8_EXPORT BooleanObject : public Object {
class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Local<String> value);
- static V8_DEPRECATE_SOON("Use Isolate* version",
- Local<Value> New(Local<String> value));
Local<String> ValueOf() const;
@@ -5296,8 +5272,6 @@ class V8_EXPORT RegExp : public Object {
* static_cast<RegExp::Flags>(kGlobal | kMultiline))
* is equivalent to evaluating "/foo/gm".
*/
- static V8_DEPRECATED("Use maybe version",
- Local<RegExp> New(Local<String> pattern, Flags flags));
static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> New(Local<Context> context,
Local<String> pattern,
Flags flags);
@@ -5441,57 +5415,8 @@ class V8_EXPORT Template : public Data {
friend class FunctionTemplate;
};
-
-/**
- * NamedProperty[Getter|Setter] are used as interceptors on object.
- * See ObjectTemplate::SetNamedPropertyHandler.
- */
-typedef void (*NamedPropertyGetterCallback)(
- Local<String> property,
- const PropertyCallbackInfo<Value>& info);
-
-
-/**
- * Returns the value if the setter intercepts the request.
- * Otherwise, returns an empty handle.
- */
-typedef void (*NamedPropertySetterCallback)(
- Local<String> property,
- Local<Value> value,
- const PropertyCallbackInfo<Value>& info);
-
-
-/**
- * Returns a non-empty handle if the interceptor intercepts the request.
- * The result is an integer encoding property attributes (like v8::None,
- * v8::DontEnum, etc.)
- */
-typedef void (*NamedPropertyQueryCallback)(
- Local<String> property,
- const PropertyCallbackInfo<Integer>& info);
-
-
-/**
- * Returns a non-empty handle if the deleter intercepts the request.
- * The return value is true if the property could be deleted and false
- * otherwise.
- */
-typedef void (*NamedPropertyDeleterCallback)(
- Local<String> property,
- const PropertyCallbackInfo<Boolean>& info);
-
-/**
- * Returns an array containing the names of the properties the named
- * property getter intercepts.
- *
- * Note: The values in the array must be of type v8::Name.
- */
-typedef void (*NamedPropertyEnumeratorCallback)(
- const PropertyCallbackInfo<Array>& info);
-
-
-// TODO(dcarney): Deprecate and remove previous typedefs, and replace
-// GenericNamedPropertyFooCallback with just NamedPropertyFooCallback.
+// TODO(dcarney): Replace GenericNamedPropertyFooCallback with just
+// NamedPropertyFooCallback.
/**
* Interceptor for get requests on an object.
@@ -6200,39 +6125,6 @@ class V8_EXPORT ObjectTemplate : public Template {
/**
* Sets a named property handler on the object template.
*
- * Whenever a property whose name is a string is accessed on objects created
- * from this object template, the provided callback is invoked instead of
- * accessing the property directly on the JavaScript object.
- *
- * SetNamedPropertyHandler() is different from SetHandler(), in
- * that the latter can intercept symbol-named properties as well as
- * string-named properties when called with a
- * NamedPropertyHandlerConfiguration. New code should use SetHandler().
- *
- * \param getter The callback to invoke when getting a property.
- * \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check if a property is present,
- * and if present, get its attributes.
- * \param deleter The callback to invoke when deleting a property.
- * \param enumerator The callback to invoke to enumerate all the named
- * properties of an object.
- * \param data A piece of data that will be passed to the callbacks
- * whenever they are invoked.
- */
- V8_DEPRECATED(
- "Use SetHandler(const NamedPropertyHandlerConfiguration) "
- "with the kOnlyInterceptStrings flag set.",
- void SetNamedPropertyHandler(
- NamedPropertyGetterCallback getter,
- NamedPropertySetterCallback setter = 0,
- NamedPropertyQueryCallback query = 0,
- NamedPropertyDeleterCallback deleter = 0,
- NamedPropertyEnumeratorCallback enumerator = 0,
- Local<Value> data = Local<Value>()));
-
- /**
- * Sets a named property handler on the object template.
- *
* Whenever a property whose name is a string or a symbol is accessed on
* objects created from this object template, the provided callback is
* invoked instead of accessing the property directly on the JavaScript
@@ -6401,9 +6293,8 @@ class V8_EXPORT AccessorSignature : public Data {
// --- Extensions ---
-V8_DEPRECATE_SOON("Implementation detail",
- class ExternalOneByteStringResourceImpl);
-class V8_EXPORT ExternalOneByteStringResourceImpl
+V8_DEPRECATE_SOON("Implementation detail", class)
+V8_EXPORT ExternalOneByteStringResourceImpl
: public String::ExternalOneByteStringResource {
public:
ExternalOneByteStringResourceImpl() : data_(0), length_(0) {}
@@ -6613,7 +6504,6 @@ typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
// --- Enter/Leave Script Callback ---
typedef void (*BeforeCallEnteredCallback)(Isolate*);
typedef void (*CallCompletedCallback)(Isolate*);
-typedef void (*DeprecatedCallCompletedCallback)();
/**
* HostImportModuleDynamicallyCallback is called when we require the
@@ -6787,6 +6677,9 @@ typedef void (*ApiImplementationCallback)(const FunctionCallbackInfo<Value>&);
// --- Callback for WebAssembly.compileStreaming ---
typedef void (*WasmStreamingCallback)(const FunctionCallbackInfo<Value>&);
+// --- Callback for checking if WebAssembly threads are enabled ---
+typedef bool (*WasmThreadsEnabledCallback)(Local<Context> context);
+
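A minimal sketch of how an embedder might wire this up, using the SetWasmThreadsEnabledCallback setter added to v8::Isolate later in this patch; the policy inside the callback is purely illustrative:

    #include "v8.h"

    // Illustrative policy: only enable WebAssembly threads where the embedder
    // has decided it is safe for the given context.
    bool WasmThreadsEnabled(v8::Local<v8::Context> context) {
      return false;  // e.g. consult an embedder flag or origin trial here
    }

    void InstallWasmThreadsPolicy(v8::Isolate* isolate) {
      isolate->SetWasmThreadsEnabledCallback(WasmThreadsEnabled);
    }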
// --- Garbage Collection Callbacks ---
/**
@@ -6859,6 +6752,7 @@ class V8_EXPORT HeapStatistics {
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
size_t malloced_memory() { return malloced_memory_; }
+ size_t external_memory() { return external_memory_; }
size_t peak_malloced_memory() { return peak_malloced_memory_; }
size_t number_of_native_contexts() { return number_of_native_contexts_; }
size_t number_of_detached_contexts() { return number_of_detached_contexts_; }
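
A small sketch of reading the new external_memory() figure alongside the existing counters; GetHeapStatistics() and total_heap_size() are part of the pre-existing API and assumed unchanged here:

    #include <cstdio>
    #include "v8.h"

    void LogHeapStats(v8::Isolate* isolate) {
      v8::HeapStatistics stats;
      isolate->GetHeapStatistics(&stats);
      std::printf("used %zu of %zu bytes, external %zu bytes\n",
                  stats.used_heap_size(), stats.total_heap_size(),
                  stats.external_memory());
    }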
@@ -6877,6 +6771,7 @@ class V8_EXPORT HeapStatistics {
size_t used_heap_size_;
size_t heap_size_limit_;
size_t malloced_memory_;
+ size_t external_memory_;
size_t peak_malloced_memory_;
bool does_zap_garbage_;
size_t number_of_native_contexts_;
@@ -7102,18 +6997,21 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT
enum class MemoryPressureLevel { kNone, kModerate, kCritical };
/**
- * Interface for tracing through the embedder heap. During a v8 garbage
- * collection, v8 collects hidden fields of all potential wrappers, and at the
+ * Interface for tracing through the embedder heap. During a V8 garbage
+ * collection, V8 collects hidden fields of all potential wrappers, and at the
* end of its marking phase iterates the collection and asks the embedder to
 * trace through its heap and use a reporter to report each JavaScript object
* reachable from any of the given wrappers.
- *
- * Before the first call to the TraceWrappersFrom function TracePrologue will be
- * called. When the garbage collection cycle is finished, TraceEpilogue will be
- * called.
*/
class V8_EXPORT EmbedderHeapTracer {
public:
+ // Indicator for the stack state of the embedder.
+ enum EmbedderStackState {
+ kUnknown,
+ kNonEmpty,
+ kEmpty,
+ };
+
enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
struct AdvanceTracingActions {
@@ -7123,6 +7021,8 @@ class V8_EXPORT EmbedderHeapTracer {
ForceCompletionAction force_completion;
};
+ virtual ~EmbedderHeapTracer() = default;
+
/**
* Called by v8 to register internal fields of found wrappers.
*
@@ -7138,7 +7038,7 @@ class V8_EXPORT EmbedderHeapTracer {
virtual void TracePrologue() = 0;
/**
- * Called to to make a tracing step in the embedder.
+ * Called to make a tracing step in the embedder.
*
* The embedder is expected to trace its heap starting from wrappers reported
* by RegisterV8References method, and report back all reachable wrappers.
@@ -7146,15 +7046,36 @@ class V8_EXPORT EmbedderHeapTracer {
* deadline.
*
* Returns true if there is still work to do.
+ *
+ * Note: Only one of the AdvanceTracing methods needs to be overridden by the
+ * embedder.
+ */
+ V8_DEPRECATE_SOON("Use void AdvanceTracing(deadline_in_ms)",
+ virtual bool AdvanceTracing(
+ double deadline_in_ms, AdvanceTracingActions actions)) {
+ return false;
+ }
+
+ /**
+ * Called to advance tracing in the embedder.
+ *
+ * The embedder is expected to trace its heap starting from wrappers reported
+ * by RegisterV8References method, and report back all reachable wrappers.
+ * Furthermore, the embedder is expected to stop tracing by the given
+ * deadline. A deadline of infinity means that tracing should be finished.
+ *
+ * Returns |true| if tracing is done, and |false| otherwise.
+ *
+ * Note: Only one of the AdvanceTracing methods needs to be overridden by the
+ * embedder.
*/
- virtual bool AdvanceTracing(double deadline_in_ms,
- AdvanceTracingActions actions) = 0;
+ virtual bool AdvanceTracing(double deadline_in_ms);
/*
 * Returns true if there is no more tracing work to be done (see AdvanceTracing)
* and false otherwise.
*/
- virtual bool IsTracingDone() { return NumberOfWrappersToTrace() == 0; }
+ virtual bool IsTracingDone();
/**
* Called at the end of a GC cycle.
@@ -7166,8 +7087,13 @@ class V8_EXPORT EmbedderHeapTracer {
/**
* Called upon entering the final marking pause. No more incremental marking
* steps will follow this call.
+ *
+ * Note: Only one of the EnterFinalPause methods needs to be overridden by the
+ * embedder.
*/
- virtual void EnterFinalPause() = 0;
+ V8_DEPRECATE_SOON("Use void EnterFinalPause(EmbedderStackState)",
+ virtual void EnterFinalPause()) {}
+ virtual void EnterFinalPause(EmbedderStackState stack_state);
/**
* Called when tracing is aborted.
@@ -7178,7 +7104,7 @@ class V8_EXPORT EmbedderHeapTracer {
virtual void AbortTracing() = 0;
/*
- * Called by the embedder to request immediaet finalization of the currently
+ * Called by the embedder to request immediate finalization of the currently
* running tracing phase that has been started with TracePrologue and not
* yet finished with TraceEpilogue.
*
@@ -7189,6 +7115,13 @@ class V8_EXPORT EmbedderHeapTracer {
void FinalizeTracing();
/*
+ * Called by the embedder to immediately perform a full garbage collection.
+ *
+ * Should only be used in testing code.
+ */
+ void GarbageCollectionForTesting(EmbedderStackState stack_state);
+
+ /*
 * Returns the v8::Isolate this tracer is attached to and |nullptr| if it
* is not attached to any v8::Isolate.
*/
@@ -7198,11 +7131,11 @@ class V8_EXPORT EmbedderHeapTracer {
* Returns the number of wrappers that are still to be traced by the embedder.
*/
V8_DEPRECATE_SOON("Use IsTracingDone",
- virtual size_t NumberOfWrappersToTrace() { return 0; });
+ virtual size_t NumberOfWrappersToTrace()) {
+ return 0;
+ }
protected:
- virtual ~EmbedderHeapTracer() = default;
-
v8::Isolate* isolate_ = nullptr;
friend class internal::LocalEmbedderHeapTracer;
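
Taken together, the new hooks let an embedder implement only the non-deprecated overloads. A minimal sketch, assuming the pure-virtual methods keep their existing signatures; the class name and empty bodies are purely illustrative:

    #include <utility>
    #include <vector>
    #include "v8.h"

    class SketchTracer : public v8::EmbedderHeapTracer {
     public:
      void RegisterV8References(
          const std::vector<std::pair<void*, void*>>& embedder_fields) override {}
      void TracePrologue() override {}
      bool AdvanceTracing(double deadline_in_ms) override {
        // Trace embedder objects until the deadline; true means tracing is done.
        return true;
      }
      bool IsTracingDone() override { return true; }
      void TraceEpilogue() override {}
      void EnterFinalPause(EmbedderStackState stack_state) override {}
      void AbortTracing() override {}
    };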
@@ -7500,6 +7433,8 @@ class V8_EXPORT Isolate {
kDeoptimizerDisableSpeculation = 47,
kArrayPrototypeSortJSArrayModifiedPrototype = 48,
kFunctionTokenOffsetTooLongForToString = 49,
+ kWasmSharedMemory = 50,
+ kWasmThreadOpcodes = 51,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@@ -7768,16 +7703,6 @@ class V8_EXPORT Isolate {
*/
Local<Context> GetCurrentContext();
- /**
- * Returns the context of the calling JavaScript code. That is the
- * context of the top-most JavaScript frame. If there are no
- * JavaScript frames an empty handle is returned.
- */
- V8_DEPRECATED(
- "Calling context concept is not compatible with tail calls, and will be "
- "removed.",
- Local<Context> GetCallingContext());
-
/** Returns the last context entered through V8's C++ API. */
Local<Context> GetEnteredContext();
@@ -8029,17 +7954,11 @@ class V8_EXPORT Isolate {
* further callbacks.
*/
void AddCallCompletedCallback(CallCompletedCallback callback);
- V8_DEPRECATED(
- "Use callback with parameter",
- void AddCallCompletedCallback(DeprecatedCallCompletedCallback callback));
/**
* Removes callback that was installed by AddCallCompletedCallback.
*/
void RemoveCallCompletedCallback(CallCompletedCallback callback);
- V8_DEPRECATED("Use callback with parameter",
- void RemoveCallCompletedCallback(
- DeprecatedCallCompletedCallback callback));
/**
* Set the PromiseHook callback for various promise lifecycle
@@ -8073,14 +7992,11 @@ class V8_EXPORT Isolate {
* Controls how Microtasks are invoked. See MicrotasksPolicy for details.
*/
void SetMicrotasksPolicy(MicrotasksPolicy policy);
- V8_DEPRECATED("Use SetMicrotasksPolicy",
- void SetAutorunMicrotasks(bool autorun));
/**
* Returns the policy controlling how Microtasks are invoked.
*/
MicrotasksPolicy GetMicrotasksPolicy() const;
- V8_DEPRECATED("Use GetMicrotasksPolicy", bool WillAutorunMicrotasks() const);
/**
* Adds a callback to notify the host application after
@@ -8303,6 +8219,8 @@ class V8_EXPORT Isolate {
void SetWasmStreamingCallback(WasmStreamingCallback callback);
+ void SetWasmThreadsEnabledCallback(WasmThreadsEnabledCallback callback);
+
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
@@ -8467,28 +8385,6 @@ class V8_EXPORT V8 {
static void SetNativesDataBlob(StartupData* startup_blob);
static void SetSnapshotDataBlob(StartupData* startup_blob);
- /**
- * Bootstrap an isolate and a context from scratch to create a startup
- * snapshot. Include the side-effects of running the optional script.
- * Returns { NULL, 0 } on failure.
- * The caller acquires ownership of the data array in the return value.
- */
- V8_DEPRECATED("Use SnapshotCreator",
- static StartupData CreateSnapshotDataBlob(
- const char* embedded_source = NULL));
-
- /**
- * Bootstrap an isolate and a context from the cold startup blob, run the
- * warm-up script to trigger code compilation. The side effects are then
- * discarded. The resulting startup snapshot will include compiled code.
- * Returns { NULL, 0 } on failure.
- * The caller acquires ownership of the data array in the return value.
- * The argument startup blob is untouched.
- */
- V8_DEPRECATED("Use SnapshotCreator",
- static StartupData WarmUpSnapshotDataBlob(
- StartupData cold_startup_blob, const char* warmup_source));
-
/** Set the callback to invoke in case of Dcheck failures. */
static void SetDcheckErrorHandler(DcheckErrorCallback that);
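
The removed helpers pointed to SnapshotCreator as the replacement; the corresponding api.cc implementation removed later in this patch follows the same pattern. A condensed sketch:

    #include "v8.h"

    v8::StartupData CreateStartupBlob() {
      v8::SnapshotCreator creator;
      v8::Isolate* isolate = creator.GetIsolate();
      {
        v8::HandleScope scope(isolate);
        v8::Local<v8::Context> context = v8::Context::New(isolate);
        // Optionally compile and run an embedding script here first.
        creator.SetDefaultContext(context);
      }
      return creator.CreateBlob(
          v8::SnapshotCreator::FunctionCodeHandling::kClear);
    }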
@@ -8956,7 +8852,6 @@ class V8_EXPORT TryCatch {
* Returns the .stack property of the thrown object. If no .stack
* property is present an empty handle is returned.
*/
- V8_DEPRECATED("Use maybe version.", Local<Value> StackTrace() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Value> StackTrace(
Local<Context> context) const;
@@ -9535,7 +9430,7 @@ class Internals {
return PlatformSmiTagging::IntToSmi(value);
}
- V8_INLINE static bool IsValidSmi(intptr_t value) {
+ V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
return PlatformSmiTagging::IsValidSmi(value);
}
@@ -10209,12 +10104,13 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O* const*>(this);
- String::ExternalStringResource* result;
+
+ ExternalStringResource* result;
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
result = reinterpret_cast<String::ExternalStringResource*>(value);
} else {
- result = NULL;
+ result = GetExternalStringResourceSlow();
}
#ifdef V8_ENABLE_CHECKS
VerifyExternalStringResource(result);
@@ -10230,14 +10126,16 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
O* obj = *reinterpret_cast<O* const*>(this);
int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
- ExternalStringResourceBase* resource = NULL;
+ ExternalStringResourceBase* resource;
if (type == I::kExternalOneByteRepresentationTag ||
type == I::kExternalTwoByteRepresentationTag) {
void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
resource = static_cast<ExternalStringResourceBase*>(value);
+ } else {
+ resource = GetExternalStringResourceBaseSlow(encoding_out);
}
#ifdef V8_ENABLE_CHECKS
- VerifyExternalStringResourceBase(resource, *encoding_out);
+ VerifyExternalStringResourceBase(resource, *encoding_out);
#endif
return resource;
}
@@ -10318,30 +10216,6 @@ template <class T> Value* Value::Cast(T* value) {
}
-Local<Boolean> Value::ToBoolean() const {
- return ToBoolean(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Boolean>());
-}
-
-
-Local<String> Value::ToString() const {
- return ToString(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<String>());
-}
-
-
-Local<Object> Value::ToObject() const {
- return ToObject(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Object>());
-}
-
-
-Local<Integer> Value::ToInteger() const {
- return ToInteger(Isolate::GetCurrent()->GetCurrentContext())
- .FromMaybe(Local<Integer>());
-}
-
-
Boolean* Boolean::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
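
With the context-free conversions gone from the inline section, callers go through the MaybeLocal-returning overloads. A small sketch of the ToString(context) pattern, with an illustrative helper name:

    #include "v8.h"

    bool Describe(v8::Local<v8::Context> context, v8::Local<v8::Value> value,
                  v8::Local<v8::String>* out) {
      v8::Local<v8::String> str;
      // An empty MaybeLocal signals a pending exception in the isolate.
      if (!value->ToString(context).ToLocal(&str)) return false;
      *out = str;
      return true;
    }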
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index d8e1f36c12..75fd5aa7e7 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -214,6 +214,8 @@
# define V8_HAS_ATTRIBUTE_ALIGNED (__has_attribute(aligned))
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
+# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE \
+ (__has_extension(attribute_deprecated_with_message))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index 85fb595ae7..a0ede58b91 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -2,7 +2,6 @@
# documentation of this file format.
version: 1
-cq_name: "v8"
cq_status_url: "https://chromium-cq-status.appspot.com"
git_repo_url: "https://chromium.googlesource.com/v8/v8"
commit_burst_delay: 60
@@ -36,6 +35,7 @@ verifiers {
triggered_by: "v8_linux64_dbg_ng"
}
builders { name: "v8_linux64_gcc_compile_dbg" }
+ builders { name: "v8_linux64_header_includes_dbg" }
builders { name: "v8_linux64_jumbo_compile_rel" }
builders { name: "v8_linux64_rel_ng" }
builders {
@@ -67,8 +67,6 @@ verifiers {
}
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_gcc_compile_rel" }
- builders { name: "v8_linux_mips64el_compile_rel" }
- builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_nodcheck_rel_ng" }
builders {
name: "v8_linux_nodcheck_rel_ng_triggered"
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 0492cf31a5..095aeefc5c 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -69,10 +69,10 @@
'V8 Linux - verify csa': 'release_x86_verify_csa',
# Linux64.
'V8 Linux64 - builder': 'release_x64',
- 'V8 Linux64 - concurrent marking - builder': 'release_x64_concurrent_marking',
'V8 Linux64 - debug builder': 'debug_x64',
'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom',
'V8 Linux64 - internal snapshot': 'release_x64_internal',
+ 'V8 Linux64 - debug - header includes': 'debug_x64_header_includes',
'V8 Linux64 - verify csa': 'release_x64_verify_csa',
# Jumbo.
'V8 Linux64 Jumbo': 'release_x64_jumbo',
@@ -96,9 +96,7 @@
'V8 Mac64 ASAN': 'release_x64_asan_no_lsan',
# Sanitizers.
'V8 Linux64 ASAN': 'release_x64_asan',
- 'V8 Linux64 TSAN': 'release_x64_tsan',
- 'V8 Linux64 TSAN - concurrent marking':
- 'release_x64_tsan_concurrent_marking',
+ 'V8 Linux64 TSAN - builder': 'release_x64_tsan',
'V8 Linux - arm64 - sim - MSAN': 'release_simulate_arm64_msan',
# Misc.
'V8 Linux gcc 4.8': 'release_x86_gcc',
@@ -161,6 +159,7 @@
'V8 Linux - arm - sim - debug': 'debug_simulate_arm',
# Arm64.
'V8 Android Arm64 - builder': 'release_android_arm64',
+ 'V8 Android Arm64 - debug builder': 'debug_android_arm64',
'V8 Linux - arm64 - sim': 'release_simulate_arm64',
'V8 Linux - arm64 - sim - debug': 'debug_simulate_arm64',
'V8 Linux - arm64 - sim - nosnap - debug':
@@ -204,6 +203,9 @@
'V8 s390x - sim - stable branch': 'release_simulate_s390x',
},
'tryserver.v8': {
+ 'v8_android_arm_compile_rel': 'release_android_arm',
+ 'v8_android_arm64_compile_dbg': 'debug_android_arm64',
+ 'v8_android_arm64_n5x_rel_ng': 'release_android_arm64',
'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot',
'v8_linux_rel_ng': 'release_x86_gcmole_trybot',
'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
@@ -218,6 +220,7 @@
'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
+ 'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa',
@@ -229,8 +232,8 @@
'v8_linux64_jumbo_compile_rel': 'release_x64_jumbo_trybot',
'v8_linux64_jumbo_limited_compile_rel': 'release_x64_jumbo_limited_trybot',
'v8_linux64_tsan_rel': 'release_x64_tsan_minimal_symbols',
- 'v8_linux64_tsan_concurrent_marking_rel_ng':
- 'release_x64_tsan_concurrent_marking_minimal_symbols',
+ 'v8_linux64_tsan_isolates_rel_ng':
+ 'release_x64_tsan_minimal_symbols',
'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols',
# TODO(machenbach): Remove after switching to x64 on infra side.
'v8_win_dbg': 'debug_x86_trybot',
@@ -258,7 +261,6 @@
'v8_linux_arm64_gc_stress_dbg': 'debug_simulate_arm64',
'v8_linux_mipsel_compile_rel': 'release_simulate_mipsel',
'v8_linux_mips64el_compile_rel': 'release_simulate_mips64el',
- 'v8_android_arm_compile_rel': 'release_android_arm',
},
},
@@ -405,6 +407,8 @@
'release_bot', 'simulate_s390x'],
# Debug configs for arm.
+ 'debug_android_arm64': [
+ 'debug_bot', 'arm64', 'android', 'minimal_symbols'],
'debug_arm': [
'debug_bot', 'arm', 'hard_float'],
@@ -443,8 +447,6 @@
'release_bot', 'x64', 'cfi_clusterfuzz'],
'release_x64_msvc': [
'release_bot', 'x64', 'msvc'],
- 'release_x64_concurrent_marking': [
- 'release_bot', 'x64', 'v8_enable_concurrent_marking'],
'release_x64_correctness_fuzzer' : [
'release_bot', 'x64', 'v8_correctness_fuzzer'],
'release_x64_fuchsia': [
@@ -473,11 +475,6 @@
'release_trybot', 'x64', 'v8_enable_test_features'],
'release_x64_tsan': [
'release_bot', 'x64', 'tsan'],
- 'release_x64_tsan_concurrent_marking': [
- 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan'],
- 'release_x64_tsan_concurrent_marking_minimal_symbols': [
- 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan',
- 'minimal_symbols'],
'release_x64_tsan_minimal_symbols': [
'release_bot', 'x64', 'tsan', 'minimal_symbols'],
'release_x64_ubsan': [
@@ -506,6 +503,8 @@
'debug_bot', 'x64', 'fuchsia'],
'debug_x64_gcc': [
'debug_bot', 'x64', 'gcc'],
+ 'debug_x64_header_includes': [
+ 'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_jumbo': [
'debug_bot', 'x64', 'jumbo'],
'debug_x64_jumbo_limited': [
@@ -716,7 +715,7 @@
},
'release': {
- 'gn_args': 'is_debug=false',
+ 'gn_args': 'is_debug=false android_unstripped_runtime_outputs=false',
},
'release_bot': {
@@ -800,12 +799,8 @@
'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
},
- 'v8_no_i18n': {
- 'gn_args': 'v8_enable_i18n_support=false icu_use_data_file=false',
- },
-
- 'v8_enable_concurrent_marking': {
- 'gn_args': 'v8_enable_concurrent_marking=true',
+ 'v8_check_header_includes': {
+ 'gn_args': 'v8_check_header_includes=true',
},
'v8_correctness_fuzzer': {
@@ -841,6 +836,10 @@
# This is the default in gn for debug.
},
+ 'v8_no_i18n': {
+ 'gn_args': 'v8_enable_i18n_support=false icu_use_data_file=false',
+ },
+
'v8_snapshot_custom': {
# GN path is relative to project root.
'gn_args': 'v8_embed_script="test/mjsunit/mjsunit.js"',
diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py
index 9f242a9299..e145534e6e 100644
--- a/deps/v8/infra/testing/PRESUBMIT.py
+++ b/deps/v8/infra/testing/PRESUBMIT.py
@@ -23,6 +23,8 @@ SUPPORTED_BUILDER_SPEC_KEYS = [
SUPPORTED_SWARMING_DIMENSIONS = [
'cores',
'cpu',
+ 'device_os',
+ 'device_type',
'os',
]
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index f8fad69cb1..b2cd761817 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -31,6 +31,18 @@
##############################################################################
### luci.v8.try
##############################################################################
+ # Android
+ 'v8_android_arm64_n5x_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'device_os': 'MMB29Q',
+ 'device_type': 'bullhead',
+ 'os': 'Android',
+ },
+ 'tests': [
+ {'name': 'mjsunit', 'variant': 'default', 'shards': 2},
+ ],
+ },
+ ##############################################################################
# Linux32
'v8_linux_dbg_ng_triggered': {
'swarming_dimensions' : {
@@ -109,6 +121,7 @@
{'name': 'test262_variants', 'shards': 4},
{'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing'},
+ {'name': 'v8testing', 'suffix': 'isolates', 'test_args': ['--isolates'], 'shards': 2},
{'name': 'v8testing', 'variant': 'extra'},
],
},
@@ -217,6 +230,11 @@
{'name': 'v8testing', 'variant': 'slow_path'},
],
},
+ 'v8_linux64_tsan_isolates_rel_ng_triggered': {
+ 'tests': [
+ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
+ ],
+ },
'v8_linux64_ubsan_rel_ng_triggered': {
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -249,7 +267,7 @@
{'name': 'mozilla'},
{'name': 'test262'},
{'name': 'v8testing', 'shards': 9},
- {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 4},
],
},
##############################################################################
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 275595d0d8..90cfd737f2 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -12,6 +12,7 @@ include_rules = [
"+src/heap/factory-inl.h",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
+ "+src/heap/heap-write-barrier-inl.h",
"-src/inspector",
"-src/interpreter",
"+src/interpreter/bytecode-array-accessor.h",
@@ -22,6 +23,7 @@ include_rules = [
"+src/interpreter/bytecode-register.h",
"+src/interpreter/bytecodes.h",
"+src/interpreter/interpreter.h",
+ "+src/interpreter/interpreter-generator.h",
"+src/interpreter/setup-interpreter.h",
"-src/trap-handler",
"+src/trap-handler/trap-handler.h",
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index ae59ec3356..da935f3652 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -4,7 +4,7 @@
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/contexts.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
@@ -13,6 +13,7 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/property-details.h"
#include "src/prototype.h"
@@ -77,8 +78,7 @@ bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
}
V8_WARN_UNUSED_RESULT MaybeHandle<Object>
-Accessors::ReplaceAccessorWithDataProperty(Isolate* isolate,
- Handle<Object> receiver,
+Accessors::ReplaceAccessorWithDataProperty(Handle<Object> receiver,
Handle<JSObject> holder,
Handle<Name> name,
Handle<Object> value) {
@@ -112,8 +112,8 @@ void Accessors::ReconfigureToDataProperty(
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Name> name = Utils::OpenHandle(*key);
Handle<Object> value = Utils::OpenHandle(*val);
- MaybeHandle<Object> result = Accessors::ReplaceAccessorWithDataProperty(
- isolate, receiver, holder, name, value);
+ MaybeHandle<Object> result =
+ Accessors::ReplaceAccessorWithDataProperty(receiver, holder, name, value);
if (result.is_null()) {
isolate->OptionalRescheduleException(false);
} else {
@@ -327,6 +327,7 @@ void Accessors::FunctionPrototypeGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
+ DCHECK(function->has_prototype_property());
Handle<Object> result = GetFunctionPrototype(isolate, function);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -341,6 +342,7 @@ void Accessors::FunctionPrototypeSetter(
Handle<Object> value = Utils::OpenHandle(*val);
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
+ DCHECK(object->has_prototype_property());
JSFunction::SetPrototype(object, value);
info.GetReturnValue().Set(true);
}
@@ -857,8 +859,8 @@ void Accessors::ErrorStackGetter(
Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
Handle<Name> name = Utils::OpenHandle(*key);
if (IsAccessor(receiver, name, holder)) {
- result = Accessors::ReplaceAccessorWithDataProperty(
- isolate, receiver, holder, name, formatted_stack_trace);
+ result = Accessors::ReplaceAccessorWithDataProperty(receiver, holder, name,
+ formatted_stack_trace);
if (result.is_null()) {
isolate->OptionalRescheduleException(false);
return;
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 301f830a9a..69fdbbb74e 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -98,8 +98,8 @@ class Accessors : public AllStatic {
FieldIndex* field_index);
static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
- Isolate* isolate, Handle<Object> receiver, Handle<JSObject> holder,
- Handle<Name> name, Handle<Object> value);
+ Handle<Object> receiver, Handle<JSObject> holder, Handle<Name> name,
+ Handle<Object> value);
// Create an AccessorInfo. The setter is optional (can be nullptr).
//
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index 4d91b68521..89f606ed41 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -7,6 +7,7 @@
#include "src/api-arguments.h"
+#include "src/api-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
@@ -14,6 +15,29 @@
namespace v8 {
namespace internal {
+CustomArgumentsBase::CustomArgumentsBase(Isolate* isolate)
+ : Relocatable(isolate) {}
+
+template <typename T>
+template <typename V>
+Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
+ // Check the ReturnValue.
+ Object** handle = &this->begin()[kReturnValueOffset];
+ // Nothing was set, return empty handle as per previous behaviour.
+ if ((*handle)->IsTheHole(isolate)) return Handle<V>();
+ Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
+ result->VerifyApiCallResultType();
+ return result;
+}
+
+inline JSObject* PropertyCallbackArguments::holder() {
+ return JSObject::cast(this->begin()[T::kHolderIndex]);
+}
+
+inline JSObject* FunctionCallbackArguments::holder() {
+ return JSObject::cast(this->begin()[T::kHolderIndex]);
+}
+
#define FOR_EACH_CALLBACK(F) \
F(Query, query, Object, v8::Integer, interceptor) \
F(Deleter, deleter, Object, v8::Boolean, Handle<Object>())
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
new file mode 100644
index 0000000000..4b290d9dab
--- /dev/null
+++ b/deps/v8/src/api-arguments.cc
@@ -0,0 +1,54 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-arguments.h"
+
+#include "src/api-arguments-inl.h"
+
+namespace v8 {
+namespace internal {
+
+PropertyCallbackArguments::PropertyCallbackArguments(Isolate* isolate,
+ Object* data, Object* self,
+ JSObject* holder,
+ ShouldThrow should_throw)
+ : Super(isolate) {
+ Object** values = this->begin();
+ values[T::kThisIndex] = self;
+ values[T::kHolderIndex] = holder;
+ values[T::kDataIndex] = data;
+ values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+ values[T::kShouldThrowOnErrorIndex] =
+ Smi::FromInt(should_throw == kThrowOnError ? 1 : 0);
+
+ // Here the hole is set as default value.
+ // It cannot escape into js as it's removed in Call below.
+ HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ values[T::kReturnValueDefaultValueIndex] = the_hole;
+ values[T::kReturnValueIndex] = the_hole;
+ DCHECK(values[T::kHolderIndex]->IsHeapObject());
+ DCHECK(values[T::kIsolateIndex]->IsSmi());
+}
+
+FunctionCallbackArguments::FunctionCallbackArguments(
+ internal::Isolate* isolate, internal::Object* data,
+ internal::HeapObject* callee, internal::Object* holder,
+ internal::HeapObject* new_target, internal::Object** argv, int argc)
+ : Super(isolate), argv_(argv), argc_(argc) {
+ Object** values = begin();
+ values[T::kDataIndex] = data;
+ values[T::kHolderIndex] = holder;
+ values[T::kNewTargetIndex] = new_target;
+ values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
+ // Here the hole is set as default value.
+ // It cannot escape into js as it's removed in Call below.
+ HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
+ values[T::kReturnValueDefaultValueIndex] = the_hole;
+ values[T::kReturnValueIndex] = the_hole;
+ DCHECK(values[T::kHolderIndex]->IsHeapObject());
+ DCHECK(values[T::kIsolateIndex]->IsSmi());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index bed1c123e0..0a0a7362c7 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -18,8 +18,7 @@ namespace internal {
// can.
class CustomArgumentsBase : public Relocatable {
protected:
- explicit inline CustomArgumentsBase(Isolate* isolate)
- : Relocatable(isolate) {}
+ explicit inline CustomArgumentsBase(Isolate* isolate);
};
template <typename T>
@@ -52,18 +51,6 @@ class CustomArguments : public CustomArgumentsBase {
Object* values_[T::kArgsLength];
};
-template <typename T>
-template <typename V>
-Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
- // Check the ReturnValue.
- Object** handle = &this->begin()[kReturnValueOffset];
- // Nothing was set, return empty handle as per previous behaviour.
- if ((*handle)->IsTheHole(isolate)) return Handle<V>();
- Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
- result->VerifyApiCallResultType();
- return result;
-}
-
// Note: Calling args.Call() sets the return value on args. For multiple
// Call()'s, a new args should be used every time.
class PropertyCallbackArguments
@@ -81,24 +68,7 @@ class PropertyCallbackArguments
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
- JSObject* holder, ShouldThrow should_throw)
- : Super(isolate) {
- Object** values = this->begin();
- values[T::kThisIndex] = self;
- values[T::kHolderIndex] = holder;
- values[T::kDataIndex] = data;
- values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
- values[T::kShouldThrowOnErrorIndex] =
- Smi::FromInt(should_throw == kThrowOnError ? 1 : 0);
-
- // Here the hole is set as default value.
- // It cannot escape into js as it's removed in Call below.
- HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
- values[T::kReturnValueDefaultValueIndex] = the_hole;
- values[T::kReturnValueIndex] = the_hole;
- DCHECK(values[T::kHolderIndex]->IsHeapObject());
- DCHECK(values[T::kIsolateIndex]->IsSmi());
- }
+ JSObject* holder, ShouldThrow should_throw);
// -------------------------------------------------------------------------
// Accessor Callbacks
@@ -165,9 +135,7 @@ class PropertyCallbackArguments
GenericNamedPropertyGetterCallback f, Handle<Name> name,
Handle<Object> info);
- inline JSObject* holder() {
- return JSObject::cast(this->begin()[T::kHolderIndex]);
- }
+ inline JSObject* holder();
// Don't copy PropertyCallbackArguments, because they would both have the
// same prev_ pointer.
@@ -191,21 +159,7 @@ class FunctionCallbackArguments
internal::HeapObject* callee,
internal::Object* holder,
internal::HeapObject* new_target,
- internal::Object** argv, int argc)
- : Super(isolate), argv_(argv), argc_(argc) {
- Object** values = begin();
- values[T::kDataIndex] = data;
- values[T::kHolderIndex] = holder;
- values[T::kNewTargetIndex] = new_target;
- values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
- // Here the hole is set as default value.
- // It cannot escape into js as it's remove in Call below.
- HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
- values[T::kReturnValueDefaultValueIndex] = the_hole;
- values[T::kReturnValueIndex] = the_hole;
- DCHECK(values[T::kHolderIndex]->IsHeapObject());
- DCHECK(values[T::kIsolateIndex]->IsSmi());
- }
+ internal::Object** argv, int argc);
/*
* The following Call function wraps the calling of all callbacks to handle
@@ -218,9 +172,7 @@ class FunctionCallbackArguments
inline Handle<Object> Call(CallHandlerInfo* handler);
private:
- inline JSObject* holder() {
- return JSObject::cast(this->begin()[T::kHolderIndex]);
- }
+ inline JSObject* holder();
internal::Object** argv_;
int argc_;
diff --git a/deps/v8/src/api-inl.h b/deps/v8/src/api-inl.h
new file mode 100644
index 0000000000..50586814d8
--- /dev/null
+++ b/deps/v8/src/api-inl.h
@@ -0,0 +1,140 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_API_INL_H_
+#define V8_API_INL_H_
+
+#include "src/api.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+
+template <typename T>
+inline T ToCData(v8::internal::Object* obj) {
+ STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ if (obj == v8::internal::Smi::kZero) return nullptr;
+ return reinterpret_cast<T>(
+ v8::internal::Foreign::cast(obj)->foreign_address());
+}
+
+template <>
+inline v8::internal::Address ToCData(v8::internal::Object* obj) {
+ if (obj == v8::internal::Smi::kZero) return v8::internal::kNullAddress;
+ return v8::internal::Foreign::cast(obj)->foreign_address();
+}
+
+template <typename T>
+inline v8::internal::Handle<v8::internal::Object> FromCData(
+ v8::internal::Isolate* isolate, T obj) {
+ STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
+ if (obj == nullptr) return handle(v8::internal::Smi::kZero, isolate);
+ return isolate->factory()->NewForeign(
+ reinterpret_cast<v8::internal::Address>(obj));
+}
+
+template <>
+inline v8::internal::Handle<v8::internal::Object> FromCData(
+ v8::internal::Isolate* isolate, v8::internal::Address obj) {
+ if (obj == v8::internal::kNullAddress) {
+ return handle(v8::internal::Smi::kZero, isolate);
+ }
+ return isolate->factory()->NewForeign(obj);
+}
+
+template <class From, class To>
+inline Local<To> Utils::Convert(v8::internal::Handle<From> obj) {
+ DCHECK(obj.is_null() || (obj->IsSmi() || !obj->IsTheHole()));
+ return Local<To>(reinterpret_cast<To*>(obj.location()));
+}
+
+// Implementations of ToLocal
+
+#define MAKE_TO_LOCAL(Name, From, To) \
+ Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
+ return Convert<v8::internal::From, v8::To>(obj); \
+ }
+
+#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
+ Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
+ v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
+ DCHECK(obj->type() == v8::internal::kExternal##Type##Array); \
+ return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
+ }
+
+MAKE_TO_LOCAL(ToLocal, Context, Context)
+MAKE_TO_LOCAL(ToLocal, Object, Value)
+MAKE_TO_LOCAL(ToLocal, Module, Module)
+MAKE_TO_LOCAL(ToLocal, Name, Name)
+MAKE_TO_LOCAL(ToLocal, String, String)
+MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
+MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
+MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
+MAKE_TO_LOCAL(ToLocal, JSObject, Object)
+MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
+MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, JSMap, Map)
+MAKE_TO_LOCAL(ToLocal, JSSet, Set)
+MAKE_TO_LOCAL(ToLocal, JSProxy, Proxy)
+MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
+MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
+MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
+MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
+MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
+
+TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
+
+MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
+MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
+MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
+MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
+MAKE_TO_LOCAL(MessageToLocal, Object, Message)
+MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
+MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
+MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
+MAKE_TO_LOCAL(NumberToLocal, Object, Number)
+MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
+MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+MAKE_TO_LOCAL(ToLocal, BigInt, BigInt);
+MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
+MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
+MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
+MAKE_TO_LOCAL(ToLocal, FixedArray, PrimitiveArray)
+MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule)
+
+#undef MAKE_TO_LOCAL_TYPED_ARRAY
+#undef MAKE_TO_LOCAL
+
+// Implementations of OpenHandle
+
+#define MAKE_OPEN_HANDLE(From, To) \
+ v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
+ const v8::From* that, bool allow_empty_handle) { \
+ DCHECK(allow_empty_handle || that != nullptr); \
+ DCHECK(that == nullptr || \
+ (*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
+ return v8::internal::Handle<v8::internal::To>( \
+ reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
+ }
+
+OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
+
+#undef MAKE_OPEN_HANDLE
+#undef OPEN_HANDLE_LIST
+
+namespace internal {
+
+Handle<Context> HandleScopeImplementer::MicrotaskContext() {
+ if (microtask_context_) return Handle<Context>(microtask_context_, isolate_);
+ return Handle<Context>::null();
+}
+
+Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+ if (entered_contexts_.empty()) return Handle<Context>::null();
+ return Handle<Context>(entered_contexts_.back(), isolate_);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_API_INL_H_
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 11dd4d67d5..977d6cdafc 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -4,7 +4,7 @@
#include "src/api-natives.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/isolate-inl.h"
#include "src/lookup.h"
#include "src/messages.h"
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 67a4b80060..e8bb32d40a 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -8,6 +8,7 @@
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/handles.h"
+#include "src/maybe-handles.h"
#include "src/property-details.h"
namespace v8 {
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 6155cbb325..d141496c57 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -8,6 +8,9 @@
#include <cmath> // For isnan.
#include <limits>
#include <vector>
+
+#include "src/api-inl.h"
+
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
#include "include/v8-util.h"
@@ -48,7 +51,9 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
@@ -207,17 +212,6 @@ namespace v8 {
#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
-// TODO(v8:7786): Remove this when HeapObject::GetIsolate is removed.
-#ifdef DEPRECATE_GET_ISOLATE
-#define DISABLE_DEPRECATED_WARNINGS \
- _Pragma("clang diagnostic push") \
- _Pragma("clang diagnostic ignored \"-Wdeprecated\"")
-#define RESET_DEPRECATED_WARNINGS _Pragma("clang diagnostic pop")
-#else
-#define DISABLE_DEPRECATED_WARNINGS
-#define RESET_DEPRECATED_WARNINGS
-#endif
-
namespace {
Local<Context> ContextFromNeverReadOnlySpaceObject(
@@ -225,19 +219,6 @@ Local<Context> ContextFromNeverReadOnlySpaceObject(
return reinterpret_cast<v8::Isolate*>(obj->GetIsolate())->GetCurrentContext();
}
-// This is unsafe because obj could be in RO_SPACE which would not be tied to a
-// particular isolate.
-#ifdef DEPRECATE_GET_ISOLATE
-[[deprecated("Pass Context explicitly or use a NeverReadOnlySpaceObject")]]
-#endif
- Local<Context>
- UnsafeContextFromHeapObject(i::Handle<i::Object> obj) {
- DISABLE_DEPRECATED_WARNINGS
- return reinterpret_cast<v8::Isolate*>(i::HeapObject::cast(*obj)->GetIsolate())
- ->GetCurrentContext();
- RESET_DEPRECATED_WARNINGS
-}
-
class InternalEscapableScope : public v8::EscapableHandleScope {
public:
explicit inline InternalEscapableScope(i::Isolate* isolate)
@@ -534,34 +515,6 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
void Free(void* data, size_t) override { free(data); }
};
-bool RunExtraCode(Isolate* isolate, Local<Context> context,
- const char* utf8_source, const char* name) {
- base::ElapsedTimer timer;
- timer.Start();
- Context::Scope context_scope(context);
- TryCatch try_catch(isolate);
- Local<String> source_string;
- if (!String::NewFromUtf8(isolate, utf8_source, NewStringType::kNormal)
- .ToLocal(&source_string)) {
- return false;
- }
- Local<String> resource_name =
- String::NewFromUtf8(isolate, name, NewStringType::kNormal)
- .ToLocalChecked();
- ScriptOrigin origin(resource_name);
- ScriptCompiler::Source source(source_string, origin);
- Local<Script> script;
- if (!ScriptCompiler::Compile(context, &source).ToLocal(&script)) return false;
- if (script->Run(context).IsEmpty()) return false;
- if (i::FLAG_profile_deserialization) {
- i::PrintF("Executing custom snapshot script %s took %0.3f ms\n", name,
- timer.Elapsed().InMillisecondsF());
- }
- timer.Stop();
- CHECK(!try_catch.HasCaught());
- return true;
-}
-
struct SnapshotCreatorData {
explicit SnapshotCreatorData(Isolate* isolate)
: isolate_(isolate),
@@ -763,7 +716,7 @@ StartupData SnapshotCreator::CreateBlob(
i::GarbageCollectionReason::kSnapshotCreator);
{
i::HandleScope scope(isolate);
- isolate->heap()->CompactFixedArraysOfWeakCells();
+ isolate->heap()->CompactWeakArrayLists(internal::TENURED);
}
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
@@ -884,77 +837,6 @@ StartupData SnapshotCreator::CreateBlob(
return result;
}
-StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
- // Create a new isolate and a new context from scratch, optionally run
- // a script to embed, and serialize to create a snapshot blob.
- StartupData result = {nullptr, 0};
- base::ElapsedTimer timer;
- timer.Start();
- {
- SnapshotCreator snapshot_creator;
- Isolate* isolate = snapshot_creator.GetIsolate();
- {
- HandleScope scope(isolate);
- Local<Context> context = Context::New(isolate);
- if (embedded_source != nullptr &&
- !RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
- return result;
- }
- snapshot_creator.SetDefaultContext(context);
- }
- result = snapshot_creator.CreateBlob(
- SnapshotCreator::FunctionCodeHandling::kClear);
- }
-
- if (i::FLAG_profile_deserialization) {
- i::PrintF("Creating snapshot took %0.3f ms\n",
- timer.Elapsed().InMillisecondsF());
- }
- timer.Stop();
- return result;
-}
-
-StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
- const char* warmup_source) {
- CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != nullptr);
- CHECK_NOT_NULL(warmup_source);
- // Use following steps to create a warmed up snapshot blob from a cold one:
- // - Create a new isolate from the cold snapshot.
- // - Create a new context to run the warmup script. This will trigger
- // compilation of executed functions.
- // - Create a new context. This context will be unpolluted.
- // - Serialize the isolate and the second context into a new snapshot blob.
- StartupData result = {nullptr, 0};
- base::ElapsedTimer timer;
- timer.Start();
- {
- SnapshotCreator snapshot_creator(nullptr, &cold_snapshot_blob);
- Isolate* isolate = snapshot_creator.GetIsolate();
- {
- HandleScope scope(isolate);
- Local<Context> context = Context::New(isolate);
- if (!RunExtraCode(isolate, context, warmup_source, "<warm-up>")) {
- return result;
- }
- }
- {
- HandleScope handle_scope(isolate);
- isolate->ContextDisposedNotification(false);
- Local<Context> context = Context::New(isolate);
- snapshot_creator.SetDefaultContext(context);
- }
- result = snapshot_creator.CreateBlob(
- SnapshotCreator::FunctionCodeHandling::kKeep);
- }
-
- if (i::FLAG_profile_deserialization) {
- i::PrintF("Warming up snapshot took %0.3f ms\n",
- timer.Elapsed().InMillisecondsF());
- }
- timer.Stop();
- return result;
-}
-
void V8::SetDcheckErrorHandler(DcheckErrorCallback that) {
v8::base::SetDcheckFunction(that);
}
@@ -1953,16 +1835,6 @@ static void ObjectTemplateSetNamedPropertyHandler(
cons->set_named_property_handler(*obj);
}
-// TODO(cbruni) deprecate.
-void ObjectTemplate::SetNamedPropertyHandler(
- NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
- NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
- NamedPropertyEnumeratorCallback enumerator, Local<Value> data) {
- ObjectTemplateSetNamedPropertyHandler(
- this, getter, setter, query, nullptr, remover, enumerator, nullptr, data,
- PropertyHandlerFlags::kOnlyInterceptStrings);
-}
-
void ObjectTemplate::SetHandler(
const NamedPropertyHandlerConfiguration& config) {
ObjectTemplateSetNamedPropertyHandler(
@@ -2247,15 +2119,6 @@ MaybeLocal<Value> Script::Run(Local<Context> context) {
}
-Local<Value> Script::Run() {
- auto self = Utils::OpenHandle(this, true);
- // If execution is terminating, Compile(..)->Run() requires this
- // check.
- if (self.is_null()) return Local<Value>();
- auto context = ContextFromNeverReadOnlySpaceObject(self);
- RETURN_TO_LOCAL_UNCHECKED(Run(context), Value);
-}
-
Local<Value> ScriptOrModule::GetResourceName() {
i::Handle<i::Script> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
@@ -2307,14 +2170,6 @@ void PrimitiveArray::Set(Isolate* v8_isolate, int index,
array->set(index, *i_item);
}
-void PrimitiveArray::Set(int index, Local<Primitive> item) {
- i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = array->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- Set(reinterpret_cast<Isolate*>(isolate), index, item);
-}
-
Local<Primitive> PrimitiveArray::Get(Isolate* v8_isolate, int index) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
@@ -2327,14 +2182,6 @@ Local<Primitive> PrimitiveArray::Get(Isolate* v8_isolate, int index) {
return ToApiHandle<Primitive>(i_item);
}
-Local<Primitive> PrimitiveArray::Get(int index) {
- i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = array->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return Get(reinterpret_cast<Isolate*>(isolate), index);
-}
-
Module::Status Module::GetStatus() const {
i::Handle<i::Module> self = Utils::OpenHandle(this);
switch (self->status()) {
@@ -2513,8 +2360,8 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
source->host_defined_options);
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- str, script_details, source->resource_options, nullptr, script_data,
- options, no_cache_reason, i::NOT_NATIVES_CODE);
+ isolate, str, script_details, source->resource_options, nullptr,
+ script_data, options, no_cache_reason, i::NOT_NATIVES_CODE);
if (options == kConsumeCodeCache) {
source->cached_data->rejected = script_data->rejected();
}
@@ -2680,17 +2527,6 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
}
-Local<Function> ScriptCompiler::CompileFunctionInContext(
- Isolate* v8_isolate, Source* source, Local<Context> v8_context,
- size_t arguments_count, Local<String> arguments[],
- size_t context_extension_count, Local<Object> context_extensions[]) {
- RETURN_TO_LOCAL_UNCHECKED(
- CompileFunctionInContext(v8_context, source, arguments_count, arguments,
- context_extension_count, context_extensions),
- Function);
-}
-
-
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
if (!i::FLAG_script_streaming) {
@@ -2722,7 +2558,7 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForStreamedScript(
- str, script_details, origin.Options(), streaming_data);
+ isolate, str, script_details, origin.Options(), streaming_data);
i::Handle<i::SharedFunctionInfo> result;
has_pending_exception = !maybe_function_info.ToHandle(&result);
@@ -2744,11 +2580,6 @@ uint32_t ScriptCompiler::CachedDataVersionTag() {
}
ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
- Local<UnboundScript> unbound_script, Local<String> source) {
- return CreateCodeCache(unbound_script);
-}
-
-ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
Local<UnboundScript> unbound_script) {
i::Handle<i::SharedFunctionInfo> shared =
i::Handle<i::SharedFunctionInfo>::cast(
@@ -2768,11 +2599,6 @@ ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
}
ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction(
- Local<Function> function, Local<String> source) {
- return CreateCodeCacheForFunction(function);
-}
-
-ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction(
Local<Function> function) {
auto js_function =
i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*function));
@@ -2793,27 +2619,6 @@ MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
}
-Local<Script> Script::Compile(v8::Local<String> source,
- v8::ScriptOrigin* origin) {
- auto str = Utils::OpenHandle(*source);
- DISABLE_DEPRECATED_WARNINGS
- auto context = UnsafeContextFromHeapObject(str);
- RESET_DEPRECATED_WARNINGS
- RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, origin), Script);
-}
-
-
-Local<Script> Script::Compile(v8::Local<String> source,
- v8::Local<String> file_name) {
- auto str = Utils::OpenHandle(*source);
- DISABLE_DEPRECATED_WARNINGS
- auto context = UnsafeContextFromHeapObject(str);
- RESET_DEPRECATED_WARNINGS
- ScriptOrigin origin(file_name);
- return Compile(context, source, &origin).FromMaybe(Local<Script>());
-}
-
-
// --- E x c e p t i o n s ---
v8::TryCatch::TryCatch(v8::Isolate* isolate)
@@ -2919,12 +2724,6 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
}
-v8::Local<Value> v8::TryCatch::StackTrace() const {
- auto context = reinterpret_cast<v8::Isolate*>(isolate_)->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(StackTrace(context), Value);
-}
-
-
v8::Local<v8::Message> v8::TryCatch::Message() const {
i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
DCHECK(message->IsJSMessageObject() || message->IsTheHole(isolate_));
@@ -2978,6 +2777,10 @@ Local<String> Message::Get() const {
return scope.Escape(result);
}
+v8::Isolate* Message::GetIsolate() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ return reinterpret_cast<Isolate*>(isolate);
+}
ScriptOrigin Message::GetScriptOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -3015,12 +2818,6 @@ Maybe<int> Message::GetLineNumber(Local<Context> context) const {
}
-int Message::GetLineNumber() const {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return GetLineNumber(context).FromMaybe(0);
-}
-
-
int Message::GetStartPosition() const {
auto self = Utils::OpenHandle(this);
return self->start_position();
@@ -3094,12 +2891,6 @@ MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
}
-Local<String> Message::GetSourceLine() const {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(GetSourceLine(context), String)
-}
-
-
void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -3119,13 +2910,6 @@ Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
return scope.Escape(Utils::StackFrameToLocal(info));
}
-Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return GetFrame(reinterpret_cast<Isolate*>(isolate), index);
-}
-
int StackTrace::GetFrameCount() const {
return Utils::OpenHandle(this)->length();
}
@@ -3244,9 +3028,8 @@ MaybeLocal<String> JSON::Stringify(Local<Context> context,
? isolate->factory()->empty_string()
: Utils::OpenHandle(*gap);
i::Handle<i::Object> maybe;
- has_pending_exception = !i::JsonStringifier(isolate)
- .Stringify(object, replacer, gap_string)
- .ToHandle(&maybe);
+ has_pending_exception =
+ !i::JsonStringify(isolate, object, replacer, gap_string).ToHandle(&maybe);
RETURN_ON_FAILED_EXECUTION(String);
Local<String> result;
has_pending_exception =
@@ -3580,15 +3363,13 @@ bool Value::IsTypedArray() const {
return Utils::OpenHandle(this)->IsJSTypedArray();
}
-
-#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
+#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
bool Value::Is##Type##Array() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
return obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == i::kExternal##Type##Array; \
}
-
TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
@@ -3616,14 +3397,6 @@ bool Value::IsBigInt() const { return Utils::OpenHandle(this)->IsBigInt(); }
bool Value::IsProxy() const { return Utils::OpenHandle(this)->IsJSProxy(); }
-bool Value::IsWebAssemblyCompiledModule() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsJSObject()) return false;
- i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
- return js_obj->GetIsolate()->native_context()->wasm_module_constructor() ==
- js_obj->map()->GetConstructor();
-}
-
#define VALUE_IS_SPECIFIC_TYPE(Type, Check) \
bool Value::Is##Type() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
@@ -3641,6 +3414,7 @@ VALUE_IS_SPECIFIC_TYPE(Map, JSMap)
VALUE_IS_SPECIFIC_TYPE(Set, JSSet)
VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
+VALUE_IS_SPECIFIC_TYPE(WebAssemblyCompiledModule, WasmModuleObject)
#undef VALUE_IS_SPECIFIC_TYPE
@@ -4023,8 +3797,7 @@ void v8::TypedArray::CheckCast(Value* that) {
"Could not convert to TypedArray");
}
-
-#define CHECK_TYPED_ARRAY_CAST(Type, typeName, TYPE, ctype, size) \
+#define CHECK_TYPED_ARRAY_CAST(Type, typeName, TYPE, ctype) \
void v8::Type##Array::CheckCast(Value* that) { \
i::Handle<i::Object> obj = Utils::OpenHandle(that); \
Utils::ApiCheck( \
@@ -4033,7 +3806,6 @@ void v8::TypedArray::CheckCast(Value* that) {
"v8::" #Type "Array::Cast()", "Could not convert to " #Type "Array"); \
}
-
TYPED_ARRAYS(CHECK_TYPED_ARRAY_CAST)
#undef CHECK_TYPED_ARRAY_CAST
@@ -4110,17 +3882,6 @@ Maybe<bool> Value::BooleanValue(Local<Context> context) const {
}
-bool Value::BooleanValue() const {
- auto obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) return *obj != i::Smi::kZero;
- DCHECK(obj->IsHeapObject());
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = i::Handle<i::HeapObject>::cast(obj)->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return obj->BooleanValue(isolate);
-}
-
-
Maybe<double> Value::NumberValue(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(obj->Number());
@@ -4134,16 +3895,6 @@ Maybe<double> Value::NumberValue(Local<Context> context) const {
}
-double Value::NumberValue() const {
- auto obj = Utils::OpenHandle(this);
- if (obj->IsNumber()) return obj->Number();
- DISABLE_DEPRECATED_WARNINGS
- return NumberValue(UnsafeContextFromHeapObject(obj))
- .FromMaybe(std::numeric_limits<double>::quiet_NaN());
- RESET_DEPRECATED_WARNINGS
-}
-
-
Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) {
@@ -4159,21 +3910,6 @@ Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
}
-int64_t Value::IntegerValue() const {
- auto obj = Utils::OpenHandle(this);
- if (obj->IsNumber()) {
- if (obj->IsSmi()) {
- return i::Smi::ToInt(*obj);
- } else {
- return static_cast<int64_t>(obj->Number());
- }
- }
- DISABLE_DEPRECATED_WARNINGS
- return IntegerValue(UnsafeContextFromHeapObject(obj)).FromMaybe(0);
- RESET_DEPRECATED_WARNINGS
-}
-
-
Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToInt32(*obj));
@@ -4188,15 +3924,6 @@ Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
}
-int32_t Value::Int32Value() const {
- auto obj = Utils::OpenHandle(this);
- if (obj->IsNumber()) return NumberToInt32(*obj);
- DISABLE_DEPRECATED_WARNINGS
- return Int32Value(UnsafeContextFromHeapObject(obj)).FromMaybe(0);
- RESET_DEPRECATED_WARNINGS
-}
-
-
Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToUint32(*obj));
@@ -4211,15 +3938,6 @@ Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
}
-uint32_t Value::Uint32Value() const {
- auto obj = Utils::OpenHandle(this);
- if (obj->IsNumber()) return NumberToUint32(*obj);
- DISABLE_DEPRECATED_WARNINGS
- return Uint32Value(UnsafeContextFromHeapObject(obj)).FromMaybe(0);
- RESET_DEPRECATED_WARNINGS
-}
-
-
MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
if (self->IsSmi()) {
@@ -4254,23 +3972,6 @@ Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
}
-bool Value::Equals(Local<Value> that) const {
- auto self = Utils::OpenHandle(this);
- auto other = Utils::OpenHandle(*that);
- if (self->IsSmi() && other->IsSmi()) {
- return self->Number() == other->Number();
- }
- if (self->IsJSObject() && other->IsJSObject()) {
- return *self == *other;
- }
- auto heap_object = self->IsSmi() ? other : self;
- DISABLE_DEPRECATED_WARNINGS
- auto context = UnsafeContextFromHeapObject(heap_object);
- RESET_DEPRECATED_WARNINGS
- return Equals(context, that).FromMaybe(false);
-}
-
-
bool Value::StrictEquals(Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
@@ -4357,10 +4058,8 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::LookupIterator it = i::LookupIterator::PropertyOrElement(
- isolate, self, key_obj, self, i::LookupIterator::OWN);
- Maybe<bool> result =
- i::JSReceiver::CreateDataProperty(&it, value_obj, i::kDontThrow);
+ Maybe<bool> result = i::JSReceiver::CreateDataProperty(
+ isolate, self, key_obj, value_obj, i::kDontThrow);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -5596,11 +5295,6 @@ bool String::ContainsOnlyOneByte() const {
return helper.Check(*str);
}
-int String::Utf8Length() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- return Utf8Length(reinterpret_cast<Isolate*>(isolate));
-}
-
int String::Utf8Length(Isolate* isolate) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
str = i::String::Flatten(reinterpret_cast<i::Isolate*>(isolate), str);
@@ -5843,11 +5537,12 @@ int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
if (success) return writer.CompleteWrite(write_null, nchars_ref);
} else if (capacity >= string_length) {
// First check that the buffer is large enough.
- int utf8_bytes = Utf8Length(reinterpret_cast<Isolate*>(isolate));
+ int utf8_bytes = Utf8Length(v8_isolate);
if (utf8_bytes <= capacity) {
// one-byte fast path.
if (utf8_bytes == string_length) {
- WriteOneByte(reinterpret_cast<uint8_t*>(buffer), 0, capacity, options);
+ WriteOneByte(v8_isolate, reinterpret_cast<uint8_t*>(buffer), 0,
+ capacity, options);
if (nchars_ref != nullptr) *nchars_ref = string_length;
if (write_null && (utf8_bytes+1 <= capacity)) {
return string_length + 1;
@@ -5860,7 +5555,7 @@ int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
// Recurse once without a capacity limit.
// This will get into the first branch above.
// TODO(dcarney) Check max left rec. in Utf8Length and fall through.
- return WriteUtf8(buffer, -1, nchars_ref, options);
+ return WriteUtf8(v8_isolate, buffer, -1, nchars_ref, options);
}
}
Utf8WriterVisitor writer(buffer, capacity, false, replace_invalid_utf8);
@@ -5868,16 +5563,6 @@ int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
return writer.CompleteWrite(write_null, nchars_ref);
}
-int String::WriteUtf8(char* buffer, int capacity, int* nchars_ref,
- int options) const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = str->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return WriteUtf8(reinterpret_cast<Isolate*>(isolate), buffer, capacity,
- nchars_ref, options);
-}
-
template <typename CharType>
static inline int WriteHelper(i::Isolate* isolate, const String* string,
CharType* buffer, int start, int length,
@@ -5900,16 +5585,6 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
}
-int String::WriteOneByte(uint8_t* buffer,
- int start,
- int length,
- int options) const {
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return WriteHelper(isolate, this, buffer, start, length, options);
-}
-
int String::WriteOneByte(Isolate* isolate, uint8_t* buffer, int start,
int length, int options) const {
return WriteHelper(reinterpret_cast<i::Isolate*>(isolate), this, buffer,
@@ -5917,16 +5592,6 @@ int String::WriteOneByte(Isolate* isolate, uint8_t* buffer, int start,
}
-int String::Write(uint16_t* buffer,
- int start,
- int length,
- int options) const {
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return WriteHelper(isolate, this, buffer, start, length, options);
-}
-
int String::Write(Isolate* isolate, uint16_t* buffer, int start, int length,
int options) const {
return WriteHelper(reinterpret_cast<i::Isolate*>(isolate), this, buffer,
@@ -5948,11 +5613,16 @@ bool v8::String::IsExternalOneByte() const {
void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
+ i::DisallowHeapAllocation no_allocation;
+ i::String* str = *Utils::OpenHandle(this);
const v8::String::ExternalStringResource* expected;
- if (i::StringShape(*str).IsExternalTwoByte()) {
- const void* resource =
- i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+
+ if (str->IsThinString()) {
+ str = i::ThinString::cast(str)->actual();
+ }
+
+ if (i::StringShape(str).IsExternalTwoByte()) {
+ const void* resource = i::ExternalTwoByteString::cast(str)->resource();
expected = reinterpret_cast<const ExternalStringResource*>(resource);
} else {
expected = nullptr;
@@ -5962,17 +5632,21 @@ void v8::String::VerifyExternalStringResource(
void v8::String::VerifyExternalStringResourceBase(
v8::String::ExternalStringResourceBase* value, Encoding encoding) const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
+ i::DisallowHeapAllocation no_allocation;
+ i::String* str = *Utils::OpenHandle(this);
const v8::String::ExternalStringResourceBase* expected;
Encoding expectedEncoding;
- if (i::StringShape(*str).IsExternalOneByte()) {
- const void* resource =
- i::Handle<i::ExternalOneByteString>::cast(str)->resource();
+
+ if (str->IsThinString()) {
+ str = i::ThinString::cast(str)->actual();
+ }
+
+ if (i::StringShape(str).IsExternalOneByte()) {
+ const void* resource = i::ExternalOneByteString::cast(str)->resource();
expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
expectedEncoding = ONE_BYTE_ENCODING;
- } else if (i::StringShape(*str).IsExternalTwoByte()) {
- const void* resource =
- i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+ } else if (i::StringShape(str).IsExternalTwoByte()) {
+ const void* resource = i::ExternalTwoByteString::cast(str)->resource();
expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
expectedEncoding = TWO_BYTE_ENCODING;
} else {
@@ -5984,15 +5658,69 @@ void v8::String::VerifyExternalStringResourceBase(
CHECK_EQ(expectedEncoding, encoding);
}
+String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
+ i::DisallowHeapAllocation no_allocation;
+ typedef internal::Internals I;
+ ExternalStringResource* result = nullptr;
+ i::String* str = *Utils::OpenHandle(this);
+
+ if (str->IsThinString()) {
+ str = i::ThinString::cast(str)->actual();
+ }
+
+ if (i::StringShape(str).IsExternalTwoByte()) {
+ void* value = I::ReadField<void*>(str, I::kStringResourceOffset);
+ result = reinterpret_cast<String::ExternalStringResource*>(value);
+ }
+ return result;
+}
+
+String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
+ String::Encoding* encoding_out) const {
+ i::DisallowHeapAllocation no_allocation;
+ typedef internal::Internals I;
+ ExternalStringResourceBase* resource = nullptr;
+ i::String* str = *Utils::OpenHandle(this);
+
+ if (str->IsThinString()) {
+ str = i::ThinString::cast(str)->actual();
+ }
+
+ int type = I::GetInstanceType(str) & I::kFullStringRepresentationMask;
+ *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
+ if (i::StringShape(str).IsExternalOneByte() ||
+ i::StringShape(str).IsExternalTwoByte()) {
+ void* value = I::ReadField<void*>(str, I::kStringResourceOffset);
+ resource = static_cast<ExternalStringResourceBase*>(value);
+ }
+ return resource;
+}
+
+const String::ExternalOneByteStringResource*
+String::GetExternalOneByteStringResourceSlow() const {
+ i::DisallowHeapAllocation no_allocation;
+ i::String* str = *Utils::OpenHandle(this);
+
+ if (str->IsThinString()) {
+ str = i::ThinString::cast(str)->actual();
+ }
+
+ if (i::StringShape(str).IsExternalOneByte()) {
+ const void* resource = i::ExternalOneByteString::cast(str)->resource();
+ return reinterpret_cast<const ExternalOneByteStringResource*>(resource);
+ }
+ return nullptr;
+}
+
const v8::String::ExternalOneByteStringResource*
v8::String::GetExternalOneByteStringResource() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (i::StringShape(*str).IsExternalOneByte()) {
- const void* resource =
- i::Handle<i::ExternalOneByteString>::cast(str)->resource();
+ i::DisallowHeapAllocation no_allocation;
+ i::String* str = *Utils::OpenHandle(this);
+ if (i::StringShape(str).IsExternalOneByte()) {
+ const void* resource = i::ExternalOneByteString::cast(str)->resource();
return reinterpret_cast<const ExternalOneByteStringResource*>(resource);
} else {
- return nullptr;
+ return GetExternalOneByteStringResourceSlow();
}
}
@@ -6000,17 +5728,18 @@ v8::String::GetExternalOneByteStringResource() const {
Local<Value> Symbol::Name() const {
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
- i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*sym);
- // If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE
- // objects are immovable we can use the Handle(T**) constructor with the
- // address of the name field in the Symbol object without needing an isolate.
- if (chunk->owner()->identity() == i::RO_SPACE) {
+ i::Isolate* isolate;
+ if (!i::Isolate::FromWritableHeapObject(*sym, &isolate)) {
+ // If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE
+ // objects are immovable we can use the Handle(T**) constructor with the
+ // address of the name field in the Symbol object without needing an
+ // isolate.
i::Handle<i::HeapObject> ro_name(reinterpret_cast<i::HeapObject**>(
sym->GetFieldAddress(i::Symbol::kNameOffset)));
return Utils::ToLocal(ro_name);
}
- i::Handle<i::Object> name(sym->name(), chunk->heap()->isolate());
+ i::Handle<i::Object> name(sym->name(), isolate);
return Utils::ToLocal(name);
}
@@ -6207,6 +5936,7 @@ HeapStatistics::HeapStatistics()
used_heap_size_(0),
heap_size_limit_(0),
malloced_memory_(0),
+ external_memory_(0),
peak_malloced_memory_(0),
does_zap_garbage_(0),
number_of_native_contexts_(0),
@@ -6819,14 +6549,6 @@ Local<String> v8::String::Concat(Isolate* v8_isolate, Local<String> left,
return Utils::ToLocal(result);
}
-Local<String> v8::String::Concat(Local<String> left, Local<String> right) {
- i::Handle<i::String> left_string = Utils::OpenHandle(*left);
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = left_string->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return Concat(reinterpret_cast<Isolate*>(isolate), left, right);
-}
-
MaybeLocal<String> v8::String::NewExternalTwoByte(
Isolate* isolate, v8::String::ExternalStringResource* resource) {
CHECK(resource && resource->data());
@@ -6880,73 +6602,83 @@ Local<String> v8::String::NewExternal(
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- // RO_SPACE strings cannot be externalized.
- i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*obj);
- if (chunk->owner()->identity() == i::RO_SPACE) {
- return false;
- }
+ i::DisallowHeapAllocation no_allocation;
- i::Isolate* isolate = chunk->heap()->isolate();
- if (i::StringShape(*obj).IsExternal()) {
- return false; // Already an external string.
+ i::String* obj = *Utils::OpenHandle(this);
+
+ if (obj->IsThinString()) {
+ obj = i::ThinString::cast(obj)->actual();
}
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (isolate->heap()->IsInGCPostProcessing()) {
+
+ if (!obj->SupportsExternalization()) {
return false;
}
+
+ // It is safe to call FromWritable because SupportsExternalization already
+ // checked that the object is writable.
+ i::Isolate* isolate;
+ i::Isolate::FromWritableHeapObject(obj, &isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+
CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
- // Assert that if CanMakeExternal(), then externalizing actually succeeds.
- DCHECK(!CanMakeExternal() || result);
- if (result) {
- DCHECK(obj->IsExternalString());
- }
+ DCHECK(result);
+ DCHECK(obj->IsExternalString());
return result;
}
bool v8::String::MakeExternal(
v8::String::ExternalOneByteStringResource* resource) {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
+ i::DisallowHeapAllocation no_allocation;
- // RO_SPACE strings cannot be externalized.
- i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*obj);
- if (chunk->owner()->identity() == i::RO_SPACE) {
- return false;
- }
+ i::String* obj = *Utils::OpenHandle(this);
- i::Isolate* isolate = chunk->heap()->isolate();
- if (i::StringShape(*obj).IsExternal()) {
- return false; // Already an external string.
+ if (obj->IsThinString()) {
+ obj = i::ThinString::cast(obj)->actual();
}
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (isolate->heap()->IsInGCPostProcessing()) {
+
+ if (!obj->SupportsExternalization()) {
return false;
}
+
+ // It is safe to call FromWritable because SupportsExternalization already
+ // checked that the object is writable.
+ i::Isolate* isolate;
+ i::Isolate::FromWritableHeapObject(obj, &isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+
CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
- // Assert that if CanMakeExternal(), then externalizing actually succeeds.
- DCHECK(!CanMakeExternal() || result);
- if (result) {
- DCHECK(obj->IsExternalString());
- }
+ DCHECK(result);
+ DCHECK(obj->IsExternalString());
return result;
}
bool v8::String::CanMakeExternal() {
- i::Handle<i::String> obj = Utils::OpenHandle(this);
- if (obj->IsExternalString()) return false;
+ i::DisallowHeapAllocation no_allocation;
+ i::String* obj = *Utils::OpenHandle(this);
+
+ if (obj->IsThinString()) {
+ obj = i::ThinString::cast(obj)->actual();
+ }
+
+ if (!obj->SupportsExternalization()) {
+ return false;
+ }
// Only old space strings should be externalized.
- i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(*obj);
- i::AllocationSpace space = chunk->owner()->identity();
- return space != i::NEW_SPACE && space != i::RO_SPACE;
+ return !i::Heap::InNewSpace(obj);
}
+bool v8::String::StringEquals(Local<String> that) {
+ auto self = Utils::OpenHandle(this);
+ auto other = Utils::OpenHandle(*that);
+ return self->Equals(*other);
+}
Isolate* v8::Object::GetIsolate() {
i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -7026,14 +6758,6 @@ bool v8::BooleanObject::ValueOf() const {
}
-Local<v8::Value> v8::StringObject::New(Local<String> value) {
- i::Handle<i::String> string = Utils::OpenHandle(*value);
- DISABLE_DEPRECATED_WARNINGS
- i::Isolate* isolate = string->GetIsolate();
- RESET_DEPRECATED_WARNINGS
- return New(reinterpret_cast<Isolate*>(isolate), value);
-}
-
Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
@@ -7138,14 +6862,6 @@ MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
}
-Local<v8::RegExp> v8::RegExp::New(Local<String> pattern, Flags flags) {
- auto isolate =
- reinterpret_cast<Isolate*>(Utils::OpenHandle(*pattern)->GetIsolate());
- auto context = isolate->GetCurrentContext();
- RETURN_TO_LOCAL_UNCHECKED(New(context, pattern, flags), RegExp);
-}
-
-
Local<v8::String> v8::RegExp::GetSource() const {
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return Utils::ToLocal(
@@ -7436,12 +7152,6 @@ MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
}
-Local<Promise::Resolver> Promise::Resolver::New(Isolate* isolate) {
- RETURN_TO_LOCAL_UNCHECKED(New(isolate->GetCurrentContext()),
- Promise::Resolver);
-}
-
-
Local<Promise> Promise::Resolver::GetPromise() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
return Local<Promise>::Cast(Utils::ToLocal(promise));
@@ -7467,12 +7177,6 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
}
-void Promise::Resolver::Resolve(Local<Value> value) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- USE(Resolve(context, value));
-}
-
-
Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -7492,12 +7196,6 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
}
-void Promise::Resolver::Reject(Local<Value> value) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- USE(Reject(context, value));
-}
-
-
MaybeLocal<Promise> Promise::Catch(Local<Context> context,
Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, Promise, Catch, Promise);
@@ -7610,42 +7308,47 @@ Local<String> WasmCompiledModule::GetWasmWireBytes() {
.ToLocalChecked();
}
-// Currently, wasm modules are bound, both to Isolate and to
-// the Context they were created in. The currently-supported means to
-// decontextualize and then re-contextualize a module is via
-// serialization/deserialization.
WasmCompiledModule::TransferrableModule
WasmCompiledModule::GetTransferrableModule() {
- i::DisallowHeapAllocation no_gc;
- WasmCompiledModule::SerializedModule compiled_part = Serialize();
-
- BufferReference wire_bytes_ref = GetWasmWireBytesRef();
- size_t wire_size = wire_bytes_ref.size;
- std::unique_ptr<uint8_t[]> wire_bytes_copy(new uint8_t[wire_size]);
- memcpy(wire_bytes_copy.get(), wire_bytes_ref.start, wire_size);
-
- return TransferrableModule(std::move(compiled_part),
- {std::move(wire_bytes_copy), wire_size});
+ if (i::FLAG_wasm_shared_code) {
+ i::Handle<i::WasmModuleObject> obj =
+ i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
+ return TransferrableModule(obj->managed_native_module()->get());
+ } else {
+ WasmCompiledModule::SerializedModule serialized_module = Serialize();
+ BufferReference wire_bytes_ref = GetWasmWireBytesRef();
+ size_t wire_size = wire_bytes_ref.size;
+ std::unique_ptr<uint8_t[]> wire_bytes_copy(new uint8_t[wire_size]);
+ memcpy(wire_bytes_copy.get(), wire_bytes_ref.start, wire_size);
+ return TransferrableModule(std::move(serialized_module),
+ {std::move(wire_bytes_copy), wire_size});
+ }
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::FromTransferrableModule(
Isolate* isolate,
const WasmCompiledModule::TransferrableModule& transferrable_module) {
- MaybeLocal<WasmCompiledModule> ret =
- Deserialize(isolate, AsReference(transferrable_module.compiled_code),
- AsReference(transferrable_module.wire_bytes));
- return ret;
+ if (i::FLAG_wasm_shared_code) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::WasmModuleObject> module_object =
+ i_isolate->wasm_engine()->ImportNativeModule(
+ i_isolate, transferrable_module.shared_module_);
+ return Local<WasmCompiledModule>::Cast(
+ Utils::ToLocal(i::Handle<i::JSObject>::cast(module_object)));
+ } else {
+ return Deserialize(isolate, AsReference(transferrable_module.serialized_),
+ AsReference(transferrable_module.wire_bytes_));
+ }
}
WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
i::Handle<i::WasmModuleObject> obj =
i::Handle<i::WasmModuleObject>::cast(Utils::OpenHandle(this));
i::wasm::NativeModule* native_module = obj->native_module();
- size_t buffer_size =
- i::wasm::GetSerializedNativeModuleSize(obj->GetIsolate(), native_module);
+ i::wasm::WasmSerializer wasm_serializer(obj->GetIsolate(), native_module);
+ size_t buffer_size = wasm_serializer.GetSerializedNativeModuleSize();
std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- if (i::wasm::SerializeNativeModule(obj->GetIsolate(), native_module,
- {buffer.get(), buffer_size}))
+ if (wasm_serializer.SerializeNativeModule({buffer.get(), buffer_size}))
return {std::move(buffer), buffer_size};
return {};
}
@@ -7685,9 +7388,11 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
return MaybeLocal<WasmCompiledModule>();
}
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
i::MaybeHandle<i::JSObject> maybe_compiled =
i_isolate->wasm_engine()->SyncCompile(
- i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
+ i_isolate, enabled_features, &thrower,
+ i::wasm::ModuleWireBytes(start, start + length));
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@@ -7698,7 +7403,7 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
// move to wasm-js.cc.
class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
public:
- AsyncCompilationResolver(Isolate* isolate, Handle<Promise> promise)
+ AsyncCompilationResolver(Isolate* isolate, Local<Promise> promise)
: promise_(
reinterpret_cast<i::Isolate*>(isolate)->global_handles()->Create(
*Utils::OpenHandle(*promise))) {}
@@ -7726,55 +7431,23 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
};
WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
- Isolate* isolate)
- : isolate_(isolate) {
- MaybeLocal<Promise::Resolver> maybe_resolver =
- Promise::Resolver::New(isolate->GetCurrentContext());
- Local<Promise::Resolver> resolver = maybe_resolver.ToLocalChecked();
- promise_.Reset(isolate, resolver->GetPromise());
-
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- streaming_decoder_ = i_isolate->wasm_engine()->StartStreamingCompilation(
- i_isolate, handle(i_isolate->context(), i_isolate),
- base::make_unique<AsyncCompilationResolver>(isolate, GetPromise()));
+ Isolate* isolate) {
+ USE(isolate_);
}
-Local<Promise> WasmModuleObjectBuilderStreaming::GetPromise() {
- return promise_.Get(isolate_);
-}
+Local<Promise> WasmModuleObjectBuilderStreaming::GetPromise() { return {}; }
void WasmModuleObjectBuilderStreaming::OnBytesReceived(const uint8_t* bytes,
size_t size) {
- streaming_decoder_->OnBytesReceived(i::Vector<const uint8_t>(bytes, size));
}
void WasmModuleObjectBuilderStreaming::Finish() {
- streaming_decoder_->Finish();
}
void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
- Local<Promise> promise = GetPromise();
- // The promise has already been resolved, e.g. because of a compilation
- // error.
- if (promise->State() != v8::Promise::kPending) return;
- streaming_decoder_->Abort();
-
- // If no exception value is provided, we do not reject the promise. This can
- // happen when streaming compilation gets aborted when no script execution is
- // allowed anymore, e.g. when a browser tab gets refreshed.
- if (exception.IsEmpty()) return;
-
- Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
- i::HandleScope scope(i_isolate);
- Local<Context> context =
- Utils::ToLocal(handle(i_isolate->context(), i_isolate));
- auto maybe = resolver->Reject(context, exception.ToLocalChecked());
- CHECK_IMPLIES(!maybe.FromMaybe(false), i_isolate->has_scheduled_exception());
}
WasmModuleObjectBuilderStreaming::~WasmModuleObjectBuilderStreaming() {
- promise_.Reset();
}
// static
@@ -7799,35 +7472,53 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
"ArrayBuffer already externalized");
self->set_is_external(true);
- // We need to capture the contents before releasing the allocation from the
- // Wasm tracker, because otherwise we will not correctly capture the
- // allocation data.
const v8::ArrayBuffer::Contents contents = GetContents();
- if (self->is_wasm_memory()) {
- // Since this is being externalized, the Wasm Allocation Tracker can no
- // longer track it.
- //
- // TODO(eholk): Find a way to track this across externalization
- self->StopTrackingWasmMemory(isolate);
- }
isolate->heap()->UnregisterArrayBuffer(*self);
// A regular copy is good enough. No move semantics needed.
return contents;
}
+v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length,
+ void* allocation_base,
+ size_t allocation_length,
+ Allocator::AllocationMode allocation_mode,
+ DeleterCallback deleter, void* deleter_data)
+ : data_(data),
+ byte_length_(byte_length),
+ allocation_base_(allocation_base),
+ allocation_length_(allocation_length),
+ allocation_mode_(allocation_mode),
+ deleter_(deleter),
+ deleter_data_(deleter_data) {
+ DCHECK_LE(allocation_base_, data_);
+ DCHECK_LE(byte_length_, allocation_length_);
+}
+
+void WasmMemoryDeleter(void* buffer, size_t length, void* info) {
+ internal::wasm::WasmEngine* engine =
+ reinterpret_cast<internal::wasm::WasmEngine*>(info);
+ CHECK(engine->memory_tracker()->FreeMemoryIfIsWasmMemory(nullptr, buffer));
+}
+
+void ArrayBufferDeleter(void* buffer, size_t length, void* info) {
+ v8::ArrayBuffer::Allocator* allocator =
+ reinterpret_cast<v8::ArrayBuffer::Allocator*>(info);
+ allocator->Free(buffer, length);
+}
v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
- Contents contents;
- contents.allocation_base_ = self->allocation_base();
- contents.allocation_length_ = self->allocation_length();
- contents.allocation_mode_ = self->is_wasm_memory()
- ? Allocator::AllocationMode::kReservation
- : Allocator::AllocationMode::kNormal;
- contents.data_ = self->backing_store();
- contents.byte_length_ = byte_length;
+ Contents contents(
+ self->backing_store(), byte_length, self->allocation_base(),
+ self->allocation_length(),
+ self->is_wasm_memory() ? Allocator::AllocationMode::kReservation
+ : Allocator::AllocationMode::kNormal,
+ self->is_wasm_memory() ? WasmMemoryDeleter : ArrayBufferDeleter,
+ self->is_wasm_memory()
+ ? static_cast<void*>(self->GetIsolate()->wasm_engine())
+ : static_cast<void*>(self->GetIsolate()->array_buffer_allocator()));
return contents;
}
@@ -7948,13 +7639,13 @@ size_t v8::ArrayBufferView::ByteLength() {
size_t v8::TypedArray::Length() {
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
- return static_cast<size_t>(obj->length_value());
+ return obj->length_value();
}
static_assert(v8::TypedArray::kMaxLength == i::Smi::kMaxValue,
"v8::TypedArray::kMaxLength must match i::Smi::kMaxValue");
-#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype) \
Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer, \
size_t byte_offset, size_t length) { \
i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
@@ -8025,7 +7716,6 @@ bool v8::SharedArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
-
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
@@ -8033,46 +7723,51 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
"SharedArrayBuffer already externalized");
self->set_is_external(true);
- // We need to capture the contents before releasing the allocation from the
- // Wasm tracker, because otherwise we will not correctly capture the
- // allocation data.
const v8::SharedArrayBuffer::Contents contents = GetContents();
- if (self->is_wasm_memory()) {
- // Since this is being externalized, the Wasm Allocation Tracker can no
- // longer track it.
- //
- // TODO(eholk): Find a way to track this across externalization
- self->StopTrackingWasmMemory(isolate);
- }
isolate->heap()->UnregisterArrayBuffer(*self);
// A regular copy is good enough. No move semantics needed.
return contents;
}
+v8::SharedArrayBuffer::Contents::Contents(
+ void* data, size_t byte_length, void* allocation_base,
+ size_t allocation_length, Allocator::AllocationMode allocation_mode,
+ DeleterCallback deleter, void* deleter_data)
+ : data_(data),
+ byte_length_(byte_length),
+ allocation_base_(allocation_base),
+ allocation_length_(allocation_length),
+ allocation_mode_(allocation_mode),
+ deleter_(deleter),
+ deleter_data_(deleter_data) {
+ DCHECK_LE(allocation_base_, data_);
+ DCHECK_LE(byte_length_, allocation_length_);
+}
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
- Contents contents;
- contents.allocation_base_ = self->allocation_base();
- contents.allocation_length_ = self->allocation_length();
- contents.allocation_mode_ =
+ Contents contents(
+ self->backing_store(), byte_length, self->allocation_base(),
+ self->allocation_length(),
+ self->is_wasm_memory()
+ ? ArrayBuffer::Allocator::AllocationMode::kReservation
+ : ArrayBuffer::Allocator::AllocationMode::kNormal,
+ self->is_wasm_memory()
+ ? reinterpret_cast<Contents::DeleterCallback>(WasmMemoryDeleter)
+ : reinterpret_cast<Contents::DeleterCallback>(ArrayBufferDeleter),
self->is_wasm_memory()
- ? ArrayBufferAllocator::Allocator::AllocationMode::kReservation
- : ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
- contents.data_ = self->backing_store();
- contents.byte_length_ = byte_length;
+ ? static_cast<void*>(self->GetIsolate()->wasm_engine())
+ : static_cast<void*>(self->GetIsolate()->array_buffer_allocator()));
return contents;
}
-
size_t v8::SharedArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
}
-
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
size_t byte_length) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
@@ -8102,9 +7797,11 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
+ bool is_wasm_memory =
+ i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data);
i::JSArrayBuffer::Setup(obj, i_isolate,
mode == ArrayBufferCreationMode::kExternalized, data,
- byte_length, i::SharedFlag::kShared);
+ byte_length, i::SharedFlag::kShared, is_wasm_memory);
return Utils::ToLocalShared(obj);
}
@@ -8302,14 +7999,6 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() {
}
-v8::Local<v8::Context> Isolate::GetCallingContext() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::Handle<i::Object> calling = isolate->GetCallingNativeContext();
- if (calling.is_null()) return Local<Context>();
- return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
-}
-
-
v8::Local<v8::Context> Isolate::GetEnteredContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Handle<i::Object> last =
@@ -8670,6 +8359,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->malloced_memory_ =
isolate->allocator()->GetCurrentMemoryUsage() +
isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage();
+ heap_statistics->external_memory_ = isolate->heap()->external_memory();
heap_statistics->peak_malloced_memory_ =
isolate->allocator()->GetMaxMemoryUsage() +
isolate->wasm_engine()->allocator()->GetMaxMemoryUsage();
@@ -8806,18 +8496,6 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
isolate->RemoveCallCompletedCallback(callback);
}
-void Isolate::AddCallCompletedCallback(
- DeprecatedCallCompletedCallback callback) {
- AddCallCompletedCallback(reinterpret_cast<CallCompletedCallback>(callback));
-}
-
-
-void Isolate::RemoveCallCompletedCallback(
- DeprecatedCallCompletedCallback callback) {
- RemoveCallCompletedCallback(
- reinterpret_cast<CallCompletedCallback>(callback));
-}
-
void Isolate::AtomicsWaitWakeHandle::Wake() {
reinterpret_cast<i::AtomicsWaitWakeHandle*>(this)->Wake();
}
@@ -8861,17 +8539,6 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
}
-void Isolate::SetAutorunMicrotasks(bool autorun) {
- SetMicrotasksPolicy(
- autorun ? MicrotasksPolicy::kAuto : MicrotasksPolicy::kExplicit);
-}
-
-
-bool Isolate::WillAutorunMicrotasks() const {
- return GetMicrotasksPolicy() == MicrotasksPolicy::kAuto;
-}
-
-
void Isolate::SetMicrotasksPolicy(MicrotasksPolicy policy) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->handle_scope_implementer()->set_microtasks_policy(policy);
@@ -8959,7 +8626,7 @@ int Isolate::ContextDisposedNotification(bool dependant_context) {
if (!dependant_context) {
// We left the current context, we can abort all WebAssembly compilations on
// that isolate.
- isolate->wasm_engine()->AbortCompileJobsOnIsolate(isolate);
+ isolate->wasm_engine()->DeleteCompileJobsOnIsolate(isolate);
}
// TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
@@ -9065,6 +8732,9 @@ CALLBACK_SETTER(WasmCompileStreamingCallback, ApiImplementationCallback,
CALLBACK_SETTER(WasmStreamingCallback, WasmStreamingCallback,
wasm_streaming_callback)
+CALLBACK_SETTER(WasmThreadsEnabledCallback, WasmThreadsEnabledCallback,
+ wasm_threads_enabled_callback)
+
void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -9235,12 +8905,9 @@ String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
if (!obj->ToString(context).ToLocal(&str)) return;
length_ = str->Utf8Length(isolate);
str_ = i::NewArray<char>(length_ + 1);
- str->WriteUtf8(str_);
+ str->WriteUtf8(isolate, str_);
}
-String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
- : String::Utf8Value::Utf8Value(Isolate::GetCurrent(), obj) {}
-
String::Utf8Value::~Utf8Value() {
i::DeleteArray(str_);
}
@@ -9257,12 +8924,9 @@ String::Value::Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
if (!obj->ToString(context).ToLocal(&str)) return;
length_ = str->Length();
str_ = i::NewArray<uint16_t>(length_ + 1);
- str->Write(str_);
+ str->Write(isolate, str_);
}
-String::Value::Value(v8::Local<v8::Value> obj)
- : String::Value::Value(v8::Isolate::GetCurrent(), obj) {}
-
String::Value::~Value() {
i::DeleteArray(str_);
}
@@ -9757,7 +9421,7 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
i::ScriptData* script_data = nullptr;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- str, i::Compiler::ScriptDetails(), origin_options, nullptr,
+ isolate, str, i::Compiler::ScriptDetails(), origin_options, nullptr,
script_data, ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
@@ -9990,7 +9654,6 @@ void debug::GlobalLexicalScopeNames(
void debug::SetReturnValue(v8::Isolate* v8_isolate,
v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- if (!isolate->debug()->break_id()) return;
isolate->debug()->set_return_value(*Utils::OpenHandle(*value));
}
@@ -10773,6 +10436,55 @@ void EmbedderHeapTracer::FinalizeTracing() {
}
}
+void EmbedderHeapTracer::GarbageCollectionForTesting(
+ EmbedderStackState stack_state) {
+ CHECK(isolate_);
+ CHECK(i::FLAG_expose_gc);
+ i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
+ heap->SetEmbedderStackStateForNextFinalizaton(stack_state);
+ heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting,
+ kGCCallbackFlagForced);
+}
+
+bool EmbedderHeapTracer::AdvanceTracing(double deadline_in_ms) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ return !this->AdvanceTracing(
+ deadline_in_ms, AdvanceTracingActions(std::isinf(deadline_in_ms)
+ ? FORCE_COMPLETION
+ : DO_NOT_FORCE_COMPLETION));
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+}
+
+void EmbedderHeapTracer::EnterFinalPause(EmbedderStackState stack_state) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ this->EnterFinalPause();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+}
+
+bool EmbedderHeapTracer::IsTracingDone() {
+// TODO(mlippautz): Implement using "return true" after removing the deprecated
+// call.
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ return NumberOfWrappersToTrace() == 0;
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+}
+
namespace internal {
void HandleScopeImplementer::FreeThreadResources() {
@@ -10968,8 +10680,6 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
#undef SET_FIELD_WRAPPED
#undef NEW_STRING
#undef CALLBACK_SETTER
-#undef DISABLE_DEPRECATED_WARNINGS
-#undef RESET_DEPRECATED_WARNINGS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 8ffec881ed..ae0ce350a4 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -11,10 +11,15 @@
#include "src/detachable-vector.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
+#include "src/objects.h"
#include "src/objects/bigint.h"
#include "src/objects/js-collection.h"
+#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
+#include "src/objects/js-proxy.h"
#include "src/objects/module.h"
+#include "src/objects/shared-function-info.h"
+
#include "src/objects/templates.h"
namespace v8 {
@@ -31,36 +36,19 @@ class Consts {
};
};
-template <typename T> inline T ToCData(v8::internal::Object* obj) {
- STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- if (obj == v8::internal::Smi::kZero) return nullptr;
- return reinterpret_cast<T>(
- v8::internal::Foreign::cast(obj)->foreign_address());
-}
+template <typename T>
+inline T ToCData(v8::internal::Object* obj);
template <>
-inline v8::internal::Address ToCData(v8::internal::Object* obj) {
- if (obj == v8::internal::Smi::kZero) return v8::internal::kNullAddress;
- return v8::internal::Foreign::cast(obj)->foreign_address();
-}
+inline v8::internal::Address ToCData(v8::internal::Object* obj);
template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(
- v8::internal::Isolate* isolate, T obj) {
- STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- if (obj == nullptr) return handle(v8::internal::Smi::kZero, isolate);
- return isolate->factory()->NewForeign(
- reinterpret_cast<v8::internal::Address>(obj));
-}
+ v8::internal::Isolate* isolate, T obj);
template <>
inline v8::internal::Handle<v8::internal::Object> FromCData(
- v8::internal::Isolate* isolate, v8::internal::Address obj) {
- if (obj == v8::internal::kNullAddress) {
- return handle(v8::internal::Smi::kZero, isolate);
- }
- return isolate->factory()->NewForeign(obj);
-}
+ v8::internal::Isolate* isolate, v8::internal::Address obj);
class ApiFunction {
public:
@@ -250,17 +238,14 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
#undef DECLARE_OPEN_HANDLE
- template<class From, class To>
- static inline Local<To> Convert(v8::internal::Handle<From> obj) {
- DCHECK(obj.is_null() || (obj->IsSmi() || !obj->IsTheHole()));
- return Local<To>(reinterpret_cast<To*>(obj.location()));
- }
+template <class From, class To>
+static inline Local<To> Convert(v8::internal::Handle<From> obj);
- template <class T>
- static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
- const v8::Persistent<T>& persistent) {
- return v8::internal::Handle<v8::internal::Object>(
- reinterpret_cast<v8::internal::Object**>(persistent.val_));
+template <class T>
+static inline v8::internal::Handle<v8::internal::Object> OpenPersistent(
+ const v8::Persistent<T>& persistent) {
+ return v8::internal::Handle<v8::internal::Object>(
+ reinterpret_cast<v8::internal::Object**>(persistent.val_));
}
template <class T>
@@ -302,84 +287,6 @@ inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
return false;
}
-
-// Implementations of ToLocal
-
-#define MAKE_TO_LOCAL(Name, From, To) \
- Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
- return Convert<v8::internal::From, v8::To>(obj); \
- }
-
-
-#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
- Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
- v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
- DCHECK(obj->type() == v8::internal::kExternal##Type##Array); \
- return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
- }
-
-
-MAKE_TO_LOCAL(ToLocal, Context, Context)
-MAKE_TO_LOCAL(ToLocal, Object, Value)
-MAKE_TO_LOCAL(ToLocal, Module, Module)
-MAKE_TO_LOCAL(ToLocal, Name, Name)
-MAKE_TO_LOCAL(ToLocal, String, String)
-MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
-MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
-MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
-MAKE_TO_LOCAL(ToLocal, JSObject, Object)
-MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
-MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, JSMap, Map)
-MAKE_TO_LOCAL(ToLocal, JSSet, Set)
-MAKE_TO_LOCAL(ToLocal, JSProxy, Proxy)
-MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
-MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
-MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
-MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
-MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
-
-TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
-
-MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
-MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
-MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
-MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
-MAKE_TO_LOCAL(MessageToLocal, Object, Message)
-MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
-MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
-MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
-MAKE_TO_LOCAL(NumberToLocal, Object, Number)
-MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
-MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
-MAKE_TO_LOCAL(ToLocal, BigInt, BigInt);
-MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
-MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
-MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
-MAKE_TO_LOCAL(ToLocal, FixedArray, PrimitiveArray)
-MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule)
-
-#undef MAKE_TO_LOCAL_TYPED_ARRAY
-#undef MAKE_TO_LOCAL
-
-
-// Implementations of OpenHandle
-
-#define MAKE_OPEN_HANDLE(From, To) \
- v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
- const v8::From* that, bool allow_empty_handle) { \
- DCHECK(allow_empty_handle || that != nullptr); \
- DCHECK(that == nullptr || \
- (*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
- return v8::internal::Handle<v8::internal::To>( \
- reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
- }
-
-OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
-
-#undef MAKE_OPEN_HANDLE
-#undef OPEN_HANDLE_LIST
-
namespace internal {
class V8_EXPORT_PRIVATE DeferredHandles {
@@ -634,12 +541,6 @@ bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
return !entered_contexts_.empty() && entered_contexts_.back() == *context;
}
-
-Handle<Context> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.empty()) return Handle<Context>::null();
- return Handle<Context>(entered_contexts_.back(), isolate_);
-}
-
void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
DCHECK(!microtask_context_);
microtask_context_ = *context;
@@ -651,11 +552,6 @@ void HandleScopeImplementer::LeaveMicrotaskContext() {
entered_context_count_during_microtasks_ = 0;
}
-Handle<Context> HandleScopeImplementer::MicrotaskContext() {
- if (microtask_context_) return Handle<Context>(microtask_context_, isolate_);
- return Handle<Context>::null();
-}
-
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
internal::Object** block =
diff --git a/deps/v8/src/arguments-inl.h b/deps/v8/src/arguments-inl.h
new file mode 100644
index 0000000000..c1a18ab82f
--- /dev/null
+++ b/deps/v8/src/arguments-inl.h
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARGUMENTS_INL_H_
+#define V8_ARGUMENTS_INL_H_
+
+#include "src/arguments.h"
+
+#include "src/handles-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+template <class S>
+Handle<S> Arguments::at(int index) {
+ return Handle<S>::cast(at<Object>(index));
+}
+
+int Arguments::smi_at(int index) { return Smi::ToInt((*this)[index]); }
+
+double Arguments::number_at(int index) { return (*this)[index]->Number(); }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ARGUMENTS_INL_H_
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index bef9cba698..0bfdd770f5 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -42,19 +42,11 @@ class Arguments BASE_EMBEDDED {
}
template <class S = Object>
- Handle<S> at(int index) {
- Object** value = &((*this)[index]);
- // This cast checks that the object we're accessing does indeed have the
- // expected type.
- S::cast(*value);
- return Handle<S>(reinterpret_cast<S**>(value));
- }
+ inline Handle<S> at(int index);
- int smi_at(int index) { return Smi::ToInt((*this)[index]); }
+ inline int smi_at(int index);
- double number_at(int index) {
- return (*this)[index]->Number();
- }
+ inline double number_at(int index);
// Get the total number of arguments including the receiver.
int length() const { return static_cast<int>(length_); }
@@ -70,6 +62,12 @@ class Arguments BASE_EMBEDDED {
Object** arguments_;
};
+template <>
+inline Handle<Object> Arguments::at(int index) {
+ Object** value = &((*this)[index]);
+ return Handle<Object>(value);
+}
+
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#ifdef DEBUG
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index a432e44814..68ea6f3210 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -78,10 +78,10 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsOffHeapTarget(rmode_));
- if (Assembler::IsMovW(Memory::int32_at(pc_))) {
+ if (Assembler::IsMovW(Memory<int32_t>(pc_))) {
return pc_;
} else {
- DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory<int32_t>(pc_)));
return constant_pool_entry_address();
}
}
@@ -120,8 +120,7 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
- heap->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -140,7 +139,7 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
@@ -170,9 +169,9 @@ Address RelocInfo::target_off_heap_target() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_));
+ IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = kNullAddress;
+ Memory<Address>(pc_) = kNullAddress;
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
@@ -181,7 +180,7 @@ void RelocInfo::WipeOut() {
Handle<Code> Assembler::relative_code_target_object_handle_at(
Address pc) const {
Instruction* branch = Instruction::At(pc);
- int code_target_index = branch->GetBranchOffset() / Instruction::kInstrSize;
+ int code_target_index = branch->GetBranchOffset() / kInstrSize;
return GetCodeTarget(code_target_index);
}
@@ -255,22 +254,21 @@ Address Assembler::target_address_from_return_address(Address pc) {
// ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
- Address candidate = pc - 2 * Assembler::kInstrSize;
- Instr candidate_instr(Memory::int32_at(candidate));
+ Address candidate = pc - 2 * kInstrSize;
+ Instr candidate_instr(Memory<int32_t>(candidate));
if (IsLdrPcImmediateOffset(candidate_instr)) {
return candidate;
} else {
if (CpuFeatures::IsSupported(ARMv7)) {
- candidate -= 1 * Assembler::kInstrSize;
- DCHECK(IsMovW(Memory::int32_at(candidate)) &&
- IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
+ candidate -= 1 * kInstrSize;
+ DCHECK(IsMovW(Memory<int32_t>(candidate)) &&
+ IsMovT(Memory<int32_t>(candidate + kInstrSize)));
} else {
- candidate -= 3 * Assembler::kInstrSize;
- DCHECK(
- IsMovImmed(Memory::int32_at(candidate)) &&
- IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
+ candidate -= 3 * kInstrSize;
+ DCHECK(IsMovImmed(Memory<int32_t>(candidate)) &&
+ IsOrrImmed(Memory<int32_t>(candidate + kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(candidate + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(candidate + 3 * kInstrSize)));
}
return candidate;
}
@@ -278,20 +276,20 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+ if (IsLdrPcImmediateOffset(Memory<int32_t>(pc))) {
// Load from constant pool, small section.
return pc + kInstrSize * 2;
} else {
if (CpuFeatures::IsSupported(ARMv7)) {
- DCHECK(IsMovW(Memory::int32_at(pc)));
- DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ DCHECK(IsMovW(Memory<int32_t>(pc)));
+ DCHECK(IsMovT(Memory<int32_t>(pc + kInstrSize)));
// A movw / movt load immediate.
return pc + kInstrSize * 3;
} else {
- DCHECK(IsMovImmed(Memory::int32_at(pc)));
- DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
- DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
- DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ DCHECK(IsMovImmed(Memory<int32_t>(pc)));
+ DCHECK(IsOrrImmed(Memory<int32_t>(pc + kInstrSize)));
+ DCHECK(IsOrrImmed(Memory<int32_t>(pc + 2 * kInstrSize)));
+ DCHECK(IsOrrImmed(Memory<int32_t>(pc + 3 * kInstrSize)));
// A mov / orr load immediate.
return pc + kInstrSize * 5;
}
@@ -300,7 +298,7 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ Memory<Address>(constant_pool_entry) = target;
}
int Assembler::deserialization_special_target_size(Address location) {
@@ -309,19 +307,19 @@ int Assembler::deserialization_special_target_size(Address location) {
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
bool Assembler::is_constant_pool_load(Address pc) {
- return IsLdrPcImmediateOffset(Memory::int32_at(pc));
+ return IsLdrPcImmediateOffset(Memory<int32_t>(pc));
}
Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
- DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- Instr instr = Memory::int32_at(pc);
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory<int32_t>(pc)));
+ Instr instr = Memory<int32_t>(pc);
return pc + GetLdrRegisterImmediateOffset(instr) + Instruction::kPcLoadDelta;
}
@@ -329,21 +327,21 @@ Address Assembler::constant_pool_entry_address(Address pc,
Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
- return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
- } else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory::int32_at(pc))) {
+ return Memory<Address>(constant_pool_entry_address(pc, constant_pool));
+ } else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory<int32_t>(pc))) {
// This is an movw / movt immediate load. Return the immediate.
- DCHECK(IsMovW(Memory::int32_at(pc)) &&
- IsMovT(Memory::int32_at(pc + kInstrSize)));
+ DCHECK(IsMovW(Memory<int32_t>(pc)) &&
+ IsMovT(Memory<int32_t>(pc + kInstrSize)));
Instruction* movw_instr = Instruction::At(pc);
Instruction* movt_instr = Instruction::At(pc + kInstrSize);
return static_cast<Address>((movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue());
- } else if (IsMovImmed(Memory::int32_at(pc))) {
+ } else if (IsMovImmed(Memory<int32_t>(pc))) {
// This is an mov / orr immediate load. Return the immediate.
- DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
- IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ DCHECK(IsMovImmed(Memory<int32_t>(pc)) &&
+ IsOrrImmed(Memory<int32_t>(pc + kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(pc + 3 * kInstrSize)));
Instr mov_instr = instr_at(pc);
Instr orr_instr_1 = instr_at(pc + kInstrSize);
Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
@@ -364,7 +362,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
- Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
+ Memory<Address>(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
// Assembler::FlushICache(pc, sizeof(target));
@@ -373,37 +371,37 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
// ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
- } else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory::int32_at(pc))) {
+ } else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory<int32_t>(pc))) {
// This is an movw / movt immediate load. Patch the immediate embedded in
// the instructions.
- DCHECK(IsMovW(Memory::int32_at(pc)));
- DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ DCHECK(IsMovW(Memory<int32_t>(pc)));
+ DCHECK(IsMovT(Memory<int32_t>(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = static_cast<uint32_t>(target);
instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
- DCHECK(IsMovW(Memory::int32_at(pc)));
- DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ DCHECK(IsMovW(Memory<int32_t>(pc)));
+ DCHECK(IsMovT(Memory<int32_t>(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, 2 * kInstrSize);
}
- } else if (IsMovImmed(Memory::int32_at(pc))) {
+ } else if (IsMovImmed(Memory<int32_t>(pc))) {
// This is an mov / orr immediate load. Patch the immediate embedded in
// the instructions.
- DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
- IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ DCHECK(IsMovImmed(Memory<int32_t>(pc)) &&
+ IsOrrImmed(Memory<int32_t>(pc + kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(pc + 3 * kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = static_cast<uint32_t>(target);
instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
- DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
- IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ DCHECK(IsMovImmed(Memory<int32_t>(pc)) &&
+ IsOrrImmed(Memory<int32_t>(pc + kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory<int32_t>(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, 4 * kInstrSize);
}
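A note on the accessor change running through the hunks above: the old Memory::int32_at / Memory::Address_at static helpers are replaced by a single templated Memory<T> accessor. As a rough sketch of its shape (an assumption for illustration, not the exact V8 definition), it simply hands back a typed mutable reference into raw memory, which is why the same call can appear on either side of an assignment:

    #include <cstdint>

    using Address = uintptr_t;  // matches how code addresses are passed around in these hunks

    // Reinterpret the bytes at 'addr' as a T and return a mutable reference,
    // so callers can read (e.g. IsMovW(Memory<int32_t>(pc))) or write
    // (e.g. Memory<Address>(entry) = target) through one helper.
    template <class T>
    inline T& Memory(Address addr) {
      return *reinterpret_cast<T*>(addr);
    }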
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 576bcb30f6..163fa4c219 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -485,7 +485,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
- Memory::Address_at(constant_pool_entry_address(pc, 0 /* unused */)) =
+ Memory<Address>(constant_pool_entry_address(pc, 0 /* unused */)) =
object.address();
}
}
@@ -2058,6 +2058,13 @@ void Assembler::rbit(Register dst, Register src, Condition cond) {
emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
}
+void Assembler::rev(Register dst, Register src, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.144.
+ // cond(31-28) | 011010111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
+ emit(cond | 0x6BF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
+}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
@@ -2233,6 +2240,30 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
0xF9 * B4 | src2.code());
}
+void Assembler::ldrexd(Register dst1, Register dst2, Register src,
+ Condition cond) {
+ // cond(31-28) | 00011011(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ DCHECK(dst1 != lr); // r14.
+ // The pair of destination registers is restricted to being an even-numbered
+ // register and the odd-numbered register that immediately follows it.
+ DCHECK_EQ(0, dst1.code() % 2);
+ DCHECK_EQ(dst1.code() + 1, dst2.code());
+ emit(cond | B24 | B23 | B21 | B20 | src.code() * B16 | dst1.code() * B12 |
+ 0xF9F);
+}
+
+void Assembler::strexd(Register res, Register src1, Register src2, Register dst,
+ Condition cond) {
+  // cond(31-28) | 00011010(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) | Rt(3-0)
+ DCHECK(src1 != lr); // r14.
+ // The pair of source registers is restricted to being an even-numbered
+ // register and the odd-numbered register that immediately follows it.
+ DCHECK_EQ(0, src1.code() % 2);
+ DCHECK_EQ(src1.code() + 1, src2.code());
+ emit(cond | B24 | B23 | B21 | dst.code() * B16 | res.code() * B12 |
+ 0xF9 * B4 | src1.code());
+}
+
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
// Instruction details available in ARM DDI 0406C.b, A8.8.128.
@@ -2827,25 +2858,6 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm) {
- CpuFeatureScope scope(this, ARMv7);
- // TODO(jfb) Temporarily turned off until we have constant blinding or
- // some equivalent mitigation: an attacker can otherwise control
- // generated data which also happens to be executable, a Very Bad
- // Thing indeed.
- // Blinding gets tricky because we don't have xor, we probably
- // need to add/subtract without losing precision, which requires a
- // cookie value that Lithium is probably better positioned to
- // choose.
- // We could also add a few peepholes here like detecting 0.0 and
- // -0.0 and doing a vmov from the sequestered d14, forcing denorms
- // to zero (we set flush-to-zero), and normalizing NaN values.
- // We could also detect redundant values.
- // The code could also randomize the order of values, though
- // that's tricky because vldr has a limited reach. Furthermore
- // it breaks load locality.
- ConstantPoolAddEntry(pc_offset(), imm);
- vldr(dst, MemOperand(pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -2861,14 +2873,14 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
} else if (extra_scratch == no_reg) {
// We only have one spare scratch register.
mov(scratch, Operand(lo));
- vmov(dst, VmovIndexLo, scratch);
+ vmov(NeonS32, dst, 0, scratch);
if (((lo & 0xFFFF) == (hi & 0xFFFF)) && CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
movt(scratch, hi >> 16);
} else {
mov(scratch, Operand(hi));
}
- vmov(dst, VmovIndexHi, scratch);
+ vmov(NeonS32, dst, 1, scratch);
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
@@ -2909,40 +2921,6 @@ void Assembler::vmov(const DwVfpRegister dst,
}
void Assembler::vmov(const DwVfpRegister dst,
- const VmovIndex index,
- const Register src,
- const Condition cond) {
- // Dd[index] = Rt
- // Instruction details available in ARM DDI 0406C.b, A8-940.
- // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
- // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
- DCHECK(VfpRegisterIsAvailable(dst));
- DCHECK(index.index == 0 || index.index == 1);
- int vd, d;
- dst.split_code(&vd, &d);
- emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
- d*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
- const VmovIndex index,
- const DwVfpRegister src,
- const Condition cond) {
- // Dd[index] = Rt
- // Instruction details available in ARM DDI 0406C.b, A8.8.342.
- // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
- // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
- DCHECK(VfpRegisterIsAvailable(src));
- DCHECK(index.index == 0 || index.index == 1);
- int vn, n;
- src.split_code(&vn, &n);
- emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
- 0xB*B8 | n*B7 | B4);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond) {
@@ -5140,14 +5118,17 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
- bool sharing_ok =
- RelocInfo::IsNone(rmode) || RelocInfo::IsShareableRelocMode(rmode);
+ // We can share CODE_TARGETs because we don't patch the code objects anymore,
+  // and we make sure we emit only one reloc info for them (thus delta patching
+  // will apply the delta only once). At the moment, we do not dedup code targets
+ // if they are wrapped in a heap object request (value == 0).
+ bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) ||
+ (rmode == RelocInfo::CODE_TARGET && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
}
- ConstantPoolEntry entry(
- position, value, sharing_ok || (rmode == RelocInfo::CODE_TARGET), rmode);
+ ConstantPoolEntry entry(position, value, sharing_ok, rmode);
bool shared = false;
if (sharing_ok) {
@@ -5164,24 +5145,6 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
- // Share entries if allowed and possible.
- // Null-values are placeholders and must be ignored.
- if (rmode == RelocInfo::CODE_TARGET && value != 0) {
- // Sharing entries here relies on canonicalized handles - without them, we
- // will miss the optimisation opportunity.
- Address handle_address = static_cast<Address>(value);
- auto existing = handle_to_index_map_.find(handle_address);
- if (existing != handle_to_index_map_.end()) {
- int index = existing->second;
- entry.set_merged_index(index);
- shared = true;
- } else {
- // Keep track of this code handle.
- handle_to_index_map_[handle_address] =
- static_cast<int>(pending_32_bit_constants_.size());
- }
- }
-
pending_32_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
@@ -5194,30 +5157,6 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
-void Assembler::ConstantPoolAddEntry(int position, Double value) {
- DCHECK_LT(pending_64_bit_constants_.size(), kMaxNumPending64Constants);
- if (pending_64_bit_constants_.empty()) {
- first_const_pool_64_use_ = position;
- }
- ConstantPoolEntry entry(position, value);
-
- // Merge the constant, if possible.
- for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
- ConstantPoolEntry& current_entry = pending_64_bit_constants_[i];
- DCHECK(current_entry.sharing_ok());
- if (entry.value() == current_entry.value()) {
- entry.set_merged_index(i);
- break;
- }
- }
- pending_64_bit_constants_.push_back(entry);
-
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
-}
-
-
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
@@ -5419,7 +5358,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
pending_32_bit_constants_.clear();
pending_64_bit_constants_.clear();
- handle_to_index_map_.clear();
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
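The new rev emitter added above encodes ARM's byte-reverse instruction, and the simulator change later in this diff models it with a ByteReverse helper. A small standalone illustration of the operation (assuming a plain 32-bit byte swap, which is what rev does per the ARM ARM):

    #include <cstdint>
    #include <cstdio>

    // rev rd, rm reverses the four bytes of a 32-bit word, e.g. for endianness
    // conversion: 0x12345678 becomes 0x78563412.
    uint32_t ByteReverse32(uint32_t x) {
      return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
             ((x << 8) & 0x00FF0000u) | (x << 24);
    }

    int main() {
      std::printf("%08x\n", ByteReverse32(0x12345678u));  // prints 78563412
      return 0;
    }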
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 2e71ce59e6..fb36702882 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -167,7 +167,6 @@ static_assert(sizeof(Register) == sizeof(int),
"Register can efficiently be passed by value");
// r7: context register
-// r9: lithium scratch
#define DECLARE_REGISTER(R) \
constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
@@ -605,14 +604,7 @@ class NeonListOperand BASE_EMBEDDED {
int register_count_;
};
-
-struct VmovIndex {
- unsigned char index;
-};
-constexpr VmovIndex VmovIndexLo = { 0 };
-constexpr VmovIndex VmovIndexHi = { 1 };
-
-class Assembler : public AssemblerBase {
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -700,9 +692,6 @@ class Assembler : public AssemblerBase {
// pointer.
static constexpr int kSpecialTargetSize = kPointerSize;
- // Size of an instruction.
- static constexpr int kInstrSize = sizeof(Instr);
-
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
VfpRegList* GetScratchVfpRegisterList() {
return &scratch_vfp_register_list_;
@@ -908,6 +897,7 @@ class Assembler : public AssemblerBase {
// Reverse the bits in a register.
void rbit(Register dst, Register src, Condition cond = al);
+ void rev(Register dst, Register src, Condition cond = al);
// Status register access instructions
@@ -940,6 +930,9 @@ class Assembler : public AssemblerBase {
void strexb(Register src1, Register src2, Register dst, Condition cond = al);
void ldrexh(Register dst, Register src, Condition cond = al);
void strexh(Register src1, Register src2, Register dst, Condition cond = al);
+ void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
+ void strexd(Register res, Register src1, Register src2, Register dst,
+ Condition cond = al);
// Preload instructions
void pld(const MemOperand& address);
@@ -1070,16 +1063,6 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
- // TODO(bbudge) Replace uses of these with the more general core register to
- // scalar register vmov's.
- void vmov(const DwVfpRegister dst,
- const VmovIndex index,
- const Register src,
- const Condition cond = al);
- void vmov(const Register dst,
- const VmovIndex index,
- const DwVfpRegister src,
- const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
@@ -1637,9 +1620,6 @@ class Assembler : public AssemblerBase {
std::vector<ConstantPoolEntry> pending_32_bit_constants_;
std::vector<ConstantPoolEntry> pending_64_bit_constants_;
- // Map of address of handle to index in pending_32_bit_constants_.
- std::map<Address, int> handle_to_index_map_;
-
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
VfpRegList scratch_vfp_register_list_;
@@ -1705,7 +1685,6 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value);
- void ConstantPoolAddEntry(int position, Double value);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
friend class RelocInfo;
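The ldrexd/strexd declarations above (defined in assembler-arm.cc earlier in this diff) follow ARM's exclusive-access pattern for 64-bit data: ldrexd loads an even/odd register pair and takes out an exclusive reservation, strexd stores a pair and writes 0 to its status register on success or 1 if the reservation was lost, and the caller retries on failure. This is the same loop a C++ compiler emits for lock-free 64-bit atomics on ARMv7; a hedged, standalone illustration (not code from this patch):

    #include <atomic>
    #include <cstdint>

    // On 32-bit ARMv7, a lock-free 64-bit exchange like this is typically
    // lowered to an ldrexd/strexd retry loop: ldrexd reads the old pair,
    // strexd attempts the store and yields 0 on success or 1 on failure,
    // and the loop repeats until the store-exclusive succeeds.
    uint64_t ExchangeU64(std::atomic<uint64_t>* slot, uint64_t desired) {
      return slot->exchange(desired, std::memory_order_seq_cst);
    }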
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 26131ea305..bb5becefb8 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_ARM
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
@@ -227,7 +227,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(
- tasm, tasm->CallStubSize() + 2 * Assembler::kInstrSize);
+ tasm, TurboAssembler::kCallStubSize + 2 * kInstrSize);
tasm->push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(lr);
@@ -239,7 +239,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(
- masm, masm->CallStubSize() + 2 * Assembler::kInstrSize);
+ masm, TurboAssembler::kCallStubSize + 2 * kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@@ -249,8 +249,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction, followed by a call.
- const int32_t kReturnAddressDistanceFromFunctionStart =
- 3 * Assembler::kInstrSize;
+ const int32_t kReturnAddressDistanceFromFunctionStart = 3 * kInstrSize;
// This should contain all kCallerSaved registers.
const RegList kSavedRegs =
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index ff439f8259..39f756d152 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -167,7 +167,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
@@ -283,7 +283,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 1d041e75be..b012340418 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -105,31 +105,6 @@ inline Condition NegateCondition(Condition cond) {
}
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cond) {
- switch (cond) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cond;
- }
-}
-
-
// -----------------------------------------------------------------------------
// Instructions encoding.
@@ -461,14 +436,12 @@ inline Hint NegateHint(Hint ignored) { return no_hint; }
// return ((type == 0) || (type == 1)) && instr->HasS();
// }
//
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
class Instruction {
public:
- enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
- kPCReadOffset = 8
- };
-
// Difference between address of current opcode and value read from pc
// register.
static constexpr int kPcLoadDelta = 8;
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 5dab458889..0c6ef132f8 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -669,7 +669,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 'A': {
// Print pc-relative address.
int offset = instr->Offset12Value();
- byte* pc = reinterpret_cast<byte*>(instr) + Instruction::kPCReadOffset;
+ byte* pc = reinterpret_cast<byte*>(instr) + Instruction::kPcLoadDelta;
byte* addr;
switch (instr->PUField()) {
case db_x: {
@@ -786,6 +786,9 @@ void Decoder::DecodeType01(Instruction* instr) {
case 0:
Format(instr, "ldrex'cond 'rt, ['rn]");
break;
+ case 1:
+ Format(instr, "ldrexd'cond 'rt, ['rn]");
+ break;
case 2:
Format(instr, "ldrexb'cond 'rt, ['rn]");
break;
@@ -804,6 +807,9 @@ void Decoder::DecodeType01(Instruction* instr) {
case 0:
Format(instr, "strex'cond 'rd, 'rm, ['rn]");
break;
+ case 1:
+ Format(instr, "strexd'cond 'rd, 'rm, ['rn]");
+ break;
case 2:
Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
break;
@@ -1194,6 +1200,9 @@ void Decoder::DecodeType3(Instruction* instr) {
}
}
}
+ } else if (instr->Bits(27, 16) == 0x6BF &&
+ instr->Bits(11, 4) == 0xF3) {
+ Format(instr, "rev'cond 'rd, 'rm");
} else {
UNREACHABLE();
}
@@ -1416,7 +1425,7 @@ int Decoder::DecodeType7(Instruction* instr) {
break;
}
}
- return Instruction::kInstrSize;
+ return kInstrSize;
}
@@ -2599,14 +2608,14 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);
- return Instruction::kInstrSize;
+ return kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
- return Instruction::kInstrSize;
+ return kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
@@ -2643,7 +2652,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
break;
}
}
- return Instruction::kInstrSize;
+ return kInstrSize;
}
@@ -2693,13 +2702,6 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
v8::internal::Decoder d(converter_, buffer);
@@ -2711,10 +2713,10 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
}
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
NameConverter converter;
- Disassembler d(converter);
+ Disassembler d(converter, unimplemented_action);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
@@ -2725,7 +2727,6 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
}
}
-
} // namespace disasm
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index b96826264a..8af455fc6e 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -245,30 +245,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // argument count (argc)
- r2, // address of first argument (argv)
- r1 // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index d02766791b..09db465d59 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -24,6 +24,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/arm/macro-assembler-arm.h"
@@ -172,7 +173,6 @@ void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
- DCHECK(RelocInfo::IsCodeTarget(rmode));
mov(pc, Operand(target, rmode), LeaveCC, cond);
}
@@ -193,8 +193,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
- b(code_target_index * Instruction::kInstrSize, cond,
- RelocInfo::RELATIVE_CODE_TARGET);
+ b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
UseScratchRegisterScope temps(this);
@@ -206,6 +205,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
@@ -219,29 +219,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}
-int TurboAssembler::CallSize(Register target, Condition cond) {
- return kInstrSize;
-}
-
void TurboAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
- Label start;
- bind(&start);
blx(target, cond);
- DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
-}
-
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
- Condition cond) {
- Instr mov_instr = cond | MOV | LeaveCC;
- Operand mov_operand = Operand(target, rmode);
- return kInstrSize +
- mov_operand.InstructionsRequired(this, mov_instr) * kInstrSize;
-}
-
-int TurboAssembler::CallStubSize() {
- return CallSize(Handle<Code>(), RelocInfo::CODE_TARGET, al);
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
@@ -251,20 +232,12 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
if (check_constant_pool) MaybeCheckConstPool();
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
- Label start;
- bind(&start);
bool old_predictable_code_size = predictable_code_size();
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(true);
}
-#ifdef DEBUG
- // Check the expected size before generating code to ensure we assume the same
- // constant pool availability (e.g., whether constant pool is full or not).
- int expected_size = CallSize(target, rmode, cond);
-#endif
-
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
@@ -282,17 +255,11 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
mov(ip, Operand(target, rmode));
blx(ip, cond);
- DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
}
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
- return CallSize(code.address(), rmode, cond);
-}
-
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
@@ -305,8 +272,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
- bl(code_target_index * Instruction::kInstrSize, cond,
- RelocInfo::RELATIVE_CODE_TARGET);
+ bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not
@@ -318,7 +284,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
- DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
@@ -867,7 +833,7 @@ void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
vmov(dst, loc.high());
} else {
- vmov(dst, VmovIndexHi, src);
+ vmov(NeonS32, dst, src, 1);
}
}
@@ -876,7 +842,7 @@ void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
vmov(loc.high(), src);
} else {
- vmov(dst, VmovIndexHi, src);
+ vmov(NeonS32, dst, 1, src);
}
}
@@ -885,7 +851,7 @@ void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
vmov(dst, loc.low());
} else {
- vmov(dst, VmovIndexLo, src);
+ vmov(NeonS32, dst, src, 0);
}
}
@@ -894,7 +860,7 @@ void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
vmov(loc.low(), src);
} else {
- vmov(dst, VmovIndexLo, src);
+ vmov(NeonS32, dst, 0, src);
}
}
@@ -1698,13 +1664,10 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
- Label start;
- bind(&start);
#ifdef DEBUG
- // Check the expected size before generating code to ensure we assume the same
- // constant pool availability (e.g., whether constant pool is full or not).
- int expected_size = CallStubSize();
+ Label start;
+ bind(&start);
#endif
// Call sequence on V7 or later may be :
@@ -1721,7 +1684,7 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
mov(ip, Operand::EmbeddedCode(stub));
blx(ip, al);
- DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+ DCHECK_EQ(kCallStubSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
@@ -1917,6 +1880,18 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
+ PrepareCallCFunction(1, 0, r1);
+ Move(r1, ExternalReference::abort_with_reason());
+ // Use Call directly to avoid any unneeded overhead. The function won't
+ // return anyway.
+ Call(r1);
+ return;
+ }
+
Move(r1, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -1929,17 +1904,6 @@ void TurboAssembler::Abort(AbortReason reason) {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
}
// will not return here
- if (is_const_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the Abort macro constant.
- static const int kExpectedAbortInstructions = 7;
- int abort_instructions = InstructionsGeneratedSince(&abort_start);
- DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
- while (abort_instructions++ < kExpectedAbortInstructions) {
- nop();
- }
- }
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
@@ -2285,13 +2249,14 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
- int num_double_arguments) {
+ int num_double_arguments,
+ Register scratch) {
int frame_alignment = ActivationFrameAlignment();
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ if (!scratch.is_valid()) scratch = temps.Acquire();
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
@@ -2299,7 +2264,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
+ } else if (stack_passed_arguments > 0) {
sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
@@ -2421,34 +2386,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
UNREACHABLE();
}
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// We can use the register pc - 8 for the address of the current instruction.
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 87a8ff2834..055b6e6fbc 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -36,6 +36,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = r2;
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
+constexpr Register kRuntimeCallArgvRegister = r2;
constexpr Register kWasmInstanceRegister = r3;
// ----------------------------------------------------------------------------
@@ -51,15 +52,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
constexpr Register cp = r7; // JavaScript context pointer.
constexpr Register kRootRegister = r10; // Roots array pointer.
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -72,25 +64,12 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg5 = no_reg,
Register reg6 = no_reg);
-
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
- Register reg8 = no_reg);
-#endif
-
-
enum TargetAddressStorageMode {
CAN_INLINE_TARGET_ADDRESS,
NEVER_INLINE_TARGET_ADDRESS
};
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -259,8 +238,8 @@ class TurboAssembler : public TurboAssemblerBase {
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers = 0);
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers = 0,
+ Register scratch = no_reg);
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
@@ -324,16 +303,7 @@ class TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
- // Returns the size of a call in instructions. Note, the value returned is
- // only valid as long as no entries are added to the constant pool between
- // checking the call size and emitting the actual call.
- static int CallSize(Register target, Condition cond = al);
- int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- Condition cond = al);
- int CallStubSize();
-
+ static constexpr int kCallStubSize = 2 * kInstrSize;
void CallStubDelayed(CodeStub* stub);
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
@@ -618,13 +588,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// GC Support
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp);
-
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index e0f57396c1..b1e8421876 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -359,7 +359,7 @@ void ArmDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
@@ -368,7 +368,7 @@ void ArmDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@@ -376,7 +376,7 @@ void ArmDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
+ end = cur + (value * kInstrSize);
}
}
} else {
@@ -384,7 +384,7 @@ void ArmDebugger::Debug() {
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
+ end = cur + (value2 * kInstrSize);
}
}
@@ -427,7 +427,7 @@ void ArmDebugger::Debug() {
PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "stop") == 0) {
int32_t value;
- intptr_t stop_pc = sim_->get_pc() - Instruction::kInstrSize;
+ intptr_t stop_pc = sim_->get_pc() - kInstrSize;
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
@@ -632,9 +632,8 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
- CHECK_EQ(0,
- memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset), Instruction::kInstrSize));
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@@ -736,7 +735,7 @@ int32_t Simulator::get_register(int reg) const {
// See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
if (reg >= num_registers) return 0;
// End stupid code.
- return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
+ return registers_[reg] + ((reg == pc) ? Instruction::kPcLoadDelta : 0);
}
@@ -931,8 +930,7 @@ void Simulator::TrashCallerSaveRegisters() {
registers_[12] = 0x50BAD4U;
}
-
-int Simulator::ReadW(int32_t addr, Instruction* instr) {
+int Simulator::ReadW(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
@@ -941,7 +939,7 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
return *ptr;
}
-int Simulator::ReadExW(int32_t addr, Instruction* instr) {
+int Simulator::ReadExW(int32_t addr) {
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
@@ -950,7 +948,7 @@ int Simulator::ReadExW(int32_t addr, Instruction* instr) {
return *ptr;
}
-void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
+void Simulator::WriteW(int32_t addr, int value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
@@ -961,7 +959,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
*ptr = value;
}
-int Simulator::WriteExW(int32_t addr, int value, Instruction* instr) {
+int Simulator::WriteExW(int32_t addr, int value) {
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
@@ -974,7 +972,7 @@ int Simulator::WriteExW(int32_t addr, int value, Instruction* instr) {
}
}
-uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
+uint16_t Simulator::ReadHU(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
@@ -983,7 +981,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
return *ptr;
}
-int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
+int16_t Simulator::ReadH(int32_t addr) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
@@ -992,7 +990,7 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
return *ptr;
}
-uint16_t Simulator::ReadExHU(int32_t addr, Instruction* instr) {
+uint16_t Simulator::ReadExHU(int32_t addr) {
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
@@ -1001,7 +999,7 @@ uint16_t Simulator::ReadExHU(int32_t addr, Instruction* instr) {
return *ptr;
}
-void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
+void Simulator::WriteH(int32_t addr, uint16_t value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
@@ -1012,7 +1010,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
*ptr = value;
}
-void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
+void Simulator::WriteH(int32_t addr, int16_t value) {
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
@@ -1023,7 +1021,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
*ptr = value;
}
-int Simulator::WriteExH(int32_t addr, uint16_t value, Instruction* instr) {
+int Simulator::WriteExH(int32_t addr, uint16_t value) {
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
@@ -1099,6 +1097,14 @@ int32_t* Simulator::ReadDW(int32_t addr) {
return ptr;
}
+int32_t* Simulator::ReadExDW(int32_t addr) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ local_monitor_.NotifyLoadExcl(addr, TransactionSize::DoubleWord);
+ global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+ &global_monitor_processor_);
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return ptr;
+}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// All supported ARM targets allow unaligned accesses, so we don't need to
@@ -1112,6 +1118,19 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
*ptr = value2;
}
+int Simulator::WriteExDW(int32_t addr, int32_t value1, int32_t value2) {
+ base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+ if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::DoubleWord) &&
+ global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+ addr, &global_monitor_processor_)) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr++ = value1;
+ *ptr = value2;
+ return 0;
+ } else {
+ return 1;
+ }
+}
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
@@ -1561,25 +1580,23 @@ void Simulator::HandleVList(Instruction* instr) {
for (int reg = vd; reg < vd + num_regs; reg++) {
if (precision == kSinglePrecision) {
if (load) {
- set_s_register_from_sinteger(
- reg, ReadW(reinterpret_cast<int32_t>(address), instr));
+ set_s_register_from_sinteger(reg,
+ ReadW(reinterpret_cast<int32_t>(address)));
} else {
WriteW(reinterpret_cast<int32_t>(address),
- get_sinteger_from_s_register(reg), instr);
+ get_sinteger_from_s_register(reg));
}
address += 1;
} else {
if (load) {
- int32_t data[] = {
- ReadW(reinterpret_cast<int32_t>(address), instr),
- ReadW(reinterpret_cast<int32_t>(address + 1), instr)
- };
+ int32_t data[] = {ReadW(reinterpret_cast<int32_t>(address)),
+ ReadW(reinterpret_cast<int32_t>(address + 1))};
set_d_register(reg, reinterpret_cast<uint32_t*>(data));
} else {
uint32_t data[2];
get_d_register(reg, data);
- WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
- WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
+ WriteW(reinterpret_cast<int32_t>(address), data[0]);
+ WriteW(reinterpret_cast<int32_t>(address + 1), data[1]);
}
address += 2;
}
@@ -2054,10 +2071,16 @@ void Simulator::DecodeType01(Instruction* instr) {
switch (instr->Bits(22, 21)) {
case 0: {
// Format(instr, "ldrex'cond 'rt, ['rn]");
- int value = ReadExW(addr, instr);
+ int value = ReadExW(addr);
set_register(rt, value);
break;
}
+ case 1: {
+ // Format(instr, "ldrexd'cond 'rt, ['rn]");
+ int* rn_data = ReadExDW(addr);
+ set_dw_register(rt, rn_data);
+ break;
+ }
case 2: {
// Format(instr, "ldrexb'cond 'rt, ['rn]");
uint8_t value = ReadExBU(addr);
@@ -2066,7 +2089,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case 3: {
// Format(instr, "ldrexh'cond 'rt, ['rn]");
- uint16_t value = ReadExHU(addr, instr);
+ uint16_t value = ReadExHU(addr);
set_register(rt, value);
break;
}
@@ -2087,7 +2110,16 @@ void Simulator::DecodeType01(Instruction* instr) {
case 0: {
// Format(instr, "strex'cond 'rd, 'rm, ['rn]");
int value = get_register(rt);
- int status = WriteExW(addr, value, instr);
+ int status = WriteExW(addr, value);
+ set_register(rd, status);
+ break;
+ }
+ case 1: {
+ // Format(instr, "strexd'cond 'rd, 'rm, ['rn]");
+ DCHECK_EQ(rt % 2, 0);
+ int32_t value1 = get_register(rt);
+ int32_t value2 = get_register(rt + 1);
+ int status = WriteExDW(addr, value1, value2);
set_register(rd, status);
break;
}
@@ -2101,7 +2133,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case 3: {
// Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
uint16_t value = get_register(rt);
- int status = WriteExH(addr, value, instr);
+ int status = WriteExH(addr, value);
set_register(rd, status);
break;
}
@@ -2223,19 +2255,19 @@ void Simulator::DecodeType01(Instruction* instr) {
} else if (instr->HasH()) {
if (instr->HasSign()) {
if (instr->HasL()) {
- int16_t val = ReadH(addr, instr);
+ int16_t val = ReadH(addr);
set_register(rd, val);
} else {
int16_t val = get_register(rd);
- WriteH(addr, val, instr);
+ WriteH(addr, val);
}
} else {
if (instr->HasL()) {
- uint16_t val = ReadHU(addr, instr);
+ uint16_t val = ReadHU(addr);
set_register(rd, val);
} else {
uint16_t val = get_register(rd);
- WriteH(addr, val, instr);
+ WriteH(addr, val);
}
}
} else {
@@ -2272,7 +2304,7 @@ void Simulator::DecodeType01(Instruction* instr) {
case BLX: {
uint32_t old_pc = get_pc();
set_pc(get_register(rm));
- set_register(lr, old_pc + Instruction::kInstrSize);
+ set_register(lr, old_pc + kInstrSize);
break;
}
case BKPT: {
@@ -2604,9 +2636,9 @@ void Simulator::DecodeType2(Instruction* instr) {
}
} else {
if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
+ set_register(rd, ReadW(addr));
} else {
- WriteW(addr, get_register(rd), instr);
+ WriteW(addr, get_register(rd));
}
}
}
@@ -2772,6 +2804,11 @@ void Simulator::DecodeType3(Instruction* instr) {
set_register(rd, rn_val + static_cast<int16_t>(rm_val));
}
}
+ } else if (instr->Bits(27, 16) == 0x6BF &&
+ instr->Bits(11, 4) == 0xF3) {
+ // Rev.
+ uint32_t rm_val = get_register(instr->RmValue());
+ set_register(rd, ByteReverse(rm_val));
} else {
UNREACHABLE();
}
@@ -3021,9 +3058,9 @@ void Simulator::DecodeType3(Instruction* instr) {
}
} else {
if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
+ set_register(rd, ReadW(addr));
} else {
- WriteW(addr, get_register(rd), instr);
+ WriteW(addr, get_register(rd));
}
}
}
@@ -3046,7 +3083,7 @@ void Simulator::DecodeType5(Instruction* instr) {
int off = (instr->SImmed24Value() << 2);
intptr_t pc_address = get_pc();
if (instr->HasLink()) {
- set_register(lr, pc_address + Instruction::kInstrSize);
+ set_register(lr, pc_address + kInstrSize);
}
int pc_reg = get_register(pc);
set_pc(pc_reg + off);
@@ -3790,10 +3827,10 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
DCHECK_EQ(address % 4, 0);
if (instr->HasL()) {
// Load single from memory: vldr.
- set_s_register_from_sinteger(vd, ReadW(address, instr));
+ set_s_register_from_sinteger(vd, ReadW(address));
} else {
// Store single to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(vd), instr);
+ WriteW(address, get_sinteger_from_s_register(vd));
}
break;
}
@@ -3846,17 +3883,14 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
DCHECK_EQ(address % 4, 0);
if (instr->HasL()) {
// Load double from memory: vldr.
- int32_t data[] = {
- ReadW(address, instr),
- ReadW(address + 4, instr)
- };
+ int32_t data[] = {ReadW(address), ReadW(address + 4)};
set_d_register(vd, reinterpret_cast<uint32_t*>(data));
} else {
// Store double to memory: vstr.
uint32_t data[2];
get_d_register(vd, data);
- WriteW(address, data[0], instr);
- WriteW(address + 4, data[1], instr);
+ WriteW(address, data[0]);
+ WriteW(address + 4, data[1]);
}
break;
}
@@ -5403,8 +5437,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
while (r < regs) {
uint32_t data[2];
get_d_register(Vd + r, data);
- WriteW(address, data[0], instr);
- WriteW(address + 4, data[1], instr);
+ WriteW(address, data[0]);
+ WriteW(address + 4, data[1]);
address += 8;
r++;
}
@@ -5443,8 +5477,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int r = 0;
while (r < regs) {
uint32_t data[2];
- data[0] = ReadW(address, instr);
- data[1] = ReadW(address + 4, instr);
+ data[0] = ReadW(address);
+ data[1] = ReadW(address + 4);
set_d_register(Vd + r, data);
address += 8;
r++;
@@ -5697,12 +5731,10 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
}
if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr)
- + Instruction::kInstrSize);
+ set_register(pc, reinterpret_cast<int32_t>(instr) + kInstrSize);
}
}
-
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 6eb3cf6c6b..69e5cdbe3d 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -276,21 +276,23 @@ class Simulator : public SimulatorBase {
inline void WriteB(int32_t addr, int8_t value);
int WriteExB(int32_t addr, uint8_t value);
- inline uint16_t ReadHU(int32_t addr, Instruction* instr);
- inline int16_t ReadH(int32_t addr, Instruction* instr);
- uint16_t ReadExHU(int32_t addr, Instruction* instr);
+ inline uint16_t ReadHU(int32_t addr);
+ inline int16_t ReadH(int32_t addr);
+ uint16_t ReadExHU(int32_t addr);
// Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
- int WriteExH(int32_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int32_t addr, uint16_t value);
+ inline void WriteH(int32_t addr, int16_t value);
+ int WriteExH(int32_t addr, uint16_t value);
- inline int ReadW(int32_t addr, Instruction* instr);
- int ReadExW(int32_t addr, Instruction* instr);
- inline void WriteW(int32_t addr, int value, Instruction* instr);
- int WriteExW(int32_t addr, int value, Instruction* instr);
+ inline int ReadW(int32_t addr);
+ int ReadExW(int32_t addr);
+ inline void WriteW(int32_t addr, int value);
+ int WriteExW(int32_t addr, int value);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
+ int32_t* ReadExDW(int32_t addr);
+ int WriteExDW(int32_t addr, int32_t value1, int32_t value2);
// Executing is handled based on the instruction type.
// Both type 0 and type 1 rolled into one.
@@ -414,6 +416,7 @@ class Simulator : public SimulatorBase {
Byte = 1,
HalfWord = 2,
Word = 4,
+ DoubleWord = 8,
};
// The least-significant bits of the address are ignored. The number of bits
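The ReadEx*/WriteEx* simulator hooks declared above model ARM's exclusive monitors in software: a load-exclusive records a reservation for the address and transaction size, and a store-exclusive succeeds (returning 0, like strexd) only while that reservation is still held, otherwise it returns 1. A much-simplified sketch of such a local monitor (illustrative only; the simulator's real local/global monitors track more state and take locks):

    #include <cstdint>

    // Simplified, single-threaded model of an ARM local exclusive monitor.
    class LocalMonitorSketch {
     public:
      void NotifyLoadExcl(uint32_t addr, uint32_t size) {
        tagged_addr_ = addr;
        tagged_size_ = size;
        exclusive_ = true;
      }
      // Returns true if the store-exclusive may proceed (strexd status 0).
      bool NotifyStoreExcl(uint32_t addr, uint32_t size) {
        bool ok = exclusive_ && addr == tagged_addr_ && size == tagged_size_;
        exclusive_ = false;  // any store-exclusive clears the reservation
        return ok;
      }

     private:
      uint32_t tagged_addr_ = 0;
      uint32_t tagged_size_ = 0;
      bool exclusive_ = false;
    };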
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 72674b87a3..52df8143ef 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -276,7 +276,7 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
shift_amount_(shift_amount) {
DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
- DCHECK(!reg.IsSP());
+ DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
}
@@ -535,7 +535,7 @@ Address Assembler::target_pointer_address_at(Address pc) {
Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
- return Memory::Address_at(target_pointer_address_at(pc));
+ return Memory<Address>(target_pointer_address_at(pc));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
@@ -549,8 +549,8 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
- return GetCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2);
+ DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
+ return GetCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2);
}
}
@@ -570,7 +570,7 @@ Address Assembler::target_address_from_return_address(Address pc) {
// Call sequence on ARM64 is:
// ldr ip0, #... @ load from literal pool
// blr ip0
- Address candidate = pc - 2 * kInstructionSize;
+ Address candidate = pc - 2 * kInstrSize;
Instruction* instr = reinterpret_cast<Instruction*>(candidate);
USE(instr);
DCHECK(instr->IsLdrLiteralX());
@@ -598,10 +598,10 @@ void Assembler::deserialization_set_special_target_at(Address location,
target = location;
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
- Assembler::FlushICache(location, kInstructionSize);
+ Assembler::FlushICache(location, kInstrSize);
} else {
DCHECK_EQ(instr->InstructionBits(), 0);
- Memory::Address_at(location) = target;
+ Memory<Address>(location) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code. However,
// in this case, only the constant pool contents change. The instruction
@@ -612,7 +612,7 @@ void Assembler::deserialization_set_special_target_at(Address location,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
@@ -620,7 +620,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
ICacheFlushMode icache_flush_mode) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
- Memory::Address_at(target_pointer_address_at(pc)) = target;
+ Memory<Address>(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code. However,
// in this case, only the constant pool contents change. The instruction
@@ -635,7 +635,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, kInstructionSize);
+ Assembler::FlushICache(pc, kInstrSize);
}
}
}
@@ -711,8 +711,7 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
- heap->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -731,7 +730,7 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
@@ -762,9 +761,9 @@ Address RelocInfo::target_off_heap_target() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_));
+ IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = kNullAddress;
+ Memory<Address>(pc_) = kNullAddress;
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
@@ -874,8 +873,8 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
DCHECK_EQ(kStartOfLabelLinkChain, 0);
int offset = LinkAndGetByteOffsetTo(label);
- DCHECK(IsAligned(offset, kInstructionSize));
- return offset >> kInstructionSizeLog2;
+ DCHECK(IsAligned(offset, kInstrSize));
+ return offset >> kInstrSizeLog2;
}
@@ -1092,7 +1091,7 @@ Instr Assembler::ImmBarrierType(int imm2) {
}
unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
- DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
+ DCHECK((LSSize_offset + LSSize_width) == (kInstrSize * 8));
unsigned size = static_cast<Instr>(op >> LSSize_offset);
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
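
Throughout this file the patch replaces the old Memory::Address_at(p) accessor with the templated Memory<Address>(p). Below is a minimal sketch of the shape such an accessor takes, stated as an assumption for illustration rather than the literal V8 definition; Address stands for a pointer-sized integer.

#include <cstdint>

using Address = uintptr_t;  // assumed: addresses are pointer-sized integers

// Reinterpret a raw address as a reference to T so it can be read or assigned
// in place, e.g. Memory<Address>(location) = target;
template <typename T>
inline T& Memory(Address addr) {
  return *reinterpret_cast<T*>(addr);
}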
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index af3f59bd48..d41b1a7d7f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -208,10 +208,10 @@ uint32_t RelocInfo::wasm_call_tag() const {
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
if (instr->IsLdrLiteralX()) {
return static_cast<uint32_t>(
- Memory::Address_at(Assembler::target_pointer_address_at(pc_)));
+ Memory<Address>(Assembler::target_pointer_address_at(pc_)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
+ return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
}
}
@@ -347,7 +347,7 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
first_use_ = offset;
}
- if (CanBeShared(mode)) {
+ if (RelocInfo::IsShareableRelocMode(mode)) {
write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
} else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
// A zero data value is a placeholder and must not be shared.
@@ -391,7 +391,7 @@ int ConstPool::WorstCaseSize() {
// blr xzr
// nop
// All entries are 64-bit for now.
- return 4 * kInstructionSize + EntryCount() * kPointerSize;
+ return 4 * kInstrSize + EntryCount() * kPointerSize;
}
@@ -403,10 +403,10 @@ int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
// ldr xzr, #pool_size
// blr xzr
// nop ;; if not 64-bit aligned
- int prologue_size = require_jump ? kInstructionSize : 0;
- prologue_size += 2 * kInstructionSize;
- prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
- 0 : kInstructionSize;
+ int prologue_size = require_jump ? kInstrSize : 0;
+ prologue_size += 2 * kInstrSize;
+ prologue_size +=
+ IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
// All entries are 64-bit for now.
return prologue_size + EntryCount() * kPointerSize;
@@ -476,11 +476,6 @@ void ConstPool::Clear() {
}
-bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
- return RelocInfo::IsNone(mode) || RelocInfo::IsShareableRelocMode(mode);
-}
-
-
void ConstPool::EmitMarker() {
// A constant pool size is expressed in number of 32-bits words.
// Currently all entries are 64-bit.
@@ -601,8 +596,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
Instruction* instr = reinterpret_cast<Instruction*>(pc);
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
- UpdateCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2,
+ DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
+ UpdateCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2,
request.code_stub()->GetCode());
break;
}
@@ -959,12 +954,12 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
- return RoundUp(size, kInstructionSize) / kInstructionSize;
+ return RoundUp(size, kInstrSize) / kInstrSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
if ((instr->Mask(ExceptionMask) == HLT) &&
(instr->ImmException() == kImmExceptionIsPrintf)) {
- return kPrintfLength / kInstructionSize;
+ return kPrintfLength / kInstrSize;
}
#endif
if (IsConstantPoolAt(instr)) {
@@ -3938,7 +3933,7 @@ void Assembler::dcptr(Label* label) {
// references are not instructions so while unbound they are encoded as
// two consecutive brk instructions. The two 16-bit immediates are used
// to encode the offset.
- offset >>= kInstructionSizeLog2;
+ offset >>= kInstrSizeLog2;
DCHECK(is_int32(offset));
uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
@@ -4069,13 +4064,13 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
- DCHECK_LE(RoundUp(len, kInstructionSize), static_cast<size_t>(kGap));
+ DCHECK_LE(RoundUp(len, kInstrSize), static_cast<size_t>(kGap));
EmitData(string, static_cast<int>(len));
// Pad with nullptr characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
- static_assert(sizeof(pad) == kInstructionSize,
+ static_assert(sizeof(pad) == kInstrSize,
"Size of padding must match instruction size.");
- EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
+ EmitData(pad, RoundUp(pc_offset(), kInstrSize) - pc_offset());
}
@@ -4103,6 +4098,10 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
return;
}
// Fall through if Serializer is enabled.
+#else
+ // Make sure we haven't dynamically enabled simulator code when there is no
+ // simulator built in.
+ DCHECK(!options().enable_simulator_code);
#endif
if (params & BREAK) {
@@ -4422,7 +4421,7 @@ bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
bool Assembler::IsImmLLiteral(int64_t offset) {
- int inst_size = static_cast<int>(kInstructionSizeLog2);
+ int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
DCHECK_GT(offset, 0);
@@ -4810,7 +4809,7 @@ void Assembler::near_call(HeapObjectRequest request) {
}
void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstructionSize;
+ int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
no_const_pool_before_ = pc_limit;
// Make sure the pool won't be blocked for too long.
@@ -4862,7 +4861,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Check that the code buffer is large enough before emitting the constant
// pool (this includes the gap to the relocation information).
- int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
+ int needed_space = worst_case_size + kGap + 1 * kInstrSize;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
@@ -4881,7 +4880,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
// Account for the branch around the veneers and the guard.
- int protection_offset = 2 * kInstructionSize;
+ int protection_offset = 2 * kInstrSize;
return pc_offset() > max_reachable_pc - margin - protection_offset -
static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
@@ -5018,10 +5017,10 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
int rd_code = expected_adr->Rd();
for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
- CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
+ CHECK(InstructionAt((i + 1) * kInstrSize)->IsNop(ADR_FAR_NOP));
}
Instruction* expected_movz =
- InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
+ InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstrSize);
CHECK(expected_movz->IsMovz() &&
(expected_movz->ImmMoveWide() == 0) &&
(expected_movz->ShiftMoveWide() == 0));
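
One of the hunks above (ConstantPoolSizeAt) rounds an inlined debug-message string up to whole kInstrSize slots. A small worked example of that arithmetic, using the constants as renamed by this patch (kDebugMessageOffset comes from instructions-arm64.h further down in the diff); this is an illustrative sketch, not V8 code.

#include <cassert>
#include <cstring>

constexpr int kInstrSize = 4;
constexpr int kDebugMessageOffset = 3 * kInstrSize;  // 12 bytes

constexpr int RoundUp(int x, int multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  const char* message = "stack check";  // 11 characters
  int size = static_cast<int>(kDebugMessageOffset + std::strlen(message) + 1);
  // 12 + 11 + 1 = 24 bytes -> exactly 6 instruction slots.
  assert(RoundUp(size, kInstrSize) / kInstrSize == 6);
  return 0;
}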
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index e2945d5999..b42b80f9ca 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -407,6 +407,7 @@ constexpr Register NoReg = Register::no_reg();
constexpr VRegister NoVReg = VRegister::no_reg();
constexpr CPURegister NoCPUReg = CPURegister::no_reg();
constexpr Register no_reg = NoReg;
+constexpr VRegister no_dreg = NoVReg;
#define DEFINE_REGISTER(register_class, name, ...) \
constexpr register_class name = register_class::Create<__VA_ARGS__>()
@@ -848,7 +849,6 @@ class ConstPool {
void Clear();
private:
- bool CanBeShared(RelocInfo::Mode mode);
void EmitMarker();
void EmitGuard();
void EmitEntries();
@@ -882,7 +882,7 @@ class ConstPool {
// -----------------------------------------------------------------------------
// Assembler.
-class Assembler : public AssemblerBase {
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -1008,8 +1008,6 @@ class Assembler : public AssemblerBase {
static constexpr int kSpecialTargetSize = 0;
// The sizes of the call sequences emitted by MacroAssembler::Call.
- // Wherever possible, use MacroAssembler::CallSize instead of these constants,
- // as it will choose the correct value for a given relocation mode.
//
// A "near" call is encoded in a BL immediate instruction:
// bl target
@@ -1017,8 +1015,8 @@ class Assembler : public AssemblerBase {
// whereas a "far" call will be encoded like this:
// ldr temp, =target
// blr temp
- static constexpr int kNearCallSize = 1 * kInstructionSize;
- static constexpr int kFarCallSize = 2 * kInstructionSize;
+ static constexpr int kNearCallSize = 1 * kInstrSize;
+ static constexpr int kFarCallSize = 2 * kInstrSize;
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
@@ -1034,20 +1032,10 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Check the size of the code generated since the given label. This function
- // is used primarily to work around comparisons between signed and unsigned
- // quantities, since V8 uses both.
- // TODO(jbramley): Work out what sign to use for these things and if possible,
- // change things to be consistent.
- void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
- DCHECK_GE(size, 0);
- DCHECK_EQ(static_cast<uint64_t>(size), SizeOfCodeGeneratedSince(label));
- }
-
// Return the number of instructions generated from label to the
// current position.
uint64_t InstructionsGeneratedSince(const Label* label) {
- return SizeOfCodeGeneratedSince(label) / kInstructionSize;
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
  // Prevent constant pool emission until EndBlockConstPool is called.
@@ -3198,7 +3186,7 @@ class Assembler : public AssemblerBase {
// The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
- static constexpr int kMaxVeneerCodeSize = 1 * kInstructionSize;
+ static constexpr int kMaxVeneerCodeSize = 1 * kInstrSize;
void RecordVeneerPool(int location_offset, int size);
// Emits veneers for branches that are approaching their maximum range.
@@ -3423,13 +3411,13 @@ class Assembler : public AssemblerBase {
// Set how far from current pc the next constant pool check will be.
void SetNextConstPoolCheckIn(int instructions) {
- next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
+ next_constant_pool_check_ = pc_offset() + instructions * kInstrSize;
}
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
- STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
+ STATIC_ASSERT(sizeof(instruction) == kInstrSize);
DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
memcpy(pc_, &instruction, sizeof(instruction));
@@ -3614,7 +3602,7 @@ class PatchingAssembler : public Assembler {
// Note that the instruction cache will not be flushed.
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
- : Assembler(options, start, count * kInstructionSize + kGap) {
+ : Assembler(options, start, count * kInstrSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 7a5f06c492..328983f42c 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -215,7 +215,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// The entry hook is a Push (stp) instruction, followed by a near call.
static const unsigned int kProfileEntryHookCallSize =
- (1 * kInstructionSize) + Assembler::kNearCallSize;
+ (1 * kInstrSize) + Assembler::kNearCallSize;
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
@@ -249,7 +249,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ HardAbortScope hard_aborts(masm);
// Save all kCallerSaved registers (including lr), since this can be called
// from anywhere.
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 1b87ce572c..389f4818d5 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -28,10 +28,10 @@ namespace internal {
constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
-const unsigned kInstructionSize = 4;
-const unsigned kInstructionSizeLog2 = 2;
-const unsigned kLoadLiteralScaleLog2 = 2;
-const unsigned kMaxLoadLiteralRange = 1 * MB;
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+constexpr size_t kLoadLiteralScaleLog2 = 2;
+constexpr size_t kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;
@@ -42,7 +42,7 @@ const int kFirstCalleeSavedRegisterIndex = 19;
const int kNumberOfCalleeSavedVRegisters = 8;
const int kFirstCalleeSavedVRegisterIndex = 8;
// Callee saved registers with no specific purpose in JS are x19-x25.
-const unsigned kJSCalleeSavedRegList = 0x03f80000;
+const size_t kJSCalleeSavedRegList = 0x03f80000;
const int kWRegSizeInBits = 32;
const int kWRegSizeInBitsLog2 = 5;
const int kWRegSize = kWRegSizeInBits >> 3;
@@ -329,36 +329,6 @@ inline Condition NegateCondition(Condition cond) {
return static_cast<Condition>(cond ^ 1);
}
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cond) {
- switch (cond) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- case eq:
- return eq;
- default:
- // In practice this function is only used with a condition coming from
- // TokenToCondition in lithium-codegen-arm64.cc. Any other condition is
- // invalid as it doesn't necessary make sense to reverse it (consider
- // 'mi' for instance).
- UNREACHABLE();
- }
-}
-
enum FlagsUpdate {
SetFlags = 1,
LeaveFlags = 0
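
The NegateCondition helper kept in the context above relies on AArch64 condition codes being encoded in inverse pairs that differ only in their low bit, which is also why the removed CommuteCondition needed a full switch: commuting a comparison is not a single bit-flip. A hedged sketch of the pairing; the enumerator values follow the architectural encoding.

enum Condition : int { eq = 0, ne = 1, hs = 2, lo = 3, mi = 4, pl = 5 };

constexpr Condition NegateCondition(Condition cond) {
  // Flipping bit 0 maps each condition to its logical inverse.
  return static_cast<Condition>(cond ^ 1);
}

static_assert(NegateCondition(eq) == ne, "eq <-> ne");
static_assert(NegateCondition(lo) == hs, "lo <-> hs (unsigned <, >=)");
static_assert(NegateCondition(mi) == pl, "mi <-> pl (negative, non-negative)");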
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index 201dfaa423..c2181ddc40 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -106,10 +106,7 @@ void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
- DCHECK((instr->Bits(27, 24) == 0x4) ||
- (instr->Bits(27, 24) == 0x5) ||
- (instr->Bits(27, 24) == 0x6) ||
- (instr->Bits(27, 24) == 0x7) );
+ DCHECK_EQ(0x4, instr->Bits(27, 24) & 0xC); // 0x4, 0x5, 0x6, 0x7
switch (instr->Bits(31, 29)) {
case 0:
@@ -203,10 +200,7 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
- DCHECK((instr->Bits(27, 24) == 0x8) ||
- (instr->Bits(27, 24) == 0x9) ||
- (instr->Bits(27, 24) == 0xC) ||
- (instr->Bits(27, 24) == 0xD) );
+ DCHECK_EQ(0x8, instr->Bits(27, 24) & 0xA); // 0x8, 0x9, 0xC, 0xD
if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
DecodeNEONLoadStore(instr);
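
The two rewritten DCHECKs above fold four-way opcode-nibble comparisons into single mask tests. A short self-checking sketch confirming that the masks accept exactly the nibbles the old checks enumerated:

#include <cassert>

int main() {
  for (int bits = 0; bits <= 0xF; ++bits) {
    // DecodeBranchSystemException: (bits & 0xC) == 0x4 accepts 0x4..0x7.
    bool branch_sys = (bits & 0xC) == 0x4;
    assert(branch_sys == (bits >= 0x4 && bits <= 0x7));
    // DecodeLoadStore: (bits & 0xA) == 0x8 accepts 0x8, 0x9, 0xC, 0xD.
    bool load_store = (bits & 0xA) == 0x8;
    assert(load_store ==
           (bits == 0x8 || bits == 0x9 || bits == 0xC || bits == 0xD));
  }
  return 0;
}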
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index b2f534ac45..cb8925f779 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -277,7 +277,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Size of an entry of the second level deopt table. Since we do not generate
// a table for ARM64, the size is zero.
-const int Deoptimizer::table_entry_size_ = 0 * kInstructionSize;
+const int Deoptimizer::table_entry_size_ = 0 * kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index d344903d59..4c7ce77e4a 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -3917,7 +3917,7 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
case 'e': offset = instr->ImmTestBranch(); break;
default: UNREACHABLE();
}
- offset <<= kInstructionSizeLog2;
+ offset <<= kInstrSizeLog2;
char sign = '+';
if (offset < 0) {
sign = '-';
@@ -4106,21 +4106,15 @@ class BufferDisassembler : public v8::internal::DisassemblingDecoder {
v8::internal::Vector<char> out_buffer_;
};
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() { USE(converter_); }
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instr) {
+ USE(converter_); // avoid unused field warning
v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
BufferDisassembler disasm(buffer);
decoder.AppendVisitor(&disasm);
decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(instr));
- return v8::internal::kInstructionSize;
+ return v8::internal::kInstrSize;
}
@@ -4129,13 +4123,13 @@ int Disassembler::ConstantPoolSizeAt(byte* instr) {
reinterpret_cast<v8::internal::Instruction*>(instr));
}
-
-void Disassembler::Disassemble(FILE* file, byte* start, byte* end) {
+void Disassembler::Disassemble(FILE* file, byte* start, byte* end,
+ UnimplementedOpcodeAction) {
v8::internal::Decoder<v8::internal::DispatchingDecoderVisitor> decoder;
v8::internal::PrintDisassembler disasm(file);
decoder.AppendVisitor(&disasm);
- for (byte* pc = start; pc < end; pc += v8::internal::kInstructionSize) {
+ for (byte* pc = start; pc < end; pc += v8::internal::kInstrSize) {
decoder.Decode(reinterpret_cast<v8::internal::Instruction*>(pc));
}
}
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 4a10594590..503f31050f 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -159,7 +159,7 @@ double Instruction::ImmNEONFP64() const {
unsigned CalcLSDataSize(LoadStoreOp op) {
DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
- kInstructionSize * 8);
+ kInstrSize * 8);
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
@@ -197,16 +197,16 @@ int64_t Instruction::ImmPCOffset() {
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
- offset = ImmBranch() << kInstructionSizeLog2;
+ offset = ImmBranch() << kInstrSizeLog2;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
- offset = ImmUnresolvedInternalReference() << kInstructionSizeLog2;
+ offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bits
// registers.
- offset = ImmLLiteral() << kInstructionSizeLog2;
+ offset = ImmLLiteral() << kInstrSizeLog2;
}
return offset;
}
@@ -260,10 +260,10 @@ void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
void Instruction::SetBranchImmTarget(Instruction* target) {
- DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
- DCHECK(IsValidImmPCOffset(BranchType(),
- DistanceTo(target) >> kInstructionSizeLog2));
- int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
+ DCHECK(IsAligned(DistanceTo(target), kInstrSize));
+ DCHECK(
+ IsValidImmPCOffset(BranchType(), DistanceTo(target) >> kInstrSizeLog2));
+ int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
switch (BranchType()) {
@@ -295,10 +295,10 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(
const AssemblerOptions& options, Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
- DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
- DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
+ DCHECK(IsAligned(DistanceTo(target), kInstrSize));
+ DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
int32_t target_offset =
- static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
+ static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@@ -310,7 +310,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
- DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
+ DCHECK(IsAligned(DistanceTo(source), kInstrSize));
DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
Instr imm = Assembler::ImmLLiteral(
static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index bb1791becb..9ea15e55ad 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -104,11 +104,11 @@ class Instruction {
}
V8_INLINE const Instruction* following(int count = 1) const {
- return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ return InstructionAtOffset(count * static_cast<int>(kInstrSize));
}
V8_INLINE Instruction* following(int count = 1) {
- return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ return InstructionAtOffset(count * static_cast<int>(kInstrSize));
}
V8_INLINE const Instruction* preceding(int count = 1) const {
@@ -329,9 +329,8 @@ class Instruction {
// The range of the branch instruction, expressed as 'instr +- range'.
static int32_t ImmBranchRange(ImmBranchType branch_type) {
- return
- (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
- kInstructionSize;
+ return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 -
+ kInstrSize;
}
int ImmBranch() const {
@@ -419,14 +418,14 @@ class Instruction {
V8_INLINE const Instruction* InstructionAtOffset(
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
// The FUZZ_disasm test relies on no check being done.
- DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
+ DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
return this + offset;
}
V8_INLINE Instruction* InstructionAtOffset(
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
// The FUZZ_disasm test relies on no check being done.
- DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
+ DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
return this + offset;
}
@@ -534,9 +533,9 @@ const Instr kImmExceptionIsPrintf = 0xdeb1;
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
-const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
-const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
-const unsigned kPrintfLength = 3 * kInstructionSize;
+const unsigned kPrintfArgCountOffset = 1 * kInstrSize;
+const unsigned kPrintfArgPatternListOffset = 2 * kInstrSize;
+const unsigned kPrintfLength = 3 * kInstrSize;
const unsigned kPrintfMaxArgCount = 4;
@@ -557,12 +556,12 @@ const Instr kImmExceptionIsDebug = 0xdeb0;
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a nullptr-terminated ASCII string, padded to
-// kInstructionSize so that subsequent instructions are correctly aligned.
+// kInstrSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
-const unsigned kDebugCodeOffset = 1 * kInstructionSize;
-const unsigned kDebugParamsOffset = 2 * kInstructionSize;
-const unsigned kDebugMessageOffset = 3 * kInstructionSize;
+const unsigned kDebugCodeOffset = 1 * kInstrSize;
+const unsigned kDebugParamsOffset = 2 * kInstrSize;
+const unsigned kDebugMessageOffset = 3 * kInstrSize;
// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
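
The ImmBranchRange helper touched above converts an instruction-count immediate into a byte range by shifting by kInstrSizeLog2. A worked check of the formula with the architectural immediate widths (26 bits for B/BL, 19 for conditional branches and CBZ/CBNZ, 14 for TBZ/TBNZ); the widths are stated here as assumptions for illustration.

#include <cstdint>

constexpr int kInstrSize = 4;
constexpr int kInstrSizeLog2 = 2;

constexpr int32_t ImmBranchRange(int bitwidth) {
  return (1 << (bitwidth + kInstrSizeLog2)) / 2 - kInstrSize;
}

static_assert(ImmBranchRange(26) == 128 * 1024 * 1024 - 4, "B/BL: +/-128 MB");
static_assert(ImmBranchRange(19) == 1 * 1024 * 1024 - 4, "B.cond: +/-1 MB");
static_assert(ImmBranchRange(14) == 32 * 1024 - 4, "TBZ/TBNZ: +/-32 KB");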
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 357161d57f..bb1c22aff5 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -250,30 +250,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // argument count (argc)
- x11, // address of first argument (argv)
- x1 // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index b583d7ba14..62594241ec 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -780,18 +780,6 @@ void TurboAssembler::Mneg(const Register& rd, const Register& rn,
mneg(rd, rn, rm);
}
-void TurboAssembler::Mov(const Register& rd, const Register& rn) {
- DCHECK(allow_macro_instructions());
- DCHECK(!rd.IsZero());
- // Emit a register move only if the registers are distinct, or if they are
- // not X registers. Note that mov(w0, w0) is not a no-op because it clears
- // the top word of x0.
- if (!rd.Is(rn) || !rd.Is64Bits()) {
- Assembler::mov(rd, rn);
- }
-}
-
-
void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -830,6 +818,12 @@ void TurboAssembler::Rbit(const Register& rd, const Register& rn) {
rbit(rd, rn);
}
+void TurboAssembler::Rev(const Register& rd, const Register& rn) {
+ DCHECK(allow_macro_instructions());
+ DCHECK(!rd.IsZero());
+ rev(rd, rn);
+}
+
void TurboAssembler::Ret(const Register& xn) {
DCHECK(allow_macro_instructions());
DCHECK(!xn.IsZero());
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 74583523af..b15ab47473 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -20,6 +20,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
@@ -305,23 +306,35 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
if (operand.NeedsRelocation(this)) {
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
+ Address addr = static_cast<Address>(operand.ImmediateValue());
+ ExternalReference reference = bit_cast<ExternalReference>(addr);
+ IndirectLoadExternalReference(rd, reference);
+ return;
+ } else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
+ Handle<HeapObject> x(
+ reinterpret_cast<HeapObject**>(operand.ImmediateValue()));
+ IndirectLoadConstant(rd, x);
+ return;
+ }
+ }
+ }
Ldr(dst, operand);
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
Mov(dst, operand.ImmediateValue());
-
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Emit a shift instruction if moving a shifted register. This operation
// could also be achieved using an orr instruction (like orn used by Mvn),
// but using a shift instruction makes the disassembly clearer.
EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
-
} else if (operand.IsExtendedRegister()) {
// Emit an extend instruction if moving an extended register. This handles
// extend with post-shift operations, too.
EmitExtendShift(dst, operand.reg(), operand.extend(),
operand.shift_amount());
-
} else {
// Otherwise, emit a register move only if the registers are distinct, or
// if they are not X registers.
@@ -347,16 +360,6 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
}
-void TurboAssembler::Mov(const Register& rd, ExternalReference reference) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadExternalReference(rd, reference);
- return;
- }
- }
- Mov(rd, Operand(reference));
-}
-
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xFF);
@@ -1049,8 +1052,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
- PushPreamble(count, size);
PushHelper(count, size, src0, src1, src2, src3);
}
@@ -1062,8 +1065,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
int size = src0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
- PushPreamble(count, size);
PushHelper(4, size, src0, src1, src2, src3);
PushHelper(count - 4, size, src4, src5, src6, src7);
}
@@ -1078,9 +1081,9 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
PopHelper(count, size, dst0, dst1, dst2, dst3);
- PopPostamble(count, size);
}
void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
@@ -1095,31 +1098,26 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
int size = dst0.SizeInBytes();
+ DCHECK_EQ(0, (size * count) % 16);
PopHelper(4, size, dst0, dst1, dst2, dst3);
PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
- PopPostamble(count, size);
}
void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
+ DCHECK_EQ(0, size % 16);
- PushPreamble(size);
// Reserve room for src0 and push src1.
str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
str(src0, MemOperand(sp, src1.SizeInBytes()));
}
-
-void MacroAssembler::PushPopQueue::PushQueued(
- PreambleDirective preamble_directive) {
+void MacroAssembler::PushPopQueue::PushQueued() {
+ DCHECK_EQ(0, size_ % 16);
if (queued_.empty()) return;
- if (preamble_directive == WITH_PREAMBLE) {
- masm_->PushPreamble(size_);
- }
-
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
@@ -1141,6 +1139,7 @@ void MacroAssembler::PushPopQueue::PushQueued(
void MacroAssembler::PushPopQueue::PopQueued() {
+ DCHECK_EQ(0, size_ % 16);
if (queued_.empty()) return;
size_t count = queued_.size();
@@ -1159,14 +1158,13 @@ void MacroAssembler::PushPopQueue::PopQueued() {
batch[0], batch[1], batch[2], batch[3]);
}
- masm_->PopPostamble(size_);
queued_.clear();
}
void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
- PushPreamble(registers.Count(), size);
// Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
@@ -1181,6 +1179,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
// Pop up to four registers at a time.
while (!registers.IsEmpty()) {
@@ -1192,12 +1191,9 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
int count = count_before - registers.Count();
PopHelper(count, size, dst0, dst1, dst2, dst3);
}
- PopPostamble(registers.Count(), size);
}
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
- PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
-
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(count);
@@ -1316,39 +1312,6 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
}
}
-void TurboAssembler::PushPreamble(Operand total_size) {
- if (total_size.IsZero()) return;
-
- // The stack pointer must be aligned to 16 bytes on entry, and the total
- // size of the specified registers must also be a multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
-}
-
-void TurboAssembler::PopPostamble(Operand total_size) {
- if (total_size.IsZero()) return;
-
- // The stack pointer must be aligned to 16 bytes on entry, and the total
- // size of the specified registers must also be a multiple of 16 bytes.
- if (total_size.IsImmediate()) {
- DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
- }
-
- // Don't check access size for non-immediate sizes. It's difficult to do
- // well, and it will be caught by hardware (or the simulator) anyway.
-}
-
-void TurboAssembler::PushPreamble(int count, int size) {
- PushPreamble(count * size);
-}
-void TurboAssembler::PopPostamble(int count, int size) {
- PopPostamble(count * size);
-}
-
void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
@@ -1429,7 +1392,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
void TurboAssembler::AssertSpAligned() {
if (emit_debug_code()) {
- TrapOnAbortScope trap_on_abort_scope(this); // Avoid calls to Abort.
+ HardAbortScope hard_abort(this); // Avoid calls to Abort.
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
UseScratchRegisterScope scope(this);
@@ -1563,24 +1526,12 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
- Move(result, Handle<HeapObject>::cast(object));
+ Mov(result, Handle<HeapObject>::cast(object));
} else {
Mov(result, Operand(Smi::cast(*object)));
}
}
-void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
-
-void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
- if (FLAG_embedded_builtins) {
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadConstant(dst, value);
- return;
- }
- }
- Mov(dst, value);
-}
-
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void TurboAssembler::Swap(Register lhs, Register rhs) {
@@ -1717,14 +1668,12 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
BlockPoolsScope scope(this);
#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
+ Label start;
+ Bind(&start);
#endif
Operand operand = Operand::EmbeddedCode(stub);
near_call(operand.heap_object_request());
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize);
-#endif
+ DCHECK_EQ(kNearCallSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallStub(CodeStub* stub) {
@@ -1871,9 +1820,9 @@ void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
- Move(destination, kRootRegister);
+ Mov(destination, kRootRegister);
} else {
- Add(destination, kRootRegister, Operand(offset));
+ Add(destination, kRootRegister, offset);
}
}
@@ -1896,7 +1845,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode,
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstructionSize;
+ uint64_t imm = reinterpret_cast<uint64_t>(pc_) + offset * kInstrSize;
Mov(temp, Immediate(imm, rmode));
Br(temp);
}
@@ -1916,8 +1865,8 @@ static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
// address at this point, and needs to be encoded as-is.
if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
offset -= reinterpret_cast<int64_t>(pc);
- DCHECK_EQ(offset % kInstructionSize, 0);
- offset = offset / static_cast<int>(kInstructionSize);
+ DCHECK_EQ(offset % kInstrSize, 0);
+ offset = offset / static_cast<int>(kInstrSize);
}
return offset;
}
@@ -1950,6 +1899,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -1970,26 +1920,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
void TurboAssembler::Call(Register target) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
-
Blr(target);
-
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
-#endif
}
-// TurboAssembler::CallSize is sensitive to changes in this function, as it
-// requires to know how many instructions are used to branch to the target.
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
@@ -1998,17 +1933,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
} else {
IndirectCall(target, rmode);
}
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
-#endif
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
BlockPoolsScope scope(this);
-#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
-#endif
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
@@ -2029,6 +1957,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -2045,19 +1974,12 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
} else {
IndirectCall(code.address(), rmode);
}
-
-#ifdef DEBUG
- // Check the size of the code generated.
- AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode));
-#endif
}
void TurboAssembler::Call(ExternalReference target) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- // Immediate is in charge of setting the relocation mode to
- // EXTERNAL_REFERENCE.
- Mov(temp, Immediate(target));
+ Mov(temp, target);
Call(temp);
}
@@ -2078,8 +2000,8 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
BlockPoolsScope scope(this);
#ifdef DEBUG
- Label start_call;
- Bind(&start_call);
+ Label start;
+ Bind(&start);
#endif
// The deoptimizer requires the deoptimization id to be in x16.
UseScratchRegisterScope temps(this);
@@ -2091,29 +2013,12 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
movz(temp, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(options().code_range_start);
- DCHECK_EQ(offset % kInstructionSize, 0);
- offset = offset / static_cast<int>(kInstructionSize);
+ DCHECK_EQ(offset % kInstrSize, 0);
+ offset = offset / static_cast<int>(kInstrSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
-#ifdef DEBUG
- AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize + kInstructionSize);
-#endif
-}
-
-int TurboAssembler::CallSize(Register target) {
- USE(target);
- return kInstructionSize;
-}
-
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
- USE(target);
- return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- USE(code);
- return CanUseNearCallOrJump(rmode) ? kNearCallSize : kFarCallSize;
+ DCHECK_EQ(kNearCallSize + kInstrSize, SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@@ -2536,7 +2441,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
- Move(scratch, CodeObject());
+ Mov(scratch, CodeObject());
Push(scratch, padreg);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
@@ -2810,7 +2715,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void MacroAssembler::CheckPageFlag(const Register& object,
const Register& scratch, int mask,
Condition cc, Label* condition_met) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
@@ -2822,7 +2727,7 @@ void MacroAssembler::CheckPageFlag(const Register& object,
void TurboAssembler::CheckPageFlagSet(const Register& object,
const Register& scratch, int mask,
Label* if_any_set) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
TestAndBranchIfAnySet(scratch, mask, if_any_set);
}
@@ -2830,7 +2735,7 @@ void TurboAssembler::CheckPageFlagSet(const Register& object,
void TurboAssembler::CheckPageFlagClear(const Register& object,
const Register& scratch, int mask,
Label* if_all_clear) {
- And(scratch, object, ~Page::kPageAlignmentMask);
+ And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}
@@ -2930,8 +2835,8 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter, object_parameter);
Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
- Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
- Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
+ Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreRegisters(registers);
@@ -3044,38 +2949,26 @@ void TurboAssembler::Abort(AbortReason reason) {
RegList old_tmp_list = TmpList()->list();
TmpList()->Combine(MacroAssembler::DefaultTmpList());
- if (use_real_aborts()) {
- // Avoid infinite recursion; Push contains some assertions that use Abort.
- NoUseRealAbortsScope no_real_aborts(this);
-
- Move(x1, Smi::FromInt(static_cast<int>(reason)));
-
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- } else {
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- }
- } else {
- // Load the string to pass to Printf.
- Label msg_address;
- Adr(x0, &msg_address);
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ Mov(w0, static_cast<int>(reason));
+ Call(ExternalReference::abort_with_reason());
+ return;
+ }
- // Call Printf directly to report the error.
- CallPrintf();
+ // Avoid infinite recursion; Push contains some assertions that use Abort.
+ HardAbortScope hard_aborts(this);
- // We need a way to stop execution on both the simulator and real hardware,
- // and Unreachable() is the best option.
- Unreachable();
+ Mov(x1, Smi::FromInt(static_cast<int>(reason)));
- // Emit the message string directly in the instruction stream.
- {
- BlockPoolsScope scope(this);
- Bind(&msg_address);
- EmitStringData(GetAbortReason(reason));
- }
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ } else {
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
}
TmpList()->set_list(old_tmp_list);
@@ -3216,7 +3109,8 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
#ifdef USE_SIMULATOR
- { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
+ {
+ InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
hlt(kImmExceptionIsPrintf);
dc32(arg_count); // kPrintfArgCountOffset
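
Several hunks above drop the old PushPreamble/PopPostamble bookkeeping in favour of direct DCHECK_EQ(0, (size * count) % 16) checks: AArch64 keeps the stack pointer 16-byte aligned, so every push or pop batch must move a multiple of 16 bytes, and an odd X-register push is padded with padreg (as the EnterExitFrame hunk does). A tiny sketch of that invariant, for illustration only:

constexpr int kXRegSize = 8;  // bytes per X register, as in constants-arm64.h

constexpr bool PushKeepsSpAligned(int register_count) {
  return (register_count * kXRegSize) % 16 == 0;
}

static_assert(PushKeepsSpAligned(2), "an X-register pair moves 16 bytes");
static_assert(!PushKeepsSpAligned(1),
              "a lone X register needs padreg to round the batch up to 16");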
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index a73fc2f47b..a2862748a6 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -63,6 +63,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = x2;
constexpr Register kOffHeapTrampolineRegister = ip0;
constexpr Register kRuntimeCallFunctionRegister = x1;
constexpr Register kRuntimeCallArgCountRegister = x0;
+constexpr Register kRuntimeCallArgvRegister = x11;
constexpr Register kWasmInstanceRegister = x7;
#define LS_MACRO_LIST(V) \
@@ -177,7 +178,7 @@ enum PreShiftImmMode {
kAnyShift // Allow any pre-shift.
};
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -185,27 +186,6 @@ class TurboAssembler : public TurboAssemblerBase {
: TurboAssemblerBase(isolate, options, buffer, buffer_size,
create_code_object) {}
- // The Abort method should call a V8 runtime function, but the CallRuntime
- // mechanism depends on CEntry. If use_real_aborts is false, Abort will
- // use a simpler abort mechanism that doesn't depend on CEntry.
- //
- // The purpose of this is to allow Aborts to be compiled whilst CEntry is
- // being generated.
- bool use_real_aborts() const { return use_real_aborts_; }
-
- class NoUseRealAbortsScope {
- public:
- explicit NoUseRealAbortsScope(TurboAssembler* tasm)
- : saved_(tasm->use_real_aborts_), tasm_(tasm) {
- tasm_->use_real_aborts_ = false;
- }
- ~NoUseRealAbortsScope() { tasm_->use_real_aborts_ = saved_; }
-
- private:
- bool saved_;
- TurboAssembler* tasm_;
- };
-
#if DEBUG
void set_allow_macro_instructions(bool value) {
allow_macro_instructions_ = value;
@@ -233,9 +213,7 @@ class TurboAssembler : public TurboAssemblerBase {
void Mov(const Register& rd, const Operand& operand,
DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
- void Mov(const Register& rd, ExternalReference reference);
void Mov(const Register& rd, uint64_t imm);
- inline void Mov(const Register& rd, const Register& rm);
void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
int vn_index) {
DCHECK(allow_macro_instructions());
@@ -256,8 +234,6 @@ class TurboAssembler : public TurboAssemblerBase {
// This is required for compatibility with architecture independent code.
// Remove if not needed.
- void Move(Register dst, Register src);
- void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Smi* src);
// Register swap. Note that the register operands should be distinct.
@@ -833,15 +809,6 @@ class TurboAssembler : public TurboAssemblerBase {
void CheckPageFlagClear(const Register& object, const Register& scratch,
int mask, Label* if_all_clear);
- // Perform necessary maintenance operations before a push or after a pop.
- //
- // Note that size is specified in bytes.
- void PushPreamble(Operand total_size);
- void PopPostamble(Operand total_size);
-
- void PushPreamble(int count, int size);
- void PopPostamble(int count, int size);
-
// Test the bits of register defined by bit_pattern, and branch if ANY of
// those bits are set. May corrupt the status flags.
inline void TestAndBranchIfAnySet(const Register& reg,
@@ -900,13 +867,6 @@ class TurboAssembler : public TurboAssemblerBase {
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode);
- // For every Call variant, there is a matching CallSize function that returns
- // the size (in bytes) of the call sequence.
- static int CallSize(Register target);
- int CallSize(Address target, RelocInfo::Mode rmode);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
-
// Calls a C function.
// The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
@@ -979,6 +939,7 @@ class TurboAssembler : public TurboAssemblerBase {
inline void Fmin(const VRegister& fd, const VRegister& fn,
const VRegister& fm);
inline void Rbit(const Register& rd, const Register& rn);
+ inline void Rev(const Register& rd, const Register& rn);
enum AdrHint {
// The target must be within the immediate range of adr.
@@ -1274,8 +1235,6 @@ class TurboAssembler : public TurboAssemblerBase {
CPURegList tmp_list_ = DefaultTmpList();
CPURegList fptmp_list_ = DefaultFPTmpList();
- bool use_real_aborts_ = true;
-
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to later
// be able to emit a veneer for this branch if necessary.
@@ -1609,7 +1568,7 @@ class MacroAssembler : public TurboAssembler {
// register sizes and types.
class PushPopQueue {
public:
- explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
+ explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) {}
~PushPopQueue() {
DCHECK(queued_.empty());
@@ -1620,11 +1579,7 @@ class MacroAssembler : public TurboAssembler {
queued_.push_back(rt);
}
- enum PreambleDirective {
- WITH_PREAMBLE,
- SKIP_PREAMBLE
- };
- void PushQueued(PreambleDirective preamble_directive = WITH_PREAMBLE);
+ void PushQueued();
void PopQueued();
private:
@@ -2076,7 +2031,7 @@ class InstructionAccurateScope BASE_EMBEDDED {
: tasm_(tasm)
#ifdef DEBUG
,
- size_(count * kInstructionSize)
+ size_(count * kInstrSize)
#endif
{
// Before blocking the const pool, see if it needs to be emitted.
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 09c447fdb5..5df4361c1b 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -1081,7 +1081,7 @@ void Simulator::CheckBreakNext() {
void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
- Instruction* end = start->InstructionAtOffset(count * kInstructionSize);
+ Instruction* end = start->InstructionAtOffset(count * kInstrSize);
for (Instruction* pc = start; pc < end; pc = pc->following()) {
disassembler_decoder_->Decode(pc);
}
@@ -3415,7 +3415,7 @@ void Simulator::VisitException(Instruction* instr) {
// The stop parameters are inlined in the code. Skip them:
// - Skip to the end of the message string.
size_t size = kDebugMessageOffset + strlen(message) + 1;
- pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
+ pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstrSize));
// - Verify that the unreachable marker is present.
DCHECK(pc_->Mask(ExceptionMask) == HLT);
DCHECK_EQ(pc_->ImmException(), kImmExceptionIsUnreachable);
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 4bd9294c2f..c97a759d1b 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -774,7 +774,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
void ExecuteInstruction() {
- DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstrSize));
CheckBreakNext();
Decode(pc_);
increment_pc();
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 604207bc0d..fd973c8a36 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -23,6 +23,7 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
@@ -329,6 +330,28 @@ UnoptimizedCompilationJob* AsmJs::NewCompilationJob(
return new AsmJsCompilationJob(parse_info, literal, allocator);
}
+namespace {
+inline bool IsValidAsmjsMemorySize(size_t size) {
+ // Enforce asm.js spec minimum size.
+ if (size < (1u << 12u)) return false;
+ // Enforce engine-limited maximum allocation size.
+ if (size > wasm::kV8MaxWasmMemoryBytes) return false;
+ // Enforce flag-limited maximum allocation size.
+ if (size > (FLAG_wasm_max_mem_pages * uint64_t{wasm::kWasmPageSize})) {
+ return false;
+ }
+ // Enforce power-of-2 sizes for 2^12 - 2^24.
+ if (size < (1u << 24u)) {
+ uint32_t size32 = static_cast<uint32_t>(size);
+ return base::bits::IsPowerOfTwo(size32);
+ }
+ // Enforce multiple of 2^24 for sizes >= 2^24
+ if ((size % (1u << 24u)) != 0) return false;
+ // All checks passed!
+ return true;
+}
+} // namespace
+
MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<FixedArray> wasm_data,
@@ -369,15 +392,9 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
}
memory->set_is_growable(false);
size_t size = NumberToSize(memory->byte_length());
- // TODO(mstarzinger): We currently only limit byte length of the buffer to
- // be a multiple of 8, we should enforce the stricter spec limits here.
- if (size % FixedTypedArrayBase::kMaxElementSize != 0) {
- ReportInstantiationFailure(script, position, "Unexpected heap size");
- return MaybeHandle<Object>();
- }
- // Currently WebAssembly only supports heap sizes within the uint32_t range.
- if (size > std::numeric_limits<uint32_t>::max()) {
- ReportInstantiationFailure(script, position, "Unexpected heap size");
+ // Check the asm.js heap size against the valid limits.
+ if (!IsValidAsmjsMemorySize(size)) {
+ ReportInstantiationFailure(script, position, "Invalid heap size");
return MaybeHandle<Object>();
}
} else {
@@ -392,8 +409,14 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
// An exception caused by the module start function will be set as pending
// and bypass the {ErrorThrower}, this happens in case of a stack overflow.
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+ if (thrower.error()) {
+ ScopedVector<char> error_reason(100);
+ SNPrintF(error_reason, "Internal wasm failure: %s", thrower.error_msg());
+ ReportInstantiationFailure(script, position, error_reason.start());
+ } else {
+ ReportInstantiationFailure(script, position, "Internal wasm failure");
+ }
thrower.Reset(); // Ensure exceptions do not propagate.
- ReportInstantiationFailure(script, position, "Internal wasm failure");
return MaybeHandle<Object>();
}
DCHECK(!thrower.error());
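
Note on the asm.js change above: the new IsValidAsmjsMemorySize helper encodes the spec's heap-size rules explicitly — at least 2^12 bytes, no larger than the engine and flag limits, a power of two below 2^24, and a multiple of 2^24 at or above that. The standalone C++ sketch below mirrors those rules; kMaxEngineBytes and kMaxFlagBytes are placeholder assumptions standing in for the real wasm limits, not the values used by V8.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for wasm::kV8MaxWasmMemoryBytes and the byte limit
// implied by --wasm-max-mem-pages; the real values live in wasm-limits.h.
constexpr uint64_t kMaxEngineBytes = uint64_t{1} << 31;
constexpr uint64_t kMaxFlagBytes = uint64_t{1} << 31;

bool IsValidAsmjsMemorySizeSketch(uint64_t size) {
  if (size < (1u << 12)) return false;       // below the asm.js spec minimum
  if (size > kMaxEngineBytes) return false;  // above the engine limit
  if (size > kMaxFlagBytes) return false;    // above the flag limit
  if (size < (1u << 24)) {
    // Between 2^12 and 2^24, the size must be a power of two.
    return (size & (size - 1)) == 0;
  }
  // At or above 2^24, the size must be a multiple of 2^24.
  return (size % (1u << 24)) == 0;
}

int main() {
  std::printf("%d\n", IsValidAsmjsMemorySizeSketch(1u << 16));         // 1: power of two
  std::printf("%d\n", IsValidAsmjsMemorySizeSketch((1u << 16) + 8));   // 0: not a power of two
  std::printf("%d\n", IsValidAsmjsMemorySizeSketch(3u * (1u << 24)));  // 1: multiple of 2^24
  return 0;
}
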
diff --git a/deps/v8/src/assembler-arch-inl.h b/deps/v8/src/assembler-arch-inl.h
new file mode 100644
index 0000000000..443c6ee1ae
--- /dev/null
+++ b/deps/v8/src/assembler-arch-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ASSEMBLER_ARCH_INL_H_
+#define V8_ASSEMBLER_ARCH_INL_H_
+
+#include "src/assembler-inl.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390-inl.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif // V8_ASSEMBLER_ARCH_INL_H_
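
Note: the new assembler-arch-inl.h is a pure dispatch header — the target architecture is selected once at preprocessing time so the rest of the code base includes a single name. A minimal runnable sketch of the same shape follows; the macros used are hypothetical stand-ins for V8_TARGET_ARCH_*, only the dispatch pattern is the point.

#include <cstdio>

#if defined(__x86_64__) || defined(_M_X64)
#define ARCH_NAME "x64"
#elif defined(__aarch64__) || defined(_M_ARM64)
#define ARCH_NAME "arm64"
#else
#define ARCH_NAME "other"
#endif

int main() {
  std::printf("compiled for: %s\n", ARCH_NAME);
  return 0;
}
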
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index a431c7442d..0f216052f3 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -48,8 +48,6 @@
namespace v8 {
namespace internal {
-const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
-
AssemblerOptions AssemblerOptions::Default(
Isolate* isolate, bool explicitly_support_serialization) {
AssemblerOptions options;
@@ -62,7 +60,6 @@ AssemblerOptions AssemblerOptions::Default(
// might be run on real hardware.
options.enable_simulator_code = !serializer;
#endif
- options.isolate_independent_code = isolate->ShouldLoadConstantsFromRootList();
options.inline_offheap_trampolines = !serializer;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
options.code_range_start =
@@ -150,533 +147,6 @@ unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfoWriter and RelocIterator
-//
-// Relocation information is written backwards in memory, from high addresses
-// towards low addresses, byte by byte. Therefore, in the encodings listed
-// below, the first byte listed it at the highest address, and successive
-// bytes in the record are at progressively lower addresses.
-//
-// Encoding
-//
-// The most common modes are given single-byte encodings. Also, it is
-// easy to identify the type of reloc info and skip unwanted modes in
-// an iteration.
-//
-// The encoding relies on the fact that there are fewer than 14
-// different relocation modes using standard non-compact encoding.
-//
-// The first byte of a relocation record has a tag in its low 2 bits:
-// Here are the record schemes, depending on the low tag and optional higher
-// tags.
-//
-// Low tag:
-// 00: embedded_object: [6-bit pc delta] 00
-//
-// 01: code_target: [6-bit pc delta] 01
-//
-// 10: wasm_stub_call: [6-bit pc delta] 10
-//
-// 11: long_record [6 bit reloc mode] 11
-// followed by pc delta
-// followed by optional data depending on type.
-//
-// If a pc delta exceeds 6 bits, it is split into a remainder that fits into
-// 6 bits and a part that does not. The latter is encoded as a long record
-// with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
-// the following record in the usual way. The long pc jump record has variable
-// length:
-// pc-jump: [PC_JUMP] 11
-// [7 bits data] 0
-// ...
-// [7 bits data] 1
-// (Bits 6..31 of pc delta, with leading zeroes
-// dropped, and last non-zero chunk tagged with 1.)
-
-const int kTagBits = 2;
-const int kTagMask = (1 << kTagBits) - 1;
-const int kLongTagBits = 6;
-
-const int kEmbeddedObjectTag = 0;
-const int kCodeTargetTag = 1;
-const int kWasmStubCallTag = 2;
-const int kDefaultTag = 3;
-
-const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
-const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
-const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
-
-const int kChunkBits = 7;
-const int kChunkMask = (1 << kChunkBits) - 1;
-const int kLastChunkTagBits = 1;
-const int kLastChunkTagMask = 1;
-const int kLastChunkTag = 1;
-
-uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
- // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
- // Otherwise write a variable length PC jump for the bits that do
- // not fit in the kSmallPCDeltaBits bits.
- if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
- WriteMode(RelocInfo::PC_JUMP);
- uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
- DCHECK_GT(pc_jump, 0);
- // Write kChunkBits size chunks of the pc_jump.
- for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
- byte b = pc_jump & kChunkMask;
- *--pos_ = b << kLastChunkTagBits;
- }
- // Tag the last chunk so it can be identified.
- *pos_ = *pos_ | kLastChunkTag;
- // Return the remaining kSmallPCDeltaBits of the pc_delta.
- return pc_delta & kSmallPCDeltaMask;
-}
-
-void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
- // Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
- pc_delta = WriteLongPCJump(pc_delta);
- *--pos_ = pc_delta << kTagBits | tag;
-}
-
-void RelocInfoWriter::WriteShortData(intptr_t data_delta) {
- *--pos_ = static_cast<byte>(data_delta);
-}
-
-void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
- STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
- *--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
-}
-
-void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
- // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
- pc_delta = WriteLongPCJump(pc_delta);
- WriteMode(rmode);
- *--pos_ = pc_delta;
-}
-
-void RelocInfoWriter::WriteIntData(int number) {
- for (int i = 0; i < kIntSize; i++) {
- *--pos_ = static_cast<byte>(number);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- number = number >> kBitsPerByte;
- }
-}
-
-void RelocInfoWriter::WriteData(intptr_t data_delta) {
- for (int i = 0; i < kIntptrSize; i++) {
- *--pos_ = static_cast<byte>(data_delta);
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- data_delta = data_delta >> kBitsPerByte;
- }
-}
-
-void RelocInfoWriter::Write(const RelocInfo* rinfo) {
- RelocInfo::Mode rmode = rinfo->rmode();
-#ifdef DEBUG
- byte* begin_pos = pos_;
-#endif
- DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
- DCHECK_GE(rinfo->pc() - reinterpret_cast<Address>(last_pc_), 0);
- // Use unsigned delta-encoding for pc.
- uint32_t pc_delta =
- static_cast<uint32_t>(rinfo->pc() - reinterpret_cast<Address>(last_pc_));
-
- // The two most common modes are given small tags, and usually fit in a byte.
- if (rmode == RelocInfo::EMBEDDED_OBJECT) {
- WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
- } else if (rmode == RelocInfo::CODE_TARGET) {
- WriteShortTaggedPC(pc_delta, kCodeTargetTag);
- DCHECK_LE(begin_pos - pos_, RelocInfo::kMaxCallSize);
- } else if (rmode == RelocInfo::WASM_STUB_CALL) {
- WriteShortTaggedPC(pc_delta, kWasmStubCallTag);
- } else {
- WriteModeAndPC(pc_delta, rmode);
- if (RelocInfo::IsComment(rmode)) {
- WriteData(rinfo->data());
- } else if (RelocInfo::IsDeoptReason(rmode)) {
- DCHECK_LT(rinfo->data(), 1 << kBitsPerByte);
- WriteShortData(rinfo->data());
- } else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode)) {
- WriteIntData(static_cast<int>(rinfo->data()));
- }
- }
- last_pc_ = reinterpret_cast<byte*>(rinfo->pc());
-#ifdef DEBUG
- DCHECK_LE(begin_pos - pos_, kMaxSize);
-#endif
-}
-
-inline int RelocIterator::AdvanceGetTag() {
- return *--pos_ & kTagMask;
-}
-
-inline RelocInfo::Mode RelocIterator::GetMode() {
- return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
- ((1 << kLongTagBits) - 1));
-}
-
-inline void RelocIterator::ReadShortTaggedPC() {
- rinfo_.pc_ += *pos_ >> kTagBits;
-}
-
-inline void RelocIterator::AdvanceReadPC() {
- rinfo_.pc_ += *--pos_;
-}
-
-void RelocIterator::AdvanceReadInt() {
- int x = 0;
- for (int i = 0; i < kIntSize; i++) {
- x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
- }
- rinfo_.data_ = x;
-}
-
-void RelocIterator::AdvanceReadData() {
- intptr_t x = 0;
- for (int i = 0; i < kIntptrSize; i++) {
- x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
- }
- rinfo_.data_ = x;
-}
-
-void RelocIterator::AdvanceReadLongPCJump() {
- // Read the 32-kSmallPCDeltaBits most significant bits of the
- // pc jump in kChunkBits bit chunks and shift them into place.
- // Stop when the last chunk is encountered.
- uint32_t pc_jump = 0;
- for (int i = 0; i < kIntSize; i++) {
- byte pc_jump_part = *--pos_;
- pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
- if ((pc_jump_part & kLastChunkTagMask) == 1) break;
- }
- // The least significant kSmallPCDeltaBits bits will be added
- // later.
- rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
-}
-
-inline void RelocIterator::ReadShortData() {
- uint8_t unsigned_b = *pos_;
- rinfo_.data_ = unsigned_b;
-}
-
-void RelocIterator::next() {
- DCHECK(!done());
- // Basically, do the opposite of RelocInfoWriter::Write.
- // Reading of data is as far as possible avoided for unwanted modes,
- // but we must always update the pc.
- //
- // We exit this loop by returning when we find a mode we want.
- while (pos_ > end_) {
- int tag = AdvanceGetTag();
- if (tag == kEmbeddedObjectTag) {
- ReadShortTaggedPC();
- if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
- } else if (tag == kCodeTargetTag) {
- ReadShortTaggedPC();
- if (SetMode(RelocInfo::CODE_TARGET)) return;
- } else if (tag == kWasmStubCallTag) {
- ReadShortTaggedPC();
- if (SetMode(RelocInfo::WASM_STUB_CALL)) return;
- } else {
- DCHECK_EQ(tag, kDefaultTag);
- RelocInfo::Mode rmode = GetMode();
- if (rmode == RelocInfo::PC_JUMP) {
- AdvanceReadLongPCJump();
- } else {
- AdvanceReadPC();
- if (RelocInfo::IsComment(rmode)) {
- if (SetMode(rmode)) {
- AdvanceReadData();
- return;
- }
- Advance(kIntptrSize);
- } else if (RelocInfo::IsDeoptReason(rmode)) {
- Advance();
- if (SetMode(rmode)) {
- ReadShortData();
- return;
- }
- } else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode) ||
- RelocInfo::IsDeoptId(rmode) ||
- RelocInfo::IsDeoptPosition(rmode)) {
- if (SetMode(rmode)) {
- AdvanceReadInt();
- return;
- }
- Advance(kIntSize);
- } else if (SetMode(static_cast<RelocInfo::Mode>(rmode))) {
- return;
- }
- }
- }
- }
- done_ = true;
-}
-
-RelocIterator::RelocIterator(Code* code, int mode_mask)
- : RelocIterator(code, code->raw_instruction_start(), code->constant_pool(),
- code->relocation_end(), code->relocation_start(),
- mode_mask) {}
-
-RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
- : RelocIterator(nullptr, code_reference.instruction_start(),
- code_reference.constant_pool(),
- code_reference.relocation_end(),
- code_reference.relocation_start(), mode_mask) {}
-
-RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code* code,
- int mode_mask)
- : RelocIterator(
- code, embedded_data->InstructionStartOfBuiltin(code->builtin_index()),
- code->constant_pool(),
- code->relocation_start() + code->relocation_size(),
- code->relocation_start(), mode_mask) {}
-
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
- : RelocIterator(nullptr, reinterpret_cast<Address>(desc.buffer), 0,
- desc.buffer + desc.buffer_size,
- desc.buffer + desc.buffer_size - desc.reloc_size,
- mode_mask) {}
-
-RelocIterator::RelocIterator(Vector<byte> instructions,
- Vector<const byte> reloc_info, Address const_pool,
- int mode_mask)
- : RelocIterator(nullptr, reinterpret_cast<Address>(instructions.start()),
- const_pool, reloc_info.start() + reloc_info.size(),
- reloc_info.start(), mode_mask) {}
-
-RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
- const byte* pos, const byte* end, int mode_mask)
- : pos_(pos), end_(end), mode_mask_(mode_mask) {
- // Relocation info is read backwards.
- DCHECK_GE(pos_, end_);
- rinfo_.host_ = host;
- rinfo_.pc_ = pc;
- rinfo_.constant_pool_ = constant_pool;
- if (mode_mask_ == 0) pos_ = end_;
- next();
-}
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// static
-bool RelocInfo::OffHeapTargetIsCodedSpecially() {
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
- defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
- return false;
-#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
- return true;
-#endif
-}
-
-Address RelocInfo::wasm_call_address() const {
- DCHECK_EQ(rmode_, WASM_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-void RelocInfo::set_wasm_call_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, WASM_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-Address RelocInfo::wasm_stub_call_address() const {
- DCHECK_EQ(rmode_, WASM_STUB_CALL);
- return Assembler::target_address_at(pc_, constant_pool_);
-}
-
-void RelocInfo::set_wasm_stub_call_address(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK_EQ(rmode_, WASM_STUB_CALL);
- Assembler::set_target_address_at(pc_, constant_pool_, address,
- icache_flush_mode);
-}
-
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
- IsWasmCall(rmode_));
- Assembler::set_target_address_at(pc_, constant_pool_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
- IsCodeTargetMode(rmode_)) {
- Code* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
- target_code);
- }
-}
-
-#ifdef DEBUG
-bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
- // Ensure there are no code targets or embedded objects present in the
- // deoptimization entries, they would require relocation after code
- // generation.
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::kApplyMask;
- RelocIterator it(desc, mode_mask);
- return !it.done();
-}
-#endif
-
-#ifdef ENABLE_DISASSEMBLER
-const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
- switch (rmode) {
- case NONE:
- return "no reloc";
- case EMBEDDED_OBJECT:
- return "embedded object";
- case CODE_TARGET:
- return "code target";
- case RELATIVE_CODE_TARGET:
- return "relative code target";
- case RUNTIME_ENTRY:
- return "runtime entry";
- case COMMENT:
- return "comment";
- case EXTERNAL_REFERENCE:
- return "external reference";
- case INTERNAL_REFERENCE:
- return "internal reference";
- case INTERNAL_REFERENCE_ENCODED:
- return "encoded internal reference";
- case OFF_HEAP_TARGET:
- return "off heap target";
- case DEOPT_SCRIPT_OFFSET:
- return "deopt script offset";
- case DEOPT_INLINING_ID:
- return "deopt inlining id";
- case DEOPT_REASON:
- return "deopt reason";
- case DEOPT_ID:
- return "deopt index";
- case CONST_POOL:
- return "constant pool";
- case VENEER_POOL:
- return "veneer pool";
- case WASM_CALL:
- return "internal wasm call";
- case WASM_STUB_CALL:
- return "wasm stub call";
- case JS_TO_WASM_CALL:
- return "js to wasm call";
- case NUMBER_OF_MODES:
- case PC_JUMP:
- UNREACHABLE();
- }
- return "unknown relocation type";
-}
-
-void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
- os << reinterpret_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
- if (IsComment(rmode_)) {
- os << " (" << reinterpret_cast<char*>(data_) << ")";
- } else if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
- os << " (" << data() << ")";
- } else if (rmode_ == DEOPT_REASON) {
- os << " ("
- << DeoptimizeReasonToString(static_cast<DeoptimizeReason>(data_)) << ")";
- } else if (rmode_ == EMBEDDED_OBJECT) {
- os << " (" << Brief(target_object()) << ")";
- } else if (rmode_ == EXTERNAL_REFERENCE) {
- if (isolate) {
- ExternalReferenceEncoder ref_encoder(isolate);
- os << " ("
- << ref_encoder.NameOfAddress(isolate, target_external_reference())
- << ") ";
- }
- os << " (" << reinterpret_cast<const void*>(target_external_reference())
- << ")";
- } else if (IsCodeTargetMode(rmode_)) {
- const Address code_target = target_address();
- Code* code = Code::GetCodeFromTargetAddress(code_target);
- DCHECK(code->IsCode());
- os << " (" << Code::Kind2String(code->kind());
- if (Builtins::IsBuiltin(code)) {
- os << " " << Builtins::name(code->builtin_index());
- } else if (code->kind() == Code::STUB) {
- os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
- }
- os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
- } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
- // Deoptimization bailouts are stored as runtime entries.
- DeoptimizeKind type;
- if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
- int id = GetDeoptimizationId(isolate, type);
- os << " (" << Deoptimizer::MessageFor(type) << " deoptimization bailout "
- << id << ")";
- }
- } else if (IsConstPool(rmode_)) {
- os << " (size " << static_cast<int>(data_) << ")";
- }
-
- os << "\n";
-}
-#endif // ENABLE_DISASSEMBLER
-
-#ifdef VERIFY_HEAP
-void RelocInfo::Verify(Isolate* isolate) {
- switch (rmode_) {
- case EMBEDDED_OBJECT:
- Object::VerifyPointer(isolate, target_object());
- break;
- case CODE_TARGET:
- case RELATIVE_CODE_TARGET: {
- // convert inline target address to code object
- Address addr = target_address();
- CHECK_NE(addr, kNullAddress);
- // Check that we can find the right code object.
- Code* code = Code::GetCodeFromTargetAddress(addr);
- Object* found = isolate->FindCodeObject(addr);
- CHECK(found->IsCode());
- CHECK(code->address() == HeapObject::cast(found)->address());
- break;
- }
- case INTERNAL_REFERENCE:
- case INTERNAL_REFERENCE_ENCODED: {
- Address target = target_internal_reference();
- Address pc = target_internal_reference_address();
- Code* code = Code::cast(isolate->FindCodeObject(pc));
- CHECK(target >= code->InstructionStart());
- CHECK(target <= code->InstructionEnd());
- break;
- }
- case OFF_HEAP_TARGET: {
- Address addr = target_off_heap_target();
- CHECK_NE(addr, kNullAddress);
- CHECK_NOT_NULL(InstructionStream::TryLookupCode(isolate, addr));
- break;
- }
- case RUNTIME_ENTRY:
- case COMMENT:
- case EXTERNAL_REFERENCE:
- case DEOPT_SCRIPT_OFFSET:
- case DEOPT_INLINING_ID:
- case DEOPT_REASON:
- case DEOPT_ID:
- case CONST_POOL:
- case VENEER_POOL:
- case WASM_CALL:
- case WASM_STUB_CALL:
- case JS_TO_WASM_CALL:
- case NONE:
- break;
- case NUMBER_OF_MODES:
- case PC_JUMP:
- UNREACHABLE();
- break;
- }
-}
-#endif // VERIFY_HEAP
-
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
int double_reach_bits) {
info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
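
Note on the block removed above: the deleted comment documents the reloc-info byte format that moves out of assembler.cc — records are written backwards, a single byte carries a 2-bit tag in its low bits and a 6-bit pc delta above them, and larger deltas spill into a variable-length PC_JUMP record of 7-bit chunks. A small self-contained sketch of just the short-record packing, to make that layout concrete; this is not the real RelocInfoWriter API, which now lives elsewhere.

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative only: 2 tag bits in the low bits, a 6-bit pc delta above them.
constexpr int kTagBits = 2;
constexpr uint32_t kSmallPCDeltaMask = (1u << (8 - kTagBits)) - 1;

void WriteShortTaggedPC(std::vector<uint8_t>* out, uint32_t pc_delta, int tag) {
  // A short record packs a pc delta of at most 6 bits above the 2-bit tag.
  out->push_back(static_cast<uint8_t>((pc_delta << kTagBits) | tag));
}

int main() {
  std::vector<uint8_t> stream;
  WriteShortTaggedPC(&stream, 5, 1);  // 1 == code-target tag in the old scheme
  uint8_t b = stream.back();
  std::printf("tag=%u pc_delta=%u\n", b & 0x3u, (b >> kTagBits) & kSmallPCDeltaMask);
  return 0;
}
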
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 28ec2a68c6..b108c5dfff 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -51,6 +51,7 @@
#include "src/objects.h"
#include "src/register-configuration.h"
#include "src/reglist.h"
+#include "src/reloc-info.h"
namespace v8 {
@@ -171,7 +172,7 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
Isolate* isolate, bool explicitly_support_serialization = false);
};
-class AssemblerBase : public Malloced {
+class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
public:
AssemblerBase(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~AssemblerBase();
@@ -415,426 +416,6 @@ class CpuFeatures : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
-// Specifies whether to perform icache flush operations on RelocInfo updates.
-// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an
-// instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be
-// skipped (only use this if you will flush the icache manually before it is
-// executed).
-enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
-
-// -----------------------------------------------------------------------------
-// Relocation information
-
-
-// Relocation information consists of the address (pc) of the datum
-// to which the relocation information applies, the relocation mode
-// (rmode), and an optional data field. The relocation mode may be
-// "descriptive" and not indicate a need for relocation, but simply
-// describe a property of the datum. Such rmodes are useful for GC
-// and nice disassembly output.
-
-class RelocInfo {
- public:
- // This string is used to add padding comments to the reloc info in cases
- // where we are not sure to have enough space for patching in during
- // lazy deoptimization. This is the case if we have indirect calls for which
- // we do not normally record relocation info.
- static const char* const kFillerCommentString;
-
- // The minimum size of a comment is equal to two bytes for the extra tagged
- // pc and kPointerSize for the actual pointer to the comment.
- static const int kMinRelocCommentSize = 2 + kPointerSize;
-
- // The maximum size for a call instruction including pc-jump.
- static const int kMaxCallSize = 6;
-
- // The maximum pc delta that will use the short encoding.
- static const int kMaxSmallPCDelta;
-
- enum Mode : int8_t {
- // Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
- // and IsShareableRelocMode predicates below).
-
- CODE_TARGET,
- RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
- EMBEDDED_OBJECT, // LAST_GCED_ENUM
-
- JS_TO_WASM_CALL,
- WASM_CALL, // FIRST_SHAREABLE_RELOC_MODE
- WASM_STUB_CALL,
-
- RUNTIME_ENTRY,
- COMMENT,
-
- EXTERNAL_REFERENCE, // The address of an external C++ function.
- INTERNAL_REFERENCE, // An address inside the same function.
-
- // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
- INTERNAL_REFERENCE_ENCODED,
-
- // An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
- OFF_HEAP_TARGET,
-
- // Marks constant and veneer pools. Only used on ARM and ARM64.
- // They use a custom noncompact encoding.
- CONST_POOL,
- VENEER_POOL,
-
- DEOPT_SCRIPT_OFFSET,
- DEOPT_INLINING_ID, // Deoptimization source position.
- DEOPT_REASON, // Deoptimization reason index.
- DEOPT_ID, // Deoptimization inlining id.
-
- // This is not an actual reloc mode, but used to encode a long pc jump that
- // cannot be encoded as part of another record.
- PC_JUMP,
-
- // Pseudo-types
- NUMBER_OF_MODES,
- NONE, // never recorded value
-
- LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
- FIRST_REAL_RELOC_MODE = CODE_TARGET,
- LAST_REAL_RELOC_MODE = VENEER_POOL,
- LAST_GCED_ENUM = EMBEDDED_OBJECT,
- FIRST_SHAREABLE_RELOC_MODE = WASM_CALL,
- };
-
- STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
-
- RelocInfo() = default;
-
- RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host,
- Address constant_pool = kNullAddress)
- : pc_(pc),
- rmode_(rmode),
- data_(data),
- host_(host),
- constant_pool_(constant_pool) {}
-
- static inline bool IsRealRelocMode(Mode mode) {
- return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
- }
- // Is the relocation mode affected by GC?
- static inline bool IsGCRelocMode(Mode mode) { return mode <= LAST_GCED_ENUM; }
- static inline bool IsShareableRelocMode(Mode mode) {
- return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
- }
- static inline bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
- static inline bool IsCodeTargetMode(Mode mode) {
- return mode <= LAST_CODE_TARGET_MODE;
- }
- static inline bool IsRelativeCodeTarget(Mode mode) {
- return mode == RELATIVE_CODE_TARGET;
- }
- static inline bool IsEmbeddedObject(Mode mode) {
- return mode == EMBEDDED_OBJECT;
- }
- static inline bool IsRuntimeEntry(Mode mode) {
- return mode == RUNTIME_ENTRY;
- }
- static inline bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
- static inline bool IsWasmStubCall(Mode mode) {
- return mode == WASM_STUB_CALL;
- }
- static inline bool IsComment(Mode mode) {
- return mode == COMMENT;
- }
- static inline bool IsConstPool(Mode mode) {
- return mode == CONST_POOL;
- }
- static inline bool IsVeneerPool(Mode mode) {
- return mode == VENEER_POOL;
- }
- static inline bool IsDeoptPosition(Mode mode) {
- return mode == DEOPT_SCRIPT_OFFSET || mode == DEOPT_INLINING_ID;
- }
- static inline bool IsDeoptReason(Mode mode) {
- return mode == DEOPT_REASON;
- }
- static inline bool IsDeoptId(Mode mode) {
- return mode == DEOPT_ID;
- }
- static inline bool IsExternalReference(Mode mode) {
- return mode == EXTERNAL_REFERENCE;
- }
- static inline bool IsInternalReference(Mode mode) {
- return mode == INTERNAL_REFERENCE;
- }
- static inline bool IsInternalReferenceEncoded(Mode mode) {
- return mode == INTERNAL_REFERENCE_ENCODED;
- }
- static inline bool IsOffHeapTarget(Mode mode) {
- return mode == OFF_HEAP_TARGET;
- }
- static inline bool IsNone(Mode mode) { return mode == NONE; }
- static inline bool IsWasmReference(Mode mode) {
- return IsWasmPtrReference(mode);
- }
- static inline bool IsWasmPtrReference(Mode mode) {
- return mode == WASM_CALL || mode == JS_TO_WASM_CALL;
- }
-
- static inline bool IsOnlyForSerializer(Mode mode) {
- return mode == EXTERNAL_REFERENCE || mode == OFF_HEAP_TARGET;
- }
-
- static constexpr int ModeMask(Mode mode) { return 1 << mode; }
-
- // Accessors
- Address pc() const { return pc_; }
- Mode rmode() const { return rmode_; }
- intptr_t data() const { return data_; }
- Code* host() const { return host_; }
- Address constant_pool() const { return constant_pool_; }
-
- // Apply a relocation by delta bytes. When the code object is moved, PC
- // relative addresses have to be updated as well as absolute addresses
- // inside the code (internal references).
- // Do not forget to flush the icache afterwards!
- V8_INLINE void apply(intptr_t delta);
-
- // Is the pointer this relocation info refers to coded like a plain pointer
- // or is it strange in some way (e.g. relative or patched into a series of
- // instructions).
- bool IsCodedSpecially();
-
- // The static pendant to IsCodedSpecially, just for off-heap targets. Used
- // during deserialization, when we don't actually have a RelocInfo handy.
- static bool OffHeapTargetIsCodedSpecially();
-
- // If true, the pointer this relocation info refers to is an entry in the
- // constant pool, otherwise the pointer is embedded in the instruction stream.
- bool IsInConstantPool();
-
- // Returns the deoptimization id for the entry associated with the reloc info
- // where {kind} is the deoptimization kind.
- // This is only used for printing RUNTIME_ENTRY relocation info.
- int GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind);
-
- Address wasm_call_address() const;
- Address wasm_stub_call_address() const;
- Address js_to_wasm_address() const;
-
- uint32_t wasm_call_tag() const;
-
- void set_wasm_call_address(
- Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void set_wasm_stub_call_address(
- Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void set_js_to_wasm_address(
- Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-
- void set_target_address(
- Address target,
- WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-
- // this relocation applies to;
- // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
- V8_INLINE Address target_address();
- V8_INLINE HeapObject* target_object();
- V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
- V8_INLINE void set_target_object(
- Heap* heap, HeapObject* target,
- WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- V8_INLINE Address target_runtime_entry(Assembler* origin);
- V8_INLINE void set_target_runtime_entry(
- Address target,
- WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- V8_INLINE Address target_off_heap_target();
- V8_INLINE Cell* target_cell();
- V8_INLINE Handle<Cell> target_cell_handle();
- V8_INLINE void set_target_cell(
- Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- V8_INLINE void set_target_external_reference(
- Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-
- // Returns the address of the constant pool entry where the target address
- // is held. This should only be called if IsInConstantPool returns true.
- V8_INLINE Address constant_pool_entry_address();
-
- // Read the address of the word containing the target_address in an
- // instruction stream. What this means exactly is architecture-independent.
- // The only architecture-independent user of this function is the serializer.
- // The serializer uses it to find out how many raw bytes of instruction to
- // output before the next target. Architecture-independent code shouldn't
- // dereference the pointer it gets back from this.
- V8_INLINE Address target_address_address();
-
- // This indicates how much space a target takes up when deserializing a code
- // stream. For most architectures this is just the size of a pointer. For
- // an instruction like movw/movt where the target bits are mixed into the
- // instruction bits the size of the target will be zero, indicating that the
- // serializer should not step forwards in memory after a target is resolved
- // and written. In this case the target_address_address function above
- // should return the end of the instructions to be patched, allowing the
- // deserializer to deserialize the instructions as raw bytes and put them in
- // place, ready to be patched with the target.
- V8_INLINE int target_address_size();
-
- // Read the reference in the instruction this relocation
- // applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
- V8_INLINE Address target_external_reference();
-
- // Read the reference in the instruction this relocation
- // applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
- V8_INLINE Address target_internal_reference();
-
- // Return the reference address this relocation applies to;
- // can only be called if rmode_ is INTERNAL_REFERENCE.
- V8_INLINE Address target_internal_reference_address();
-
- // Wipe out a relocation to a fixed value, used for making snapshots
- // reproducible.
- V8_INLINE void WipeOut();
-
- template <typename ObjectVisitor>
- inline void Visit(ObjectVisitor* v);
-
-#ifdef DEBUG
- // Check whether the given code contains relocation information that
- // either is position-relative or movable by the garbage collector.
- static bool RequiresRelocation(const CodeDesc& desc);
-#endif
-
-#ifdef ENABLE_DISASSEMBLER
- // Printing
- static const char* RelocModeName(Mode rmode);
- void Print(Isolate* isolate, std::ostream& os); // NOLINT
-#endif // ENABLE_DISASSEMBLER
-#ifdef VERIFY_HEAP
- void Verify(Isolate* isolate);
-#endif
-
- static const int kApplyMask; // Modes affected by apply. Depends on arch.
-
- private:
- // On ARM/ARM64, note that pc_ is the address of the instruction referencing
- // the constant pool and not the address of the constant pool entry.
- Address pc_;
- Mode rmode_;
- intptr_t data_ = 0;
- Code* host_;
- Address constant_pool_ = kNullAddress;
- friend class RelocIterator;
-};
-
-
-// RelocInfoWriter serializes a stream of relocation info. It writes towards
-// lower addresses.
-class RelocInfoWriter BASE_EMBEDDED {
- public:
- RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
-
- byte* pos() const { return pos_; }
- byte* last_pc() const { return last_pc_; }
-
- void Write(const RelocInfo* rinfo);
-
- // Update the state of the stream after reloc info buffer
- // and/or code is moved while the stream is active.
- void Reposition(byte* pos, byte* pc) {
- pos_ = pos;
- last_pc_ = pc;
- }
-
- // Max size (bytes) of a written RelocInfo. Longest encoding is
- // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
- static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;
-
- private:
- inline uint32_t WriteLongPCJump(uint32_t pc_delta);
-
- inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
- inline void WriteShortData(intptr_t data_delta);
-
- inline void WriteMode(RelocInfo::Mode rmode);
- inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
- inline void WriteIntData(int data_delta);
- inline void WriteData(intptr_t data_delta);
-
- byte* pos_;
- byte* last_pc_;
-
- DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
-};
-
-
-// A RelocIterator iterates over relocation information.
-// Typical use:
-//
-// for (RelocIterator it(code); !it.done(); it.next()) {
-// // do something with it.rinfo() here
-// }
-//
-// A mask can be specified to skip unwanted modes.
-class RelocIterator: public Malloced {
- public:
- // Create a new iterator positioned at
- // the beginning of the reloc info.
- // Relocation information with mode k is included in the
- // iteration iff bit k of mode_mask is set.
- explicit RelocIterator(Code* code, int mode_mask = -1);
- explicit RelocIterator(EmbeddedData* embedded_data, Code* code,
- int mode_mask);
- explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
- explicit RelocIterator(const CodeReference code_reference,
- int mode_mask = -1);
- explicit RelocIterator(Vector<byte> instructions,
- Vector<const byte> reloc_info, Address const_pool,
- int mode_mask = -1);
- RelocIterator(RelocIterator&&) = default;
- RelocIterator& operator=(RelocIterator&&) = default;
-
- // Iteration
- bool done() const { return done_; }
- void next();
-
- // Return pointer valid until next next().
- RelocInfo* rinfo() {
- DCHECK(!done());
- return &rinfo_;
- }
-
- private:
- RelocIterator(Code* host, Address pc, Address constant_pool, const byte* pos,
- const byte* end, int mode_mask);
-
- // Advance* moves the position before/after reading.
- // *Read* reads from current byte(s) into rinfo_.
- // *Get* just reads and returns info on current byte.
- void Advance(int bytes = 1) { pos_ -= bytes; }
- int AdvanceGetTag();
- RelocInfo::Mode GetMode();
-
- void AdvanceReadLongPCJump();
-
- void ReadShortTaggedPC();
- void ReadShortData();
-
- void AdvanceReadPC();
- void AdvanceReadInt();
- void AdvanceReadData();
-
- // If the given mode is wanted, set it in rinfo_ and return true.
- // Else return false. Used for efficiently skipping unwanted modes.
- bool SetMode(RelocInfo::Mode mode) {
- return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
- }
-
- const byte* pos_;
- const byte* end_;
- RelocInfo rinfo_;
- bool done_ = false;
- const int mode_mask_;
-
- DISALLOW_COPY_AND_ASSIGN(RelocIterator);
-};
-
// -----------------------------------------------------------------------------
// Utility functions
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 981a037f13..acf7649792 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -139,6 +139,12 @@ typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>
typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>
AllowCodeDependencyChange;
+class DisallowHeapAccess {
+ DisallowHeapAllocation no_heap_allocation_;
+ DisallowHandleAllocation no_handle_allocation_;
+ DisallowHandleDereference no_handle_dereference_;
+ DisallowCodeDependencyChange no_dependency_change_;
+};
// Per-isolate assert scopes.
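
Note: the new DisallowHeapAccess class simply bundles four existing per-thread assert scopes, so constructing one member object enforces all of them for the lifetime of the enclosing scope. A hedged sketch of that RAII-aggregation pattern with hypothetical guards — member guards are entered in declaration order and released in reverse order:

#include <cstdio>

struct NoAllocGuard {
  NoAllocGuard() { std::puts("allocation disallowed"); }
  ~NoAllocGuard() { std::puts("allocation allowed again"); }
};
struct NoDerefGuard {
  NoDerefGuard() { std::puts("handle dereference disallowed"); }
  ~NoDerefGuard() { std::puts("handle dereference allowed again"); }
};

// Composite scope: one object, both restrictions.
struct DisallowBoth {
  NoAllocGuard no_alloc_;
  NoDerefGuard no_deref_;
};

int main() {
  {
    DisallowBoth scope;  // enters both guards
    std::puts("inside restricted region");
  }  // guards released here, in reverse order
  return 0;
}
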
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index fc8be819f6..8cf81b24a5 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -27,7 +27,6 @@
#include "src/ast/ast-value-factory.h"
-#include "src/api.h"
#include "src/char-predicates-inl.h"
#include "src/objects-inl.h"
#include "src/objects.h"
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 5a2346ad9f..6c1e989d30 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -249,7 +249,23 @@ class Expression : public AstNode {
static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
};
-
+// V8's notion of BreakableStatement does not correspond to the notion of
+// BreakableStatement in ECMAScript. In V8, the idea is that a
+// BreakableStatement is a statement that can be the target of a break
+// statement. The BreakableStatement AST node carries a list of labels, any of
+// which can be used as an argument to the break statement in order to target
+// it.
+//
+// Since we don't want to attach a list of labels to all kinds of statements, we
+// only declare switchs, loops, and blocks as BreakableStatements. This means
+// that we implement breaks targeting other statement forms as breaks targeting
+// a substatement thereof. For instance, in "foo: if (b) { f(); break foo; }" we
+// pretend that foo is the label of the inner block. That's okay because one
+// can't observe the difference.
+//
+// This optimization makes it harder to detect invalid continue labels, see the
+// need for own_labels in IterationStatement.
+//
class BreakableStatement : public Statement {
public:
enum BreakableType {
@@ -257,6 +273,13 @@ class BreakableStatement : public Statement {
TARGET_FOR_NAMED_ONLY
};
+ // A list of all labels declared on the path up to the previous
+ // BreakableStatement (if any).
+ //
+ // Example: "l1: for (;;) l2: l3: { l4: if (b) l5: { s } }"
+ // labels() of the ForStatement will be l1.
+ // labels() of the Block { l4: ... } will be l2, l3.
+ // labels() of the Block { s } will be l4, l5.
ZonePtrList<const AstRawString>* labels() const;
// Testers.
@@ -441,11 +464,23 @@ class IterationStatement : public BreakableStatement {
ZonePtrList<const AstRawString>* labels() const { return labels_; }
+ // A list of all labels that the iteration statement is directly prefixed
+ // with, i.e. all the labels that a continue statement in the body can use to
+ // continue this iteration statement. This is always a subset of {labels}.
+ //
+ // Example: "l1: { l2: if (b) l3: l4: for (;;) s }"
+ // labels() of the Block will be l1.
+ // labels() of the ForStatement will be l2, l3, l4.
+ // own_labels() of the ForStatement will be l3, l4.
+ ZonePtrList<const AstRawString>* own_labels() const { return own_labels_; }
+
protected:
- IterationStatement(ZonePtrList<const AstRawString>* labels, int pos,
+ IterationStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos,
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
+ own_labels_(own_labels),
body_(nullptr) {}
void Initialize(Statement* body) { body_ = body; }
@@ -454,6 +489,7 @@ class IterationStatement : public BreakableStatement {
private:
ZonePtrList<const AstRawString>* labels_;
+ ZonePtrList<const AstRawString>* own_labels_;
Statement* body_;
};
@@ -470,8 +506,10 @@ class DoWhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- DoWhileStatement(ZonePtrList<const AstRawString>* labels, int pos)
- : IterationStatement(labels, pos, kDoWhileStatement), cond_(nullptr) {}
+ DoWhileStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos)
+ : IterationStatement(labels, own_labels, pos, kDoWhileStatement),
+ cond_(nullptr) {}
Expression* cond_;
};
@@ -489,8 +527,10 @@ class WhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- WhileStatement(ZonePtrList<const AstRawString>* labels, int pos)
- : IterationStatement(labels, pos, kWhileStatement), cond_(nullptr) {}
+ WhileStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos)
+ : IterationStatement(labels, own_labels, pos, kWhileStatement),
+ cond_(nullptr) {}
Expression* cond_;
};
@@ -513,8 +553,9 @@ class ForStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
- ForStatement(ZonePtrList<const AstRawString>* labels, int pos)
- : IterationStatement(labels, pos, kForStatement),
+ ForStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos)
+ : IterationStatement(labels, own_labels, pos, kForStatement),
init_(nullptr),
cond_(nullptr),
next_(nullptr) {}
@@ -539,9 +580,10 @@ class ForEachStatement : public IterationStatement {
}
protected:
- ForEachStatement(ZonePtrList<const AstRawString>* labels, int pos,
+ ForEachStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos,
NodeType type)
- : IterationStatement(labels, pos, type) {}
+ : IterationStatement(labels, own_labels, pos, type) {}
};
@@ -566,8 +608,9 @@ class ForInStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
- ForInStatement(ZonePtrList<const AstRawString>* labels, int pos)
- : ForEachStatement(labels, pos, kForInStatement),
+ ForInStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos)
+ : ForEachStatement(labels, own_labels, pos, kForInStatement),
each_(nullptr),
subject_(nullptr) {
bit_field_ = ForInTypeField::update(bit_field_, SLOW_FOR_IN);
@@ -632,8 +675,9 @@ class ForOfStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
- ForOfStatement(ZonePtrList<const AstRawString>* labels, int pos)
- : ForEachStatement(labels, pos, kForOfStatement),
+ ForOfStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos)
+ : ForEachStatement(labels, own_labels, pos, kForOfStatement),
iterator_(nullptr),
assign_iterator_(nullptr),
next_result_(nullptr),
@@ -2201,6 +2245,12 @@ class FunctionLiteral final : public Expression {
bool is_anonymous_expression() const {
return function_type() == kAnonymousExpression;
}
+
+ void mark_as_iife() { bit_field_ = IIFEBit::update(bit_field_, true); }
+ bool is_iife() const { return IIFEBit::decode(bit_field_); }
+ bool is_top_level() const {
+ return function_literal_id() == FunctionLiteral::kIdTypeTopLevel;
+ }
bool is_wrapped() const { return function_type() == kWrapped; }
LanguageMode language_mode() const;
@@ -2333,7 +2383,7 @@ class FunctionLiteral final : public Expression {
kHasDuplicateParameters) |
DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
RequiresInstanceFieldsInitializer::encode(false) |
- HasBracesField::encode(has_braces);
+ HasBracesField::encode(has_braces) | IIFEBit::encode(false);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
DCHECK_EQ(body == nullptr, expected_property_count < 0);
}
@@ -2348,6 +2398,7 @@ class FunctionLiteral final : public Expression {
: public BitField<bool, DontOptimizeReasonField::kNext, 1> {};
class HasBracesField
: public BitField<bool, RequiresInstanceFieldsInitializer::kNext, 1> {};
+ class IIFEBit : public BitField<bool, HasBracesField::kNext, 1> {};
int expected_property_count_;
int parameter_count_;
@@ -2803,9 +2854,11 @@ class AstNodeFactory final BASE_EMBEDDED {
Block(zone_, labels, capacity, ignore_completion_value);
}
-#define STATEMENT_WITH_LABELS(NodeType) \
- NodeType* New##NodeType(ZonePtrList<const AstRawString>* labels, int pos) { \
- return new (zone_) NodeType(labels, pos); \
+#define STATEMENT_WITH_LABELS(NodeType) \
+ NodeType* New##NodeType(ZonePtrList<const AstRawString>* labels, \
+ ZonePtrList<const AstRawString>* own_labels, \
+ int pos) { \
+ return new (zone_) NodeType(labels, own_labels, pos); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
@@ -2817,23 +2870,25 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) SwitchStatement(zone_, labels, tag, pos);
}
- ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
- ZonePtrList<const AstRawString>* labels,
- int pos) {
+ ForEachStatement* NewForEachStatement(
+ ForEachStatement::VisitMode visit_mode,
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- return new (zone_) ForInStatement(labels, pos);
+ return new (zone_) ForInStatement(labels, own_labels, pos);
}
case ForEachStatement::ITERATE: {
- return new (zone_) ForOfStatement(labels, pos);
+ return new (zone_) ForOfStatement(labels, own_labels, pos);
}
}
UNREACHABLE();
}
ForOfStatement* NewForOfStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
int pos) {
- return new (zone_) ForOfStatement(labels, pos);
+ return new (zone_) ForOfStatement(labels, own_labels, pos);
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
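
Note on the ast.h comments added above: labels() on an iteration statement collects every label seen since the previous breakable statement, while own_labels() keeps only the labels written directly on the loop — the ones a continue inside the body may legally target. A tiny illustrative snippet (hypothetical structs, not the AST classes) for the documented example "l1: { l2: if (b) l3: l4: for (;;) s }":

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// labels(): all labels since the last breakable statement;
// own_labels(): labels written directly on the loop (always a subset).
struct LoopLabels {
  std::vector<std::string> labels;
  std::vector<std::string> own_labels;
};

bool ContinueTargetValid(const LoopLabels& loop, const std::string& label) {
  return std::find(loop.own_labels.begin(), loop.own_labels.end(), label) !=
         loop.own_labels.end();
}

int main() {
  // For "l1: { l2: if (b) l3: l4: for (;;) s }":
  LoopLabels for_stmt{{"l2", "l3", "l4"}, {"l3", "l4"}};
  std::printf("continue l3 valid: %d\n", ContinueTargetValid(for_stmt, "l3"));  // 1
  std::printf("continue l2 valid: %d\n", ContinueTargetValid(for_stmt, "l2"));  // 0
  return 0;
}
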
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index ef086bcefc..d2e56a9335 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -11,12 +11,13 @@
#include "src/base/platform/platform.h"
#include "src/globals.h"
#include "src/objects-inl.h"
+#include "src/string-builder-inl.h"
namespace v8 {
namespace internal {
CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
- : builder_(isolate) {
+ : builder_(new IncrementalStringBuilder(isolate)) {
isolate_ = isolate;
position_ = 0;
num_prints_ = 0;
@@ -30,6 +31,8 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
InitializeAstVisitor(isolate);
}
+CallPrinter::~CallPrinter() {}
+
CallPrinter::ErrorHint CallPrinter::GetErrorHint() const {
if (is_call_error_) {
if (is_iterator_error_) return ErrorHint::kCallAndNormalIterator;
@@ -45,7 +48,7 @@ Handle<String> CallPrinter::Print(FunctionLiteral* program, int position) {
num_prints_ = 0;
position_ = position;
Find(program);
- return builder_.Finish().ToHandleChecked();
+ return builder_->Finish().ToHandleChecked();
}
@@ -65,13 +68,13 @@ void CallPrinter::Find(AstNode* node, bool print) {
void CallPrinter::Print(const char* str) {
if (!found_ || done_) return;
num_prints_++;
- builder_.AppendCString(str);
+ builder_->AppendCString(str);
}
void CallPrinter::Print(Handle<String> str) {
if (!found_ || done_) return;
num_prints_++;
- builder_.AppendString(str);
+ builder_->AppendString(str);
}
void CallPrinter::VisitBlock(Block* node) {
@@ -746,17 +749,21 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var,
}
}
-void AstPrinter::PrintLabelsIndented(ZonePtrList<const AstRawString>* labels) {
+void AstPrinter::PrintLabelsIndented(ZonePtrList<const AstRawString>* labels,
+ const char* prefix) {
if (labels == nullptr || labels->length() == 0) return;
- PrintIndented("LABELS ");
+ PrintIndented(prefix);
+ Print("LABELS ");
PrintLabels(labels);
Print("\n");
}
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
- IndentedScope indent(this, s, node->position());
- Visit(node);
+ if (node != nullptr) {
+ IndentedScope indent(this, s, node->position());
+ Visit(node);
+ }
}
@@ -823,6 +830,7 @@ void AstPrinter::VisitBlock(Block* node) {
const char* block_txt =
node->ignore_completion_value() ? "BLOCK NOCOMPLETIONS" : "BLOCK";
IndentedScope indent(this, block_txt, node->position());
+ PrintLabelsIndented(node->labels());
PrintStatements(node->statements());
}
@@ -916,6 +924,7 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO", node->position());
PrintLabelsIndented(node->labels());
+ PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
}
@@ -924,6 +933,7 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE", node->position());
PrintLabelsIndented(node->labels());
+ PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
}
@@ -932,6 +942,7 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR", node->position());
PrintLabelsIndented(node->labels());
+ PrintLabelsIndented(node->own_labels(), "OWN ");
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -941,6 +952,8 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
IndentedScope indent(this, "FOR IN", node->position());
+ PrintLabelsIndented(node->labels());
+ PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@@ -949,6 +962,8 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
IndentedScope indent(this, "FOR OF", node->position());
+ PrintLabelsIndented(node->labels());
+ PrintLabelsIndented(node->own_labels(), "OWN ");
PrintIndentedVisit("INIT", node->assign_iterator());
PrintIndentedVisit("NEXT", node->next_result());
PrintIndentedVisit("DONE", node->result_done());
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index cc29052c2d..71019fe264 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -8,14 +8,16 @@
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
-#include "src/string-builder.h"
namespace v8 {
namespace internal {
+class IncrementalStringBuilder; // to avoid including string-builder-inl.h
+
class CallPrinter final : public AstVisitor<CallPrinter> {
public:
explicit CallPrinter(Isolate* isolate, bool is_user_js);
+ ~CallPrinter();
// The following routine prints the node with position |position| into a
// string.
@@ -42,7 +44,8 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
Isolate* isolate_;
int num_prints_;
- IncrementalStringBuilder builder_;
+ // Allocate the builder on the heap simply because it's forward declared.
+ std::unique_ptr<IncrementalStringBuilder> builder_;
int position_; // position of ast node to print
bool found_;
bool done_;
@@ -107,7 +110,8 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
bool quote);
void PrintLiteralWithModeIndented(const char* info, Variable* var,
const AstRawString* value);
- void PrintLabelsIndented(ZonePtrList<const AstRawString>* labels);
+ void PrintLabelsIndented(ZonePtrList<const AstRawString>* labels,
+ const char* prefix = "");
void PrintObjectProperties(ZonePtrList<ObjectLiteral::Property>* properties);
void PrintClassProperties(ZonePtrList<ClassLiteral::Property>* properties);
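
Note: the prettyprinter change swaps the IncrementalStringBuilder member for a forward declaration plus std::unique_ptr, which is why an out-of-line ~CallPrinter() appears in the .cc file — the destructor must be compiled where the held type is complete. A compact sketch of that pattern with hypothetical names; it is collapsed into one file here so it builds on its own, whereas in the real pattern Impl would only be forward declared in the header.

#include <cstdio>
#include <memory>

class Impl {
 public:
  void Append(const char* s) { std::printf("%s", s); }
};

class Widget {
 public:
  Widget() : impl_(new Impl()) {}
  ~Widget() = default;  // in the real pattern this lives in the .cc file
  void Say(const char* s) { impl_->Append(s); }

 private:
  std::unique_ptr<Impl> impl_;
};

int main() {
  Widget w;
  w.Say("hello\n");
  return 0;
}
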
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 18db88f950..74d50c44de 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -2178,25 +2178,16 @@ void Scope::AllocateHeapSlot(Variable* var) {
void DeclarationScope::AllocateParameterLocals() {
DCHECK(is_function_scope());
- bool uses_sloppy_arguments = false;
-
+ bool has_mapped_arguments = false;
if (arguments_ != nullptr) {
DCHECK(!is_arrow_scope());
- // 'arguments' is used. Unless there is also a parameter called
- // 'arguments', we must be conservative and allocate all parameters to
- // the context assuming they will be captured by the arguments object.
- // If we have a parameter named 'arguments', a (new) value is always
- // assigned to it via the function invocation. Then 'arguments' denotes
- // that specific parameter value and cannot be used to access the
- // parameters, which is why we don't need to allocate an arguments
- // object in that case.
if (MustAllocate(arguments_) && !has_arguments_parameter_) {
- // In strict mode 'arguments' does not alias formal parameters.
- // Therefore in strict mode we allocate parameters as if 'arguments'
- // were not used.
- // If the parameter list is not simple, arguments isn't sloppy either.
- uses_sloppy_arguments =
- is_sloppy(language_mode()) && has_simple_parameters();
+ // 'arguments' is used and does not refer to a function
+ // parameter of the same name. If the arguments object
+ // aliases formal parameters, we conservatively allocate
+ // them specially in the loop below.
+ has_mapped_arguments =
+ GetArgumentsType() == CreateArgumentsType::kMappedArguments;
} else {
// 'arguments' is unused. Tell the code generator that it does not need to
// allocate the arguments object by nulling out arguments_.
@@ -2212,7 +2203,7 @@ void DeclarationScope::AllocateParameterLocals() {
Variable* var = params_[i];
DCHECK(!has_rest_ || var != rest_parameter());
DCHECK_EQ(this, var->scope());
- if (uses_sloppy_arguments) {
+ if (has_mapped_arguments) {
var->set_is_used();
var->set_maybe_assigned();
var->ForceContextAllocation();
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 5618adee9e..f43761af58 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -804,6 +804,16 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
has_simple_parameters_ = false;
}
+ // Returns whether the arguments object aliases formal parameters.
+ CreateArgumentsType GetArgumentsType() const {
+ DCHECK(is_function_scope());
+ DCHECK(!is_arrow_scope());
+ DCHECK_NOT_NULL(arguments_);
+ return is_sloppy(language_mode()) && has_simple_parameters()
+ ? CreateArgumentsType::kMappedArguments
+ : CreateArgumentsType::kUnmappedArguments;
+ }
+
// The local variable 'arguments' if we need to allocate it; nullptr
// otherwise.
Variable* arguments() const {
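
Note on the scopes change above: the ad-hoc "uses_sloppy_arguments" flag is replaced by GetArgumentsType(), making the rule explicit — the arguments object aliases (maps) formal parameters only in sloppy-mode functions with a simple parameter list. A hedged sketch of that decision, with the two classic cases as comments; the names below are illustrative, not V8's.

#include <cstdio>

enum class ArgumentsKind { kMapped, kUnmapped };

// Mirrors the condition in DeclarationScope::GetArgumentsType().
ArgumentsKind ArgumentsKindFor(bool is_sloppy, bool has_simple_parameters) {
  return (is_sloppy && has_simple_parameters) ? ArgumentsKind::kMapped
                                              : ArgumentsKind::kUnmapped;
}

int main() {
  // function f(a)     { ... }  -> mapped in sloppy mode
  // function g(a = 1) { ... }  -> unmapped (non-simple parameter list)
  std::printf("%d\n", ArgumentsKindFor(true, true) == ArgumentsKind::kMapped);   // 1
  std::printf("%d\n", ArgumentsKindFor(true, false) == ArgumentsKind::kMapped);  // 0
  return 0;
}
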
diff --git a/deps/v8/src/async-hooks-wrapper.cc b/deps/v8/src/async-hooks-wrapper.cc
index cc080d9cfc..fd724af9c9 100644
--- a/deps/v8/src/async-hooks-wrapper.cc
+++ b/deps/v8/src/async-hooks-wrapper.cc
@@ -4,6 +4,7 @@
#include "src/async-hooks-wrapper.h"
#include "src/d8.h"
+#include "src/isolate-inl.h"
namespace v8 {
@@ -42,6 +43,18 @@ static AsyncHooksWrap* UnwrapHook(
Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
Local<Object> hook = args.This();
+
+ AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
+
+ if (!hooks->async_hook_ctor.Get(isolate)->HasInstance(hook)) {
+ isolate->ThrowException(
+ String::NewFromUtf8(
+ isolate, "Invalid 'this' passed instead of AsyncHooks instance",
+ NewStringType::kNormal)
+ .ToLocalChecked());
+ return nullptr;
+ }
+
Local<External> wrap = Local<External>::Cast(hook->GetInternalField(0));
void* ptr = wrap->Value();
return static_cast<AsyncHooksWrap*>(ptr);
@@ -49,12 +62,16 @@ static AsyncHooksWrap* UnwrapHook(
static void EnableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
AsyncHooksWrap* wrap = UnwrapHook(args);
- wrap->Enable();
+ if (wrap) {
+ wrap->Enable();
+ }
}
static void DisableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
AsyncHooksWrap* wrap = UnwrapHook(args);
- wrap->Disable();
+ if (wrap) {
+ wrap->Disable();
+ }
}
async_id_t AsyncHooks::GetExecutionAsyncId() const {
@@ -182,10 +199,12 @@ void AsyncHooks::Initialize() {
async_hook_ctor.Get(isolate_)->InstanceTemplate());
async_hooks_templ.Get(isolate_)->SetInternalFieldCount(1);
async_hooks_templ.Get(isolate_)->Set(
- String::NewFromUtf8(isolate_, "enable"),
+ String::NewFromUtf8(isolate_, "enable", v8::NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate_, EnableHook));
async_hooks_templ.Get(isolate_)->Set(
- String::NewFromUtf8(isolate_, "disable"),
+ String::NewFromUtf8(isolate_, "disable", v8::NewStringType::kNormal)
+ .ToLocalChecked(),
FunctionTemplate::New(isolate_, DisableHook));
async_id_smb.Reset(isolate_, Private::New(isolate_));
@@ -214,14 +233,26 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
TryCatch try_catch(hooks->isolate_);
try_catch.SetVerbose(true);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(hooks->isolate_);
+ if (isolate->has_scheduled_exception()) {
+ isolate->ScheduleThrow(isolate->scheduled_exception());
+
+ DCHECK(try_catch.HasCaught());
+ Shell::ReportException(hooks->isolate_, &try_catch);
+ return;
+ }
+
Local<Value> rcv = Undefined(hooks->isolate_);
+ Local<Context> context = hooks->isolate_->GetCurrentContext();
Local<Value> async_id =
- promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->async_id_smb.Get(hooks->isolate_))
+ promise->GetPrivate(context, hooks->async_id_smb.Get(hooks->isolate_))
.ToLocalChecked();
Local<Value> args[1] = {async_id};
+ // This is unused. It's here to silence the warning about
+ // not using the MaybeLocal return value from Call.
+ MaybeLocal<Value> result;
+
// Sacrifice the brevity for readability and debugfulness
if (type == PromiseHookType::kInit) {
if (!wrap->init_function().IsEmpty()) {
@@ -231,23 +262,22 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
NewStringType::kNormal)
.ToLocalChecked(),
promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->trigger_id_smb.Get(hooks->isolate_))
+ ->GetPrivate(context, hooks->trigger_id_smb.Get(hooks->isolate_))
.ToLocalChecked(),
promise};
- wrap->init_function()->Call(rcv, 4, initArgs);
+ result = wrap->init_function()->Call(context, rcv, 4, initArgs);
}
} else if (type == PromiseHookType::kBefore) {
if (!wrap->before_function().IsEmpty()) {
- wrap->before_function()->Call(rcv, 1, args);
+ result = wrap->before_function()->Call(context, rcv, 1, args);
}
} else if (type == PromiseHookType::kAfter) {
if (!wrap->after_function().IsEmpty()) {
- wrap->after_function()->Call(rcv, 1, args);
+ result = wrap->after_function()->Call(context, rcv, 1, args);
}
} else if (type == PromiseHookType::kResolve) {
if (!wrap->promiseResolve_function().IsEmpty()) {
- wrap->promiseResolve_function()->Call(rcv, 1, args);
+ result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
}
}
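[Editorial note, not part of the patch] The PromiseHookDispatch changes above move every callback invocation onto the context-taking Function::Call overload, whose MaybeLocal result has to be consumed rather than dropped. A hedged embedder-style sketch of that calling pattern; the function and parameter names are mine, not d8's, and it assumes a build that provides v8.h:

#include <v8.h>

void CallHookCallback(v8::Isolate* isolate, v8::Local<v8::Context> context,
                      v8::Local<v8::Function> callback,
                      v8::Local<v8::Value> async_id) {
  v8::Local<v8::Value> recv = v8::Undefined(isolate);
  v8::Local<v8::Value> call_args[1] = {async_id};
  // Call returns MaybeLocal<Value>; storing it makes the "unused result"
  // warning go away and lets the caller detect a thrown exception.
  v8::MaybeLocal<v8::Value> result = callback->Call(context, recv, 1, call_args);
  if (result.IsEmpty()) {
    // The callback threw; a TryCatch further up the stack reports it.
  }
}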
diff --git a/deps/v8/src/async-hooks-wrapper.h b/deps/v8/src/async-hooks-wrapper.h
index c0c72373e0..68aafa5225 100644
--- a/deps/v8/src/async-hooks-wrapper.h
+++ b/deps/v8/src/async-hooks-wrapper.h
@@ -69,10 +69,11 @@ class AsyncHooks {
Local<Object> CreateHook(const v8::FunctionCallbackInfo<v8::Value>& args);
+ Persistent<FunctionTemplate> async_hook_ctor;
+
private:
std::vector<AsyncHooksWrap*> async_wraps_;
Isolate* isolate_;
- Persistent<FunctionTemplate> async_hook_ctor;
Persistent<ObjectTemplate> async_hooks_templ;
Persistent<Private> async_id_smb;
Persistent<Private> trigger_id_smb;
diff --git a/deps/v8/src/bailout-reason.cc b/deps/v8/src/bailout-reason.cc
index 7cf983861c..54b3dbda54 100644
--- a/deps/v8/src/bailout-reason.cc
+++ b/deps/v8/src/bailout-reason.cc
@@ -26,6 +26,11 @@ const char* GetAbortReason(AbortReason reason) {
return error_messages_[static_cast<int>(reason)];
}
+bool IsValidAbortReason(int reason_id) {
+ return reason_id >= static_cast<int>(AbortReason::kNoReason) &&
+ reason_id < static_cast<int>(AbortReason::kLastErrorMessage);
+}
+
#undef ERROR_MESSAGES_TEXTS
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index d48d696022..78f5665a38 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -5,6 +5,8 @@
#ifndef V8_BAILOUT_REASON_H_
#define V8_BAILOUT_REASON_H_
+#include <cstdint>
+
namespace v8 {
namespace internal {
@@ -114,17 +116,18 @@ namespace internal {
V(kOptimizationDisabledForTest, "Optimization disabled for test")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
-enum class BailoutReason {
+enum class BailoutReason : uint8_t {
BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};
-enum class AbortReason {
+enum class AbortReason : uint8_t {
ABORT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};
#undef ERROR_MESSAGES_CONSTANTS
const char* GetBailoutReason(BailoutReason reason);
const char* GetAbortReason(AbortReason reason);
+bool IsValidAbortReason(int reason_id);
} // namespace internal
} // namespace v8
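[Editorial note, not part of the patch] IsValidAbortReason gives callers a way to range-check a raw integer before casting it back to the now uint8_t-backed enum. A standalone sketch of that use; the two-member enum stands in for the real ABORT_MESSAGES_LIST-generated list:

#include <cstdint>
#include <cstdio>

// kStackOverflow is a placeholder entry; the real enumerators are generated
// from ABORT_MESSAGES_LIST.
enum class AbortReason : uint8_t { kNoReason = 0, kStackOverflow, kLastErrorMessage };

bool IsValidAbortReason(int reason_id) {
  return reason_id >= static_cast<int>(AbortReason::kNoReason) &&
         reason_id < static_cast<int>(AbortReason::kLastErrorMessage);
}

int main() {
  int raw_id = 1;  // e.g. an id decoded from an Abort instruction's immediate
  if (IsValidAbortReason(raw_id)) {
    AbortReason reason = static_cast<AbortReason>(raw_id);
    std::printf("id %d is a valid AbortReason\n", static_cast<int>(reason));
  }
}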
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 75f89298f1..8a5b9e6a60 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -92,4 +92,15 @@
#endif // V8_CC_MSVC
+// Allowing the use of noexcept by removing the keyword on older compilers that
+// do not support adding noexcept to default members.
+#if ((!defined(V8_CC_GNU) && !defined(V8_TARGET_ARCH_MIPS) && \
+ !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC) && \
+ !defined(V8_TARGET_ARCH_PPC64)) || \
+ (defined(__clang__) && __cplusplus > 201300L))
+#define V8_NOEXCEPT noexcept
+#else
+#define V8_NOEXCEPT
+#endif
+
#endif // V8_BASE_COMPILER_SPECIFIC_H_
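[Editorial note, not part of the patch] The new V8_NOEXCEPT macro lets defaulted move operations be declared noexcept where the toolchain accepts it and silently drops the keyword elsewhere. A standalone sketch of why the annotation matters; the macro name, type, and the GCC-version gate below are illustrative assumptions, not V8's exact conditions:

#include <iostream>
#include <type_traits>
#include <vector>

#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ < 5)
#define MY_NOEXCEPT  // some older GCCs reject noexcept on defaulted members
#else
#define MY_NOEXCEPT noexcept
#endif

struct Buffer {
  std::vector<int> data;
  Buffer() = default;
  Buffer(Buffer&&) MY_NOEXCEPT = default;
  Buffer& operator=(Buffer&&) MY_NOEXCEPT = default;
};

int main() {
  // std::vector<Buffer> will move (rather than copy) its elements during
  // reallocation only when this trait is true.
  std::cout << "Buffer is nothrow-movable: "
            << std::is_nothrow_move_constructible<Buffer>::value << "\n";
}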
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 5d10ae4ec0..081018cc2e 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -113,6 +113,12 @@ V8_INLINE Dest bit_cast(Source const& source) {
TypeName(const TypeName&) = delete; \
DISALLOW_ASSIGN(TypeName)
+// Explicitly declare all copy/move constructors and assignments as deleted.
+#define DISALLOW_COPY_AND_MOVE_AND_ASSIGN(TypeName) \
+ TypeName(TypeName&&) = delete; \
+ TypeName& operator=(TypeName&&) = delete; \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+
// Explicitly declare all implicit constructors as deleted, namely the
// default constructor, copy constructor and operator= functions.
// This is especially useful for classes containing only static methods.
@@ -128,9 +134,9 @@ V8_INLINE Dest bit_cast(Source const& source) {
// Disallow copying a type, and only provide move construction and move
// assignment. Especially useful for move-only structs.
-#define MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(TypeName) \
- TypeName(TypeName&&) = default; \
- TypeName& operator=(TypeName&&) = default; \
+#define MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(TypeName) \
+ TypeName(TypeName&&) V8_NOEXCEPT = default; \
+ TypeName& operator=(TypeName&&) V8_NOEXCEPT = default; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
// A macro to disallow the dynamic allocation.
@@ -410,4 +416,30 @@ bool is_inbounds(float_t v) {
(kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
}
+#ifdef V8_OS_WIN
+
+// Setup for Windows shared library export.
+#ifdef BUILDING_V8_SHARED
+#define V8_EXPORT_PRIVATE __declspec(dllexport)
+#elif USING_V8_SHARED
+#define V8_EXPORT_PRIVATE __declspec(dllimport)
+#else
+#define V8_EXPORT_PRIVATE
+#endif // BUILDING_V8_SHARED
+
+#else // V8_OS_WIN
+
+// Setup for Linux shared library export.
+#if V8_HAS_ATTRIBUTE_VISIBILITY
+#ifdef BUILDING_V8_SHARED
+#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
+#else
+#define V8_EXPORT_PRIVATE
+#endif
+#else
+#define V8_EXPORT_PRIVATE
+#endif
+
+#endif // V8_OS_WIN
+
#endif // V8_BASE_MACROS_H_
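[Editorial note, not part of the patch] The block appended to macros.h defines V8_EXPORT_PRIVATE locally using the usual shared-library export pattern. A generic standalone sketch of the same three-way scheme; every name here (macro, build defines, class) is illustrative, not V8's:

#if defined(_WIN32)
#  if defined(BUILDING_MYLIB)
#    define MYLIB_EXPORT __declspec(dllexport)   // building the DLL itself
#  elif defined(USING_MYLIB_SHARED)
#    define MYLIB_EXPORT __declspec(dllimport)   // consuming the DLL
#  else
#    define MYLIB_EXPORT                          // static build: no decoration
#  endif
#elif defined(__GNUC__) || defined(__clang__)
#  define MYLIB_EXPORT __attribute__((visibility("default")))
#else
#  define MYLIB_EXPORT
#endif

class MYLIB_EXPORT Profiler {
 public:
  int Count() const { return count_; }
 private:
  int count_ = 0;
};

int main() {
  Profiler p;
  return p.Count();
}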
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index ea32c403ac..6f5276843d 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -131,7 +131,7 @@ class Optional {
if (!other.storage_.is_null_) Init(other.value());
}
- Optional(Optional&& other) {
+ Optional(Optional&& other) V8_NOEXCEPT {
if (!other.storage_.is_null_) Init(std::move(other.value()));
}
@@ -164,7 +164,7 @@ class Optional {
return *this;
}
- Optional& operator=(Optional&& other) {
+ Optional& operator=(Optional&& other) V8_NOEXCEPT {
if (other.storage_.is_null_) {
FreeIfNeeded();
return *this;
@@ -182,16 +182,16 @@ class Optional {
return *this;
}
- // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ // TODO(mlamouri): can't use 'constexpr' with CHECK.
const T* operator->() const {
- DCHECK(!storage_.is_null_);
+ CHECK(!storage_.is_null_);
return &value();
}
// TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
// meant to be 'constexpr const'.
T* operator->() {
- DCHECK(!storage_.is_null_);
+ CHECK(!storage_.is_null_);
return &value();
}
@@ -214,26 +214,26 @@ class Optional {
// TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
// meant to be 'constexpr const'.
T& value() & {
- DCHECK(!storage_.is_null_);
+ CHECK(!storage_.is_null_);
return storage_.value_;
}
- // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ // TODO(mlamouri): can't use 'constexpr' with CHECK.
const T& value() const & {
- DCHECK(!storage_.is_null_);
+ CHECK(!storage_.is_null_);
return storage_.value_;
}
// TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
// meant to be 'constexpr const'.
T&& value() && {
- DCHECK(!storage_.is_null_);
+ CHECK(!storage_.is_null_);
return std::move(storage_.value_);
}
- // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+ // TODO(mlamouri): can't use 'constexpr' with CHECK.
const T&& value() const && {
- DCHECK(!storage_.is_null_);
+ CHECK(!storage_.is_null_);
return std::move(storage_.value_);
}
@@ -273,7 +273,7 @@ class Optional {
return;
}
- DCHECK(!storage_.is_null_ && !other.storage_.is_null_);
+ CHECK(!storage_.is_null_ && !other.storage_.is_null_);
using std::swap;
swap(**this, *other);
}
@@ -288,20 +288,20 @@ class Optional {
private:
void Init(const T& value) {
- DCHECK(storage_.is_null_);
+ CHECK(storage_.is_null_);
new (&storage_.value_) T(value);
storage_.is_null_ = false;
}
void Init(T&& value) {
- DCHECK(storage_.is_null_);
+ CHECK(storage_.is_null_);
new (&storage_.value_) T(std::move(value));
storage_.is_null_ = false;
}
template <class... Args>
void Init(Args&&... args) {
- DCHECK(storage_.is_null_);
+ CHECK(storage_.is_null_);
new (&storage_.value_) T(std::forward<Args>(args)...);
storage_.is_null_ = false;
}
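[Editorial note, not part of the patch] Promoting these DCHECKs to CHECKs means that dereferencing an empty Optional now aborts in release builds instead of being undefined behavior. A standalone sketch of the hardened accessor; MiniOptional and MY_CHECK are illustrative stand-ins, not V8's base::Optional or CHECK:

#include <cstdio>
#include <cstdlib>

#define MY_CHECK(condition)                                   \
  do {                                                        \
    if (!(condition)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #condition); \
      std::abort();                                           \
    }                                                         \
  } while (0)

template <typename T>
class MiniOptional {
 public:
  MiniOptional() = default;
  explicit MiniOptional(const T& value) : has_value_(true), value_(value) {}

  const T& value() const {
    MY_CHECK(has_value_);  // aborts even in release builds, unlike a DCHECK
    return value_;
  }

 private:
  bool has_value_ = false;
  T value_{};
};

int main() {
  MiniOptional<int> answer(42);
  std::printf("%d\n", answer.value());
}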
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 3a9d65a12d..d1979fb9d8 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -57,8 +57,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
strlen(kVirtualMemoryName));
uintptr_t reservation;
uint32_t prot = GetProtectionFromMemoryPermission(access);
- zx_status_t status = zx_vmar_map(zx_vmar_root_self(), 0, vmo, 0, request_size,
- prot, &reservation);
+ zx_status_t status = zx_vmar_map_old(zx_vmar_root_self(), 0, vmo, 0,
+ request_size, prot, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
// so close the vmo either way.
zx_handle_close(vmo);
@@ -114,9 +114,9 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
uint32_t prot = GetProtectionFromMemoryPermission(access);
- return zx_vmar_protect(zx_vmar_root_self(),
- reinterpret_cast<uintptr_t>(address), size,
- prot) == ZX_OK;
+ return zx_vmar_protect_old(zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(address), size,
+ prot) == ZX_OK;
}
// static
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index cf7f3ec9bb..cb25196970 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -248,11 +248,11 @@ void* OS::GetRandomMmapAddr() {
// Use extra address space to isolate the mmap regions.
raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
- // Big-endian Linux: 44 bits of virtual addressing.
+ // Big-endian Linux: 42 bits of virtual addressing.
raw_addr &= uint64_t{0x03FFFFFFF000};
#else
- // Little-endian Linux: 48 bits of virtual addressing.
- raw_addr &= uint64_t{0x3FFFFFFFF000};
+ // Little-endian Linux: 46 bits of virtual addressing.
+ raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
#elif V8_TARGET_ARCH_MIPS64
// We allocate code in 256 MB aligned segments because of optimizations using
@@ -354,7 +354,8 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
int prot = GetProtectionFromMemoryPermission(access);
int ret = mprotect(address, size, prot);
if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
- ret = ReclaimInaccessibleMemory(address, size);
+ // This is advisory; ignore errors and continue execution.
+ ReclaimInaccessibleMemory(address, size);
}
// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index f618c65fb6..2e56ac5df1 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -27,6 +27,8 @@
#include "src/base/timezone-cache.h"
#include "src/base/utils/random-number-generator.h"
+#include <VersionHelpers.h>
+
#if defined(_MSC_VER)
#include <crtdbg.h> // NOLINT
#endif // defined(_MSC_VER)
@@ -763,8 +765,12 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
case OS::MemoryPermission::kReadWrite:
return PAGE_READWRITE;
case OS::MemoryPermission::kReadWriteExecute:
+ if (IsWindows10OrGreater())
+ return PAGE_EXECUTE_READWRITE | PAGE_TARGETS_INVALID;
return PAGE_EXECUTE_READWRITE;
case OS::MemoryPermission::kReadExecute:
+ if (IsWindows10OrGreater())
+ return PAGE_EXECUTE_READ | PAGE_TARGETS_INVALID;
return PAGE_EXECUTE_READ;
}
UNREACHABLE();
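[Editorial note, not part of the patch] A Windows-only sketch of the runtime gate used above; the helper name is mine and it requires a Windows 10 SDK for PAGE_TARGETS_INVALID. That flag asks Control Flow Guard to treat addresses in the region as invalid indirect-call targets until valid targets are registered explicitly:

#include <windows.h>
#include <VersionHelpers.h>

DWORD ExecutableProtection(bool writable) {
  DWORD prot = writable ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
  // Only Windows 10+ understands the CFG target-validity flag.
  if (IsWindows10OrGreater()) prot |= PAGE_TARGETS_INVALID;
  return prot;
}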
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index cbbe7e3cbf..530114a8e2 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -114,6 +114,16 @@ constexpr auto fold(Func func, Ts&&... more) ->
std::forward<Ts>(more)...);
}
+// {is_same<Ts...>::value} is true if all Ts are the same, false otherwise.
+template <typename... Ts>
+struct is_same : public std::false_type {};
+template <>
+struct is_same<> : public std::true_type {};
+template <typename T>
+struct is_same<T> : public std::true_type {};
+template <typename T, typename... Ts>
+struct is_same<T, T, Ts...> : public is_same<T, Ts...> {};
+
} // namespace base
} // namespace v8
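[Editorial note, not part of the patch] The variadic is_same added above answers "are all of these types identical?" by simple recursion on the pack. A standalone copy under a different name, with static_asserts as a usage example:

#include <type_traits>

template <typename... Ts>
struct all_same : std::false_type {};
template <>
struct all_same<> : std::true_type {};          // empty pack: vacuously true
template <typename T>
struct all_same<T> : std::true_type {};         // single type: trivially true
template <typename T, typename... Ts>
struct all_same<T, T, Ts...> : all_same<T, Ts...> {};  // peel off equal pairs

static_assert(all_same<int, int, int>::value, "all identical");
static_assert(!all_same<int, long>::value, "differs");

int main() {}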
diff --git a/deps/v8/src/basic-block-profiler.cc b/deps/v8/src/basic-block-profiler.cc
index deb1a532d0..eaecd5dc68 100644
--- a/deps/v8/src/basic-block-profiler.cc
+++ b/deps/v8/src/basic-block-profiler.cc
@@ -4,14 +4,28 @@
#include "src/basic-block-profiler.h"
+#include <algorithm>
+#include <numeric>
#include <sstream>
+#include "src/base/lazy-instance.h"
+
namespace v8 {
namespace internal {
-BasicBlockProfiler::Data::Data(size_t n_blocks)
- : n_blocks_(n_blocks), block_ids_(n_blocks_), counts_(n_blocks_, 0) {}
+namespace {
+base::LazyInstance<BasicBlockProfiler>::type kBasicBlockProfiler =
+ LAZY_INSTANCE_INITIALIZER;
+}
+BasicBlockProfiler* BasicBlockProfiler::Get() {
+ return kBasicBlockProfiler.Pointer();
+}
+
+BasicBlockProfiler::Data::Data(size_t n_blocks)
+ : n_blocks_(n_blocks),
+ block_rpo_numbers_(n_blocks_),
+ counts_(n_blocks_, 0) {}
BasicBlockProfiler::Data::~Data() {}
@@ -20,31 +34,31 @@ static void InsertIntoString(std::ostringstream* os, std::string* string) {
string->insert(0, os->str());
}
+static void InsertIntoString(const char* data, std::string* string) {
+ string->insert(0, data);
+}
void BasicBlockProfiler::Data::SetCode(std::ostringstream* os) {
InsertIntoString(os, &code_);
}
-
-void BasicBlockProfiler::Data::SetFunctionName(std::ostringstream* os) {
- InsertIntoString(os, &function_name_);
+void BasicBlockProfiler::Data::SetFunctionName(std::unique_ptr<char[]> name) {
+ InsertIntoString(name.get(), &function_name_);
}
-
void BasicBlockProfiler::Data::SetSchedule(std::ostringstream* os) {
InsertIntoString(os, &schedule_);
}
-
-void BasicBlockProfiler::Data::SetBlockId(size_t offset, size_t block_id) {
+void BasicBlockProfiler::Data::SetBlockRpoNumber(size_t offset,
+ int32_t block_rpo) {
DCHECK(offset < n_blocks_);
- block_ids_[offset] = block_id;
+ block_rpo_numbers_[offset] = block_rpo;
}
-
-uint32_t* BasicBlockProfiler::Data::GetCounterAddress(size_t offset) {
+intptr_t BasicBlockProfiler::Data::GetCounterAddress(size_t offset) {
DCHECK(offset < n_blocks_);
- return &counts_[offset];
+ return reinterpret_cast<intptr_t>(&(counts_[offset]));
}
@@ -59,6 +73,7 @@ BasicBlockProfiler::BasicBlockProfiler() {}
BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) {
+ base::LockGuard<base::Mutex> lock(&data_list_mutex_);
Data* data = new Data(n_blocks);
data_list_.push_back(data);
return data;
@@ -91,17 +106,33 @@ std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& p) {
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& d) {
+ int block_count_sum = std::accumulate(d.counts_.begin(), d.counts_.end(), 0);
+ if (block_count_sum == 0) return os;
const char* name = "unknown function";
if (!d.function_name_.empty()) {
name = d.function_name_.c_str();
}
if (!d.schedule_.empty()) {
- os << "schedule for " << name << std::endl;
+ os << "schedule for " << name << " (B0 entered " << d.counts_[0]
+ << " times)" << std::endl;
os << d.schedule_.c_str() << std::endl;
}
os << "block counts for " << name << ":" << std::endl;
+ std::vector<std::pair<int32_t, uint32_t>> pairs;
+ pairs.reserve(d.n_blocks_);
for (size_t i = 0; i < d.n_blocks_; ++i) {
- os << "block " << d.block_ids_[i] << " : " << d.counts_[i] << std::endl;
+ pairs.push_back(std::make_pair(d.block_rpo_numbers_[i], d.counts_[i]));
+ }
+ std::sort(pairs.begin(), pairs.end(),
+ [=](std::pair<int32_t, uint32_t> left,
+ std::pair<int32_t, uint32_t> right) {
+ if (right.second == left.second)
+ return left.first < right.first;
+ return right.second < left.second;
+ });
+ for (auto it : pairs) {
+ if (it.second == 0) break;
+ os << "block B" << it.first << " : " << it.second << std::endl;
}
os << std::endl;
if (!d.code_.empty()) {
diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/basic-block-profiler.h
index c3c8b649dc..975840e46e 100644
--- a/deps/v8/src/basic-block-profiler.h
+++ b/deps/v8/src/basic-block-profiler.h
@@ -11,6 +11,7 @@
#include <vector>
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/globals.h"
namespace v8 {
@@ -24,10 +25,10 @@ class BasicBlockProfiler {
const uint32_t* counts() const { return &counts_[0]; }
void SetCode(std::ostringstream* os);
- void SetFunctionName(std::ostringstream* os);
+ void SetFunctionName(std::unique_ptr<char[]> name);
void SetSchedule(std::ostringstream* os);
- void SetBlockId(size_t offset, size_t block_id);
- uint32_t* GetCounterAddress(size_t offset);
+ void SetBlockRpoNumber(size_t offset, int32_t block_rpo);
+ intptr_t GetCounterAddress(size_t offset);
private:
friend class BasicBlockProfiler;
@@ -40,7 +41,7 @@ class BasicBlockProfiler {
void ResetCounts();
const size_t n_blocks_;
- std::vector<size_t> block_ids_;
+ std::vector<int32_t> block_rpo_numbers_;
std::vector<uint32_t> counts_;
std::string function_name_;
std::string schedule_;
@@ -53,6 +54,7 @@ class BasicBlockProfiler {
BasicBlockProfiler();
~BasicBlockProfiler();
+ V8_EXPORT_PRIVATE static BasicBlockProfiler* Get();
Data* NewData(size_t n_blocks);
void ResetCounts();
@@ -63,6 +65,7 @@ class BasicBlockProfiler {
std::ostream& os, const BasicBlockProfiler& s);
DataList data_list_;
+ base::Mutex data_list_mutex_;
DISALLOW_COPY_AND_ASSIGN(BasicBlockProfiler);
};
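[Editorial note, not part of the patch] The printing change in basic-block-profiler.cc above sorts blocks by hotness before emitting them. A standalone sketch of that ordering with invented sample data: descending by execution count, ties broken by ascending RPO number, zero-count blocks suppressed.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int32_t, uint32_t>> pairs = {
      {0, 7}, {1, 0}, {2, 7}, {3, 12}};  // {rpo number, count}
  std::sort(pairs.begin(), pairs.end(),
            [](const std::pair<int32_t, uint32_t>& left,
               const std::pair<int32_t, uint32_t>& right) {
              if (right.second == left.second) return left.first < right.first;
              return right.second < left.second;  // larger counts first
            });
  for (const auto& p : pairs) {
    if (p.second == 0) break;  // the tail of never-executed blocks is elided
    std::printf("block B%d : %u\n", p.first, p.second);
  }
}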
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 656650cd64..30450b133b 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -5,8 +5,8 @@
#include "src/bootstrapper.h"
#include "src/accessors.h"
+#include "src/api-inl.h"
#include "src/api-natives.h"
-#include "src/api.h"
#include "src/base/ieee754.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
@@ -24,11 +24,16 @@
#include "src/objects/hash-table-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
+#include "src/objects/js-collator.h"
+#include "src/objects/js-list-format.h"
#include "src/objects/js-locale.h"
#endif // V8_INTL_SUPPORT
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-plural-rules.h"
#include "src/objects/js-relative-time-format.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/templates.h"
@@ -163,7 +168,7 @@ class Genesis BASE_EMBEDDED {
Handle<JSGlobalProxy> global_proxy() { return global_proxy_; }
private:
- Handle<Context> native_context() { return native_context_; }
+ Handle<NativeContext> native_context() { return native_context_; }
// Creates some basic objects. Used for creating a context from scratch.
void CreateRoots();
@@ -292,7 +297,7 @@ class Genesis BASE_EMBEDDED {
Isolate* isolate_;
Handle<Context> result_;
- Handle<Context> native_context_;
+ Handle<NativeContext> native_context_;
Handle<JSGlobalProxy> global_proxy_;
// Temporary function maps needed only during bootstrapping.
@@ -474,10 +479,10 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Isolate* isolate, Handle<JSObject> base, Handle<Name> property_name,
Handle<String> function_name, Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
+ BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
Handle<JSFunction> fun =
SimpleCreateFunction(isolate, function_name, call, len, adapt);
- if (id != kInvalidBuiltinFunctionId) {
+ if (id != BuiltinFunctionId::kInvalidBuiltinFunctionId) {
fun->shared()->set_builtin_function_id(id);
}
InstallFunction(isolate, base, fun, property_name, attrs);
@@ -488,7 +493,7 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Isolate* isolate, Handle<JSObject> base, Handle<String> name,
Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
+ BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
return SimpleInstallFunction(isolate, base, name, name, call, len, adapt,
attrs, id);
}
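[Editorial note, not part of the patch] These call sites now spell the ids as BuiltinFunctionId::kArrayKeys rather than bare kArrayKeys, which suggests the enum became scoped (or at least that its enumerators now require qualification). A standalone sketch of the pattern; the id list and installer below are illustrative only:

enum class BuiltinFunctionId { kInvalidBuiltinFunctionId, kArrayKeys, kArrayValues };

bool ShouldTagFunction(
    BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
  // Scoped enumerators do not leak into the enclosing namespace, so every
  // use must name the enum explicitly.
  return id != BuiltinFunctionId::kInvalidBuiltinFunctionId;
}

int main() { return ShouldTagFunction(BuiltinFunctionId::kArrayKeys) ? 0 : 1; }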
@@ -497,7 +502,7 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Isolate* isolate, Handle<JSObject> base, Handle<Name> property_name,
const char* function_name, Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
+ BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
// Function name does not have to be internalized.
return SimpleInstallFunction(
isolate, base, property_name,
@@ -509,7 +514,7 @@ V8_NOINLINE Handle<JSFunction> SimpleInstallFunction(
Isolate* isolate, Handle<JSObject> base, const char* name,
Builtins::Name call, int len, bool adapt,
PropertyAttributes attrs = DONT_ENUM,
- BuiltinFunctionId id = kInvalidBuiltinFunctionId) {
+ BuiltinFunctionId id = BuiltinFunctionId::kInvalidBuiltinFunctionId) {
// Although function name does not have to be internalized the property name
// will be internalized during property addition anyway, so do it here now.
return SimpleInstallFunction(isolate, base,
@@ -1104,15 +1109,15 @@ void Genesis::CreateJSProxyMaps() {
Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // proxy
- Descriptor d = Descriptor::DataField(factory()->proxy_string(),
+ Descriptor d = Descriptor::DataField(isolate(), factory()->proxy_string(),
JSProxyRevocableResult::kProxyIndex,
NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // revoke
- Descriptor d = Descriptor::DataField(factory()->revoke_string(),
- JSProxyRevocableResult::kRevokeIndex,
- NONE, Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ isolate(), factory()->revoke_string(),
+ JSProxyRevocableResult::kRevokeIndex, NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
@@ -1586,7 +1591,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
"[Symbol.hasInstance]", Builtins::kFunctionPrototypeHasInstance, 1,
true,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY),
- kFunctionHasInstance);
+ BuiltinFunctionId::kFunctionHasInstance);
native_context()->set_function_has_instance(*has_instance);
// Complete setting up function maps.
@@ -1664,7 +1669,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
isolate_->initial_object_prototype(), Builtins::kArrayConstructor);
array_function->shared()->DontAdaptArguments();
- array_function->shared()->set_builtin_function_id(kArrayConstructor);
+ array_function->shared()->set_builtin_function_id(
+ BuiltinFunctionId::kArrayConstructor);
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
@@ -1717,6 +1723,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, proto, "concat", Builtins::kArrayConcat, 1,
false);
+ SimpleInstallFunction(isolate_, proto, "copyWithin",
+ Builtins::kArrayPrototypeCopyWithin, 2, false);
+ SimpleInstallFunction(isolate_, proto, "fill",
+ Builtins::kArrayPrototypeFill, 1, false);
SimpleInstallFunction(isolate_, proto, "find",
Builtins::kArrayPrototypeFind, 1, false);
SimpleInstallFunction(isolate_, proto, "findIndex",
@@ -1725,12 +1735,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
0, false);
SimpleInstallFunction(isolate_, proto, "push",
Builtins::kArrayPrototypePush, 1, false);
+ SimpleInstallFunction(isolate_, proto, "reverse",
+ Builtins::kArrayPrototypeReverse, 0, false);
SimpleInstallFunction(isolate_, proto, "shift",
Builtins::kArrayPrototypeShift, 0, false);
SimpleInstallFunction(isolate_, proto, "unshift", Builtins::kArrayUnshift,
1, false);
SimpleInstallFunction(isolate_, proto, "slice",
Builtins::kArrayPrototypeSlice, 2, false);
+ SimpleInstallFunction(isolate_, proto, "sort",
+ Builtins::kArrayPrototypeSort, 1, false);
if (FLAG_enable_experimental_builtins) {
SimpleInstallFunction(isolate_, proto, "splice",
Builtins::kArraySpliceTorque, 2, false);
@@ -1743,13 +1757,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, proto, "indexOf", Builtins::kArrayIndexOf,
1, false);
SimpleInstallFunction(isolate_, proto, "keys",
- Builtins::kArrayPrototypeKeys, 0, true, kArrayKeys);
+ Builtins::kArrayPrototypeKeys, 0, true,
+ BuiltinFunctionId::kArrayKeys);
SimpleInstallFunction(isolate_, proto, "entries",
Builtins::kArrayPrototypeEntries, 0, true,
- kArrayEntries);
+ BuiltinFunctionId::kArrayEntries);
SimpleInstallFunction(isolate_, proto, factory->iterator_symbol(), "values",
Builtins::kArrayPrototypeValues, 0, true, DONT_ENUM,
- kArrayValues);
+ BuiltinFunctionId::kArrayValues);
SimpleInstallFunction(isolate_, proto, "forEach", Builtins::kArrayForEach,
1, false);
SimpleInstallFunction(isolate_, proto, "filter", Builtins::kArrayFilter, 1,
@@ -1781,7 +1796,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, array_iterator_prototype, "next",
Builtins::kArrayIteratorPrototypeNext, 0, true,
- kArrayIteratorNext);
+ BuiltinFunctionId::kArrayIteratorNext);
Handle<JSFunction> array_iterator_function =
CreateFunction(isolate_, factory->ArrayIterator_string(),
@@ -1799,7 +1814,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> number_fun = InstallFunction(
isolate_, global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate_->initial_object_prototype(), Builtins::kNumberConstructor);
- number_fun->shared()->set_builtin_function_id(kNumberConstructor);
+ number_fun->shared()->set_builtin_function_id(
+ BuiltinFunctionId::kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
number_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, number_fun,
@@ -1827,7 +1843,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, "valueOf",
Builtins::kNumberPrototypeValueOf, 0, true);
- // Install Intl fallback functions.
SimpleInstallFunction(isolate_, prototype, "toLocaleString",
Builtins::kNumberPrototypeToLocaleString, 0, false);
@@ -1944,7 +1959,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> string_fun = InstallFunction(
isolate_, global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate_->initial_object_prototype(), Builtins::kStringConstructor);
- string_fun->shared()->set_builtin_function_id(kStringConstructor);
+ string_fun->shared()->set_builtin_function_id(
+ BuiltinFunctionId::kStringConstructor);
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, string_fun,
@@ -2022,8 +2038,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeLastIndexOf, 1, false);
SimpleInstallFunction(isolate_, prototype, "link",
Builtins::kStringPrototypeLink, 1, true);
+#ifdef V8_INTL_SUPPORT
+ SimpleInstallFunction(isolate_, prototype, "localeCompare",
+ Builtins::kStringPrototypeLocaleCompare, 1, false);
+#else
SimpleInstallFunction(isolate_, prototype, "localeCompare",
Builtins::kStringPrototypeLocaleCompare, 1, true);
+#endif // V8_INTL_SUPPORT
SimpleInstallFunction(isolate_, prototype, "match",
Builtins::kStringPrototypeMatch, 1, true);
#ifdef V8_INTL_SUPPORT
@@ -2069,18 +2090,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeTrimStart, 0, false);
SimpleInstallFunction(isolate_, prototype, "trimRight",
Builtins::kStringPrototypeTrimEnd, 0, false);
-#ifdef V8_INTL_SUPPORT
- SimpleInstallFunction(isolate_, prototype, "toLowerCase",
- Builtins::kStringPrototypeToLowerCaseIntl, 0, true);
- SimpleInstallFunction(isolate_, prototype, "toUpperCase",
- Builtins::kStringPrototypeToUpperCaseIntl, 0, false);
-#else
SimpleInstallFunction(isolate_, prototype, "toLocaleLowerCase",
Builtins::kStringPrototypeToLocaleLowerCase, 0,
false);
SimpleInstallFunction(isolate_, prototype, "toLocaleUpperCase",
Builtins::kStringPrototypeToLocaleUpperCase, 0,
false);
+#ifdef V8_INTL_SUPPORT
+ SimpleInstallFunction(isolate_, prototype, "toLowerCase",
+ Builtins::kStringPrototypeToLowerCaseIntl, 0, true);
+ SimpleInstallFunction(isolate_, prototype, "toUpperCase",
+ Builtins::kStringPrototypeToUpperCaseIntl, 0, false);
+#else
SimpleInstallFunction(isolate_, prototype, "toLowerCase",
Builtins::kStringPrototypeToLowerCase, 0, false);
SimpleInstallFunction(isolate_, prototype, "toUpperCase",
@@ -2092,7 +2113,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, factory->iterator_symbol(),
"[Symbol.iterator]",
Builtins::kStringPrototypeIterator, 0, true,
- DONT_ENUM, kStringIterator);
+ DONT_ENUM, BuiltinFunctionId::kStringIterator);
}
{ // --- S t r i n g I t e r a t o r ---
@@ -2110,7 +2131,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, string_iterator_prototype, "next",
Builtins::kStringIteratorPrototypeNext, 0, true,
- kStringIteratorNext);
+ BuiltinFunctionId::kStringIteratorNext);
Handle<JSFunction> string_iterator_function = CreateFunction(
isolate_, factory->NewStringFromAsciiChecked("StringIterator"),
@@ -2125,7 +2146,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> symbol_fun = InstallFunction(
isolate_, global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kSymbolConstructor);
- symbol_fun->shared()->set_builtin_function_id(kSymbolConstructor);
+ symbol_fun->shared()->set_builtin_function_id(
+ BuiltinFunctionId::kSymbolConstructor);
symbol_fun->shared()->set_length(0);
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
@@ -2611,7 +2633,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- Descriptor d = Descriptor::DataField(factory->lastIndex_string(),
+ Descriptor d = Descriptor::DataField(isolate(), factory->lastIndex_string(),
JSRegExp::kLastIndexFieldIndex,
writable, Representation::Tagged());
initial_map->AppendDescriptor(&d);
@@ -2885,6 +2907,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, prototype, "formatToParts",
Builtins::kDateTimeFormatPrototypeFormatToParts, 1,
false);
+
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("format"),
+ Builtins::kDateTimeFormatPrototypeFormat, false);
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
+ isolate_, Builtins::kDateTimeFormatInternalFormat,
+ factory->empty_string(), 1);
+ native_context()->set_date_format_internal_format_shared_fun(*info);
+ }
}
{
@@ -2921,9 +2954,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> collator_constructor = InstallFunction(
- isolate_, intl, "Collator", JS_OBJECT_TYPE, Collator::kSize, 0,
- factory->the_hole_value(), Builtins::kIllegal);
- native_context()->set_intl_collator_function(*collator_constructor);
+ isolate_, intl, "Collator", JS_INTL_COLLATOR_TYPE, JSCollator::kSize,
+ 0, factory->the_hole_value(), Builtins::kCollatorConstructor);
+ collator_constructor->shared()->DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(isolate_, collator_constructor,
+ Context::INTL_COLLATOR_FUNCTION_INDEX);
Handle<JSObject> prototype(
JSObject::cast(collator_constructor->prototype()), isolate_);
@@ -2933,6 +2968,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, factory->to_string_tag_symbol(),
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("compare"),
+ Builtins::kCollatorPrototypeCompare, false);
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
+ isolate_, Builtins::kCollatorInternalCompare,
+ factory->empty_string(), 2);
+ native_context()->set_collator_internal_compare_shared_fun(*info);
+ }
}
{
@@ -2951,14 +2997,29 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, factory->to_string_tag_symbol(),
factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ SimpleInstallGetter(isolate_, prototype,
+ factory->InternalizeUtf8String("adoptText"),
+ Builtins::kBreakIteratorPrototypeAdoptText, false);
+
+ {
+ Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
+ isolate_, Builtins::kBreakIteratorInternalAdoptText,
+ factory->empty_string(), 1);
+ native_context()->set_break_iterator_internal_adopt_text_shared_fun(
+ *info);
+ }
}
{
Handle<JSFunction> plural_rules_constructor = InstallFunction(
- isolate_, intl, "PluralRules", JS_OBJECT_TYPE, PluralRules::kSize, 0,
- factory->the_hole_value(), Builtins::kIllegal);
- native_context()->set_intl_plural_rules_function(
- *plural_rules_constructor);
+ isolate_, intl, "PluralRules", JS_INTL_PLURAL_RULES_TYPE,
+ JSPluralRules::kSize, 0, factory->the_hole_value(),
+ Builtins::kPluralRulesConstructor);
+ plural_rules_constructor->shared()->DontAdaptArguments();
+ InstallWithIntrinsicDefaultProto(
+ isolate_, plural_rules_constructor,
+ Context::INTL_PLURAL_RULES_FUNCTION_INDEX);
Handle<JSObject> prototype(
JSObject::cast(plural_rules_constructor->prototype()), isolate_);
@@ -3056,29 +3117,29 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeBuffer, false);
SimpleInstallGetter(isolate_, prototype, factory->byte_length_string(),
Builtins::kTypedArrayPrototypeByteLength, true,
- kTypedArrayByteLength);
+ BuiltinFunctionId::kTypedArrayByteLength);
SimpleInstallGetter(isolate_, prototype, factory->byte_offset_string(),
Builtins::kTypedArrayPrototypeByteOffset, true,
- kTypedArrayByteOffset);
+ BuiltinFunctionId::kTypedArrayByteOffset);
SimpleInstallGetter(isolate_, prototype, factory->length_string(),
Builtins::kTypedArrayPrototypeLength, true,
- kTypedArrayLength);
+ BuiltinFunctionId::kTypedArrayLength);
SimpleInstallGetter(isolate_, prototype, factory->to_string_tag_symbol(),
Builtins::kTypedArrayPrototypeToStringTag, true,
- kTypedArrayToStringTag);
+ BuiltinFunctionId::kTypedArrayToStringTag);
// Install "keys", "values" and "entries" methods on the {prototype}.
SimpleInstallFunction(isolate_, prototype, "entries",
Builtins::kTypedArrayPrototypeEntries, 0, true,
- kTypedArrayEntries);
+ BuiltinFunctionId::kTypedArrayEntries);
SimpleInstallFunction(isolate_, prototype, "keys",
Builtins::kTypedArrayPrototypeKeys, 0, true,
- kTypedArrayKeys);
+ BuiltinFunctionId::kTypedArrayKeys);
Handle<JSFunction> values = SimpleInstallFunction(
isolate_, prototype, "values", Builtins::kTypedArrayPrototypeValues, 0,
- true, kTypedArrayValues);
+ true, BuiltinFunctionId::kTypedArrayValues);
JSObject::AddProperty(isolate_, prototype, factory->iterator_symbol(),
values, DONT_ENUM);
@@ -3124,7 +3185,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- T y p e d A r r a y s
-#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype) \
{ \
Handle<JSFunction> fun = \
InstallTypedArray(#Type "Array", TYPE##_ELEMENTS); \
@@ -3159,13 +3220,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// on the {prototype}.
SimpleInstallGetter(isolate_, prototype, factory->buffer_string(),
Builtins::kDataViewPrototypeGetBuffer, false,
- kDataViewBuffer);
+ BuiltinFunctionId::kDataViewBuffer);
SimpleInstallGetter(isolate_, prototype, factory->byte_length_string(),
Builtins::kDataViewPrototypeGetByteLength, false,
- kDataViewByteLength);
+ BuiltinFunctionId::kDataViewByteLength);
SimpleInstallGetter(isolate_, prototype, factory->byte_offset_string(),
Builtins::kDataViewPrototypeGetByteOffset, false,
- kDataViewByteOffset);
+ BuiltinFunctionId::kDataViewByteOffset);
SimpleInstallFunction(isolate_, prototype, "getInt8",
Builtins::kDataViewPrototypeGetInt8, 1, false);
@@ -3326,7 +3387,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
Descriptor d =
- Descriptor::DataField(factory->to_string_tag_symbol(),
+ Descriptor::DataField(isolate(), factory->to_string_tag_symbol(),
JSModuleNamespace::kToStringTagFieldIndex,
attribs, Representation::Tagged());
map->AppendDescriptor(&d);
@@ -3340,14 +3401,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // value
- Descriptor d = Descriptor::DataField(factory->value_string(),
+ Descriptor d = Descriptor::DataField(isolate(), factory->value_string(),
JSIteratorResult::kValueIndex, NONE,
Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // done
- Descriptor d = Descriptor::DataField(factory->done_string(),
+ Descriptor d = Descriptor::DataField(isolate(), factory->done_string(),
JSIteratorResult::kDoneIndex, NONE,
Representation::Tagged());
map->AppendDescriptor(&d);
@@ -3423,21 +3484,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- P r o x y
CreateJSProxyMaps();
-
// Proxy function map has prototype slot for storing initial map but does
// not have a prototype property.
Handle<Map> proxy_function_map = Map::Copy(
isolate_, isolate_->strict_function_without_prototype_map(), "Proxy");
- // Re-set the unused property fields after changing the instance size.
- // TODO(ulan): Do not change instance size after map creation.
- int unused_property_fields = proxy_function_map->UnusedPropertyFields();
- proxy_function_map->set_instance_size(JSFunction::kSizeWithPrototype);
- // The prototype slot shifts the in-object properties area by one slot.
- proxy_function_map->SetInObjectPropertiesStartInWords(
- proxy_function_map->GetInObjectPropertiesStartInWords() + 1);
- proxy_function_map->set_has_prototype_slot(true);
proxy_function_map->set_is_constructor(true);
- proxy_function_map->SetInObjectUnusedPropertyFields(unused_property_fields);
Handle<String> name = factory->Proxy_string();
@@ -3445,8 +3496,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
name, proxy_function_map, Builtins::kProxyConstructor);
Handle<JSFunction> proxy_function = factory->NewFunction(args);
- JSFunction::SetInitialMap(proxy_function, isolate_->proxy_map(),
- factory->null_value());
+ isolate_->proxy_map()->SetConstructor(*proxy_function);
proxy_function->shared()->set_internal_formal_parameter_count(2);
proxy_function->shared()->set_length(2);
@@ -3455,6 +3505,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallFunction(isolate_, global, name, proxy_function,
factory->Object_string());
+ DCHECK(!proxy_function->has_prototype_property());
+
SimpleInstallFunction(isolate_, proxy_function, "revocable",
Builtins::kProxyRevocable, 2, true);
@@ -3558,15 +3610,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // length
- Descriptor d = Descriptor::DataField(
- factory->length_string(), JSSloppyArgumentsObject::kLengthIndex,
- DONT_ENUM, Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory->length_string(),
+ JSSloppyArgumentsObject::kLengthIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
- Descriptor d = Descriptor::DataField(
- factory->callee_string(), JSSloppyArgumentsObject::kCalleeIndex,
- DONT_ENUM, Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory->callee_string(),
+ JSSloppyArgumentsObject::kCalleeIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
// @@iterator method is added later.
@@ -3610,9 +3664,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::EnsureDescriptorSlack(isolate_, map, 2);
{ // length
- Descriptor d = Descriptor::DataField(
- factory->length_string(), JSStrictArgumentsObject::kLengthIndex,
- DONT_ENUM, Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory->length_string(),
+ JSStrictArgumentsObject::kLengthIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
@@ -3765,9 +3820,10 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, Compiler::ScriptDetails(script_name), ScriptOriginOptions(),
- nullptr, nullptr, ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheNoReason, natives_flag);
+ isolate, source, Compiler::ScriptDetails(script_name),
+ ScriptOriginOptions(), nullptr, nullptr,
+ ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
+ natives_flag);
Handle<SharedFunctionInfo> function_info;
if (!maybe_function_info.ToHandle(&function_info)) return false;
@@ -3830,8 +3886,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, Compiler::ScriptDetails(script_name), ScriptOriginOptions(),
- extension, nullptr, ScriptCompiler::kNoCompileOptions,
+ isolate, source, Compiler::ScriptDetails(script_name),
+ ScriptOriginOptions(), extension, nullptr,
+ ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(isolate, name, function_info);
@@ -3917,7 +3974,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSObject> container) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- Handle<Context> native_context = isolate->native_context();
+ Handle<NativeContext> native_context = isolate->native_context();
#define EXPORT_PRIVATE_SYMBOL(NAME) \
Handle<String> NAME##_name = factory->NewStringFromAsciiChecked(#NAME); \
JSObject::AddProperty(isolate, container, NAME##_name, factory->NAME(), NONE);
@@ -4017,7 +4074,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// Install the next function on the {prototype}.
SimpleInstallFunction(isolate, prototype, "next",
Builtins::kSetIteratorPrototypeNext, 0, true,
- kSetIteratorNext);
+ BuiltinFunctionId::kSetIteratorNext);
// Setup SetIterator constructor.
Handle<JSFunction> set_iterator_function = InstallFunction(
@@ -4052,7 +4109,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// Install the next function on the {prototype}.
SimpleInstallFunction(isolate, prototype, "next",
Builtins::kMapIteratorPrototypeNext, 0, true,
- kMapIteratorNext);
+ BuiltinFunctionId::kMapIteratorNext);
// Setup MapIterator constructor.
Handle<JSFunction> map_iterator_function = InstallFunction(
@@ -4215,6 +4272,17 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
+void Genesis::InitializeGlobal_harmony_global() {
+ if (!FLAG_harmony_global) return;
+
+ Factory* factory = isolate()->factory();
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+ Handle<JSGlobalProxy> global_proxy(native_context()->global_proxy(),
+ isolate());
+ JSObject::AddProperty(isolate_, global, factory->globalThis_string(),
+ global_proxy, DONT_ENUM);
+}
+
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -4369,7 +4437,7 @@ void Genesis::InitializeGlobal_harmony_string_matchall() {
JS_REGEXP_STRING_ITERATOR_TYPE, JSRegExpStringIterator::kSize, 0,
regexp_string_iterator_prototype, Builtins::kIllegal);
regexp_string_iterator_function->shared()->set_native(false);
- native_context()->set_initial_regexp_string_iterator_prototype_map_index(
+ native_context()->set_initial_regexp_string_iterator_prototype_map(
regexp_string_iterator_function->initial_map());
}
@@ -4398,7 +4466,8 @@ void Genesis::InitializeGlobal_harmony_bigint() {
Handle<JSFunction> bigint_fun = InstallFunction(
isolate(), global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kBigIntConstructor);
- bigint_fun->shared()->set_builtin_function_id(kBigIntConstructor);
+ bigint_fun->shared()->set_builtin_function_id(
+ BuiltinFunctionId::kBigIntConstructor);
bigint_fun->shared()->DontAdaptArguments();
bigint_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate(), bigint_fun,
@@ -4501,6 +4570,39 @@ void Genesis::InitializeGlobal_harmony_await_optimization() {
}
#ifdef V8_INTL_SUPPORT
+void Genesis::InitializeGlobal_harmony_intl_list_format() {
+ if (!FLAG_harmony_intl_list_format) return;
+ Handle<JSObject> intl = Handle<JSObject>::cast(
+ JSReceiver::GetProperty(
+ isolate(),
+ Handle<JSReceiver>(native_context()->global_object(), isolate()),
+ factory()->InternalizeUtf8String("Intl"))
+ .ToHandleChecked());
+
+ Handle<JSFunction> list_format_fun =
+ InstallFunction(isolate(), intl, "ListFormat", JS_INTL_LIST_FORMAT_TYPE,
+ JSListFormat::kSize, 0, factory()->the_hole_value(),
+ Builtins::kListFormatConstructor);
+ list_format_fun->shared()->set_length(0);
+ list_format_fun->shared()->DontAdaptArguments();
+
+ // Setup %ListFormatPrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(list_format_fun->instance_prototype()), isolate());
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromStaticChars("Intl.ListFormat"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
+ Builtins::kListFormatPrototypeResolvedOptions, 0,
+ false);
+ SimpleInstallFunction(isolate(), prototype, "format",
+ Builtins::kListFormatPrototypeFormat, 1, false);
+ SimpleInstallFunction(isolate(), prototype, "formatToParts",
+ Builtins::kListFormatPrototypeFormatToParts, 1, false);
+}
void Genesis::InitializeGlobal_harmony_locale() {
if (!FLAG_harmony_locale) return;
@@ -4531,6 +4633,10 @@ void Genesis::InitializeGlobal_harmony_locale() {
SimpleInstallFunction(isolate(), prototype, "toString",
Builtins::kLocalePrototypeToString, 0, false);
+ SimpleInstallFunction(isolate(), prototype, "maximize",
+ Builtins::kLocalePrototypeMaximize, 0, false);
+ SimpleInstallFunction(isolate(), prototype, "minimize",
+ Builtins::kLocalePrototypeMinimize, 0, false);
// Base locale getters.
SimpleInstallGetter(isolate(), prototype,
factory()->InternalizeUtf8String("language"),
@@ -4587,13 +4693,19 @@ void Genesis::InitializeGlobal_harmony_intl_relative_time_format() {
isolate());
// Install the @@toStringTag property on the {prototype}.
- JSObject::AddProperty(isolate(), prototype, factory()->to_string_tag_symbol(),
- factory()->Object_string(),
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ isolate(), prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromStaticChars("Intl.RelativeTimeFormat"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
SimpleInstallFunction(isolate(), prototype, "resolvedOptions",
Builtins::kRelativeTimeFormatPrototypeResolvedOptions,
0, false);
+ SimpleInstallFunction(isolate(), prototype, "format",
+ Builtins::kRelativeTimeFormatPrototypeFormat, 2, false);
+ SimpleInstallFunction(isolate(), prototype, "formatToParts",
+ Builtins::kRelativeTimeFormatPrototypeFormatToParts, 2,
+ false);
}
#endif // V8_INTL_SUPPORT
@@ -4622,9 +4734,10 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
switch (array_buffer_kind) {
case ARRAY_BUFFER:
- SimpleInstallFunction(
- isolate(), array_buffer_fun, factory()->isView_string(),
- Builtins::kArrayBufferIsView, 1, true, DONT_ENUM, kArrayBufferIsView);
+ SimpleInstallFunction(isolate(), array_buffer_fun,
+ factory()->isView_string(),
+ Builtins::kArrayBufferIsView, 1, true, DONT_ENUM,
+ BuiltinFunctionId::kArrayBufferIsView);
// Install the "byteLength" getter on the {prototype}.
SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
@@ -4807,29 +4920,33 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Install Global.decodeURI.
SimpleInstallFunction(isolate(), global_object, "decodeURI",
- Builtins::kGlobalDecodeURI, 1, false, kGlobalDecodeURI);
+ Builtins::kGlobalDecodeURI, 1, false,
+ BuiltinFunctionId::kGlobalDecodeURI);
// Install Global.decodeURIComponent.
SimpleInstallFunction(isolate(), global_object, "decodeURIComponent",
Builtins::kGlobalDecodeURIComponent, 1, false,
- kGlobalDecodeURIComponent);
+ BuiltinFunctionId::kGlobalDecodeURIComponent);
// Install Global.encodeURI.
SimpleInstallFunction(isolate(), global_object, "encodeURI",
- Builtins::kGlobalEncodeURI, 1, false, kGlobalEncodeURI);
+ Builtins::kGlobalEncodeURI, 1, false,
+ BuiltinFunctionId::kGlobalEncodeURI);
// Install Global.encodeURIComponent.
SimpleInstallFunction(isolate(), global_object, "encodeURIComponent",
Builtins::kGlobalEncodeURIComponent, 1, false,
- kGlobalEncodeURIComponent);
+ BuiltinFunctionId::kGlobalEncodeURIComponent);
// Install Global.escape.
SimpleInstallFunction(isolate(), global_object, "escape",
- Builtins::kGlobalEscape, 1, false, kGlobalEscape);
+ Builtins::kGlobalEscape, 1, false,
+ BuiltinFunctionId::kGlobalEscape);
// Install Global.unescape.
SimpleInstallFunction(isolate(), global_object, "unescape",
- Builtins::kGlobalUnescape, 1, false, kGlobalUnescape);
+ Builtins::kGlobalUnescape, 1, false,
+ BuiltinFunctionId::kGlobalUnescape);
// Install Global.eval.
{
@@ -4841,11 +4958,13 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Install Global.isFinite
SimpleInstallFunction(isolate(), global_object, "isFinite",
- Builtins::kGlobalIsFinite, 1, true, kGlobalIsFinite);
+ Builtins::kGlobalIsFinite, 1, true,
+ BuiltinFunctionId::kGlobalIsFinite);
// Install Global.isNaN
SimpleInstallFunction(isolate(), global_object, "isNaN",
- Builtins::kGlobalIsNaN, 1, true, kGlobalIsNaN);
+ Builtins::kGlobalIsNaN, 1, true,
+ BuiltinFunctionId::kGlobalIsNaN);
// Install Array builtin functions.
{
@@ -4887,27 +5006,29 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Map::EnsureDescriptorSlack(isolate(), map, 4);
{ // get
- Descriptor d = Descriptor::DataField(
- factory()->get_string(), JSAccessorPropertyDescriptor::kGetIndex,
- NONE, Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory()->get_string(),
+ JSAccessorPropertyDescriptor::kGetIndex, NONE,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // set
- Descriptor d = Descriptor::DataField(
- factory()->set_string(), JSAccessorPropertyDescriptor::kSetIndex,
- NONE, Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory()->set_string(),
+ JSAccessorPropertyDescriptor::kSetIndex, NONE,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // enumerable
Descriptor d =
- Descriptor::DataField(factory()->enumerable_string(),
+ Descriptor::DataField(isolate(), factory()->enumerable_string(),
JSAccessorPropertyDescriptor::kEnumerableIndex,
NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // configurable
Descriptor d = Descriptor::DataField(
- factory()->configurable_string(),
+ isolate(), factory()->configurable_string(),
JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
Representation::Tagged());
map->AppendDescriptor(&d);
@@ -4931,28 +5052,29 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Map::EnsureDescriptorSlack(isolate(), map, 4);
{ // value
- Descriptor d = Descriptor::DataField(
- factory()->value_string(), JSDataPropertyDescriptor::kValueIndex,
- NONE, Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(isolate(), factory()->value_string(),
+ JSDataPropertyDescriptor::kValueIndex, NONE,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // writable
Descriptor d =
- Descriptor::DataField(factory()->writable_string(),
+ Descriptor::DataField(isolate(), factory()->writable_string(),
JSDataPropertyDescriptor::kWritableIndex, NONE,
Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // enumerable
Descriptor d =
- Descriptor::DataField(factory()->enumerable_string(),
+ Descriptor::DataField(isolate(), factory()->enumerable_string(),
JSDataPropertyDescriptor::kEnumerableIndex,
NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // configurable
Descriptor d =
- Descriptor::DataField(factory()->configurable_string(),
+ Descriptor::DataField(isolate(), factory()->configurable_string(),
JSDataPropertyDescriptor::kConfigurableIndex,
NONE, Representation::Tagged());
map->AppendDescriptor(&d);
@@ -5007,7 +5129,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// index descriptor.
{
- Descriptor d = Descriptor::DataField(factory()->index_string(),
+ Descriptor d = Descriptor::DataField(isolate(), factory()->index_string(),
JSRegExpResult::kIndexIndex, NONE,
Representation::Tagged());
initial_map->AppendDescriptor(&d);
@@ -5015,7 +5137,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// input descriptor.
{
- Descriptor d = Descriptor::DataField(factory()->input_string(),
+ Descriptor d = Descriptor::DataField(isolate(), factory()->input_string(),
JSRegExpResult::kInputIndex, NONE,
Representation::Tagged());
initial_map->AppendDescriptor(&d);
@@ -5023,9 +5145,9 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// groups descriptor.
{
- Descriptor d = Descriptor::DataField(factory()->groups_string(),
- JSRegExpResult::kGroupsIndex, NONE,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ isolate(), factory()->groups_string(), JSRegExpResult::kGroupsIndex,
+ NONE, Representation::Tagged());
initial_map->AppendDescriptor(&d);
}
@@ -5124,11 +5246,8 @@ static void InstallBuiltinFunctionId(Isolate* isolate, Handle<JSObject> holder,
function->shared()->set_builtin_function_id(id);
}
-
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
- { #holder_expr, #fun_name, k##name } \
- ,
-
+ {#holder_expr, #fun_name, BuiltinFunctionId::k##name},
void Genesis::InstallBuiltinFunctionIds() {
HandleScope scope(isolate());
@@ -5170,12 +5289,10 @@ bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
bool Genesis::InstallSpecialObjects(Isolate* isolate,
Handle<Context> native_context) {
- Factory* factory = isolate->factory();
HandleScope scope(isolate);
Handle<JSObject> Error = isolate->error_function();
- Handle<String> name =
- factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("stackTraceLimit"));
+ Handle<String> name = isolate->factory()->stackTraceLimit_string();
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(isolate, Error, name, stack_trace_limit, NONE);
@@ -5558,12 +5675,15 @@ Genesis::Genesis(
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
// Also create a context from scratch to expose natives, if required by flag.
- if (!isolate->initialized_from_snapshot() ||
- !Snapshot::NewContextFromSnapshot(isolate, global_proxy,
- context_snapshot_index,
- embedder_fields_deserializer)
- .ToHandle(&native_context_)) {
- native_context_ = Handle<Context>();
+ DCHECK(native_context_.is_null());
+ if (isolate->initialized_from_snapshot()) {
+ Handle<Context> context;
+ if (Snapshot::NewContextFromSnapshot(isolate, global_proxy,
+ context_snapshot_index,
+ embedder_fields_deserializer)
+ .ToHandle(&context)) {
+ native_context_ = Handle<NativeContext>::cast(context);
+ }
}
if (!native_context().is_null()) {
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index a5219bf070..c18811a4b6 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -13,7 +13,9 @@
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -2217,7 +2219,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Convert to Smi for the runtime call.
__ SmiTag(r4, r4);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
@@ -2414,7 +2416,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label negate, done;
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
UseScratchRegisterScope temps(masm);
Register result_reg = r7;
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 2254f010c1..61fee9013b 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -13,7 +13,9 @@
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -74,7 +76,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ PushArgument(x1);
__ CallRuntime(function_id, 1);
- __ Move(x2, x0);
+ __ Mov(x2, x0);
// Restore target function and new target.
__ Pop(padreg, x3, x1, x0);
@@ -494,8 +496,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
- __ Move(x3, x1);
- __ Move(x1, x4);
+ __ Mov(x3, x1);
+ __ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
@@ -2675,7 +2677,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ sxtw(x8, w8);
__ SmiTag(x8, x8);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
@@ -2696,7 +2698,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
- __ Move(cp, Smi::kZero);
+ __ Mov(cp, Smi::kZero);
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, x2);
// The entrypoint address is the return value.
__ mov(x8, kReturnRegister0);
@@ -2717,7 +2719,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// fall-back Abort mechanism.
//
// Note that this stub must be generated before any use of Abort.
- MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ HardAbortScope hard_aborts(masm);
ASM_LOCATION("CEntry::Generate entry");
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -2937,7 +2939,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
DCHECK(result.Is64Bits());
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
UseScratchRegisterScope temps(masm);
Register scratch1 = temps.AcquireX();
Register scratch2 = temps.AcquireX();
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
new file mode 100644
index 0000000000..8406123b20
--- /dev/null
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -0,0 +1,93 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+ macro ConvertToRelativeIndex(index: Number, length: Number): Number {
+ return index < 0 ? max(index + length, 0) : min(index, length);
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
+ javascript builtin ArrayPrototypeCopyWithin(
+ context: Context, receiver: Object, ...arguments): Object {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(context, object);
+
+ // 3. Let relativeTarget be ? ToInteger(target).
+ const relative_target: Number = ToInteger_Inline(context, arguments[0]);
+
+ // 4. If relativeTarget < 0, let to be max((len + relativeTarget), 0);
+ // else let to be min(relativeTarget, len).
+ let to: Number = ConvertToRelativeIndex(relative_target, length);
+
+ // 5. Let relativeStart be ? ToInteger(start).
+ const relative_start: Number = ToInteger_Inline(context, arguments[1]);
+
+ // 6. If relativeStart < 0, let from be max((len + relativeStart), 0);
+ // else let from be min(relativeStart, len).
+ let from: Number = ConvertToRelativeIndex(relative_start, length);
+
+ // 7. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ? ToInteger(end).
+ let relative_end: Number = length;
+ if (arguments[2] != Undefined) {
+ relative_end = ToInteger_Inline(context, arguments[2]);
+ }
+
+ // 8. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
+ // else let final be min(relativeEnd, len).
+ const final: Number = ConvertToRelativeIndex(relative_end, length);
+
+ // 9. Let count be min(final-from, len-to).
+ let count: Number = min(final - from, length - to);
+
+ // 10. If from<to and to<from+count, then.
+ let direction: Number = 1;
+
+ if (from < to && to < (from + count)) {
+ // a. Let direction be -1.
+ direction = -1;
+
+ // b. Let from be from + count - 1.
+ from = from + count - 1;
+
+ // c. Let to be to + count - 1.
+ to = to + count - 1;
+ }
+
+ // 12. Repeat, while count > 0.
+ while (count > 0) {
+ // a. Let fromKey be ! ToString(from).
+ // b. Let toKey be ! ToString(to).
+ // c. Let fromPresent be ? HasProperty(O, fromKey).
+ const from_present: Boolean = HasProperty(context, object, from);
+
+ // d. If fromPresent is true, then.
+ if (from_present == True) {
+ // i. Let fromVal be ? Get(O, fromKey).
+ const from_val: Object = GetProperty(context, object, from);
+
+ // ii. Perform ? Set(O, toKey, fromVal, true).
+ SetProperty(context, object, to, from_val);
+ } else {
+ // i. Perform ? DeletePropertyOrThrow(O, toKey).
+ DeleteProperty(context, object, to, kStrict);
+ }
+
+ // f. Let from be from + direction.
+ from = from + direction;
+
+ // g. Let to be to + direction.
+ to = to + direction;
+
+ // h. Let count be count - 1.
+ --count;
+ }
+
+ // 13. Return O.
+ return object;
+ }
+}
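The new array-copywithin.tq above follows the numbered spec steps directly. As a rough aid for reading the Torque, a minimal TypeScript sketch of the same clamping-and-copy logic (illustrative only, not part of this patch; names are made up) is:

// Sketch of the copyWithin algorithm implemented by the builtin above.
function copyWithinSketch(o: unknown[], target: number, start: number, end?: number): unknown[] {
  const len = o.length;
  // ConvertToRelativeIndex: negative indices count from the end, then clamp to [0, len].
  const clamp = (i: number) => (i < 0 ? Math.max(i + len, 0) : Math.min(i, len));
  let to = clamp(Math.trunc(target));
  let from = clamp(Math.trunc(start));
  const final = clamp(end === undefined ? len : Math.trunc(end));
  let count = Math.min(final - from, len - to);
  let direction = 1;
  if (from < to && to < from + count) {
    // Source and destination overlap: copy backwards to avoid clobbering.
    direction = -1;
    from += count - 1;
    to += count - 1;
  }
  while (count > 0) {
    // The real builtin additionally checks HasProperty and deletes holes;
    // this sketch assumes a dense array.
    o[to] = o[from];
    from += direction;
    to += direction;
    count--;
  }
  return o;
}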
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index 9919f9e395..c0e19c0803 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -4,21 +4,21 @@
module array {
macro ArrayForEachTorqueContinuation(
- context: Context, o: Object, len: Number, callbackfn: Callable,
+ context: Context, o: JSReceiver, len: Number, callbackfn: Callable,
thisArg: Object, initial_k: Smi): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Smi = initial_k; k < len; k = k + 1) {
// 6a. Let Pk be ! ToString(k).
- let pK: String = ToString_Inline(context, k);
+ const pK: String = ToString_Inline(context, k);
// 6b. Let kPresent be ? HasProperty(O, Pk).
- let kPresent: Oddball = HasPropertyObject(o, pK, context, kHasProperty);
+ const kPresent: Boolean = HasProperty(context, o, pK);
// 6c. If kPresent is true, then
if (kPresent == True) {
// 6c. i. Let kValue be ? Get(O, Pk).
- let kValue: Object = GetProperty(context, o, pK);
+ const kValue: Object = GetProperty(context, o, pK);
// 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
Call(context, callbackfn, thisArg, kValue, k, o);
@@ -32,30 +32,39 @@ module array {
javascript builtin ArrayForEachLoopEagerDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object): Object {
+ // The unsafe cast is safe because all continuation points in forEach are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver: JSReceiver = unsafe_cast<JSReceiver>(receiver);
return ArrayForEachLoopContinuation(
- context, receiver, callback, thisArg, Undefined, receiver, initialK,
+ context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK,
length, Undefined);
}
javascript builtin ArrayForEachLoopLazyDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object, result: Object): Object {
+ // The unsafe cast is safe because all continuation points in forEach are
+ // after the ToObject(O) call that ensures we are dealing with a
+ // JSReceiver.
+ const jsreceiver: JSReceiver = unsafe_cast<JSReceiver>(receiver);
return ArrayForEachLoopContinuation(
- context, receiver, callback, thisArg, Undefined, receiver, initialK,
+ context, jsreceiver, callback, thisArg, Undefined, jsreceiver, initialK,
length, Undefined);
}
builtin ArrayForEachLoopContinuation(
- context: Context, receiver: Object, callback: Object, thisArg: Object,
+ context: Context, receiver: JSReceiver, callback: Object, thisArg: Object,
array: Object, object: Object, initialK: Object, length: Object,
to: Object): Object {
try {
- let callbackfn: Callable = cast<Callable>(callback) otherwise Unexpected;
- let k: Smi = cast<Smi>(initialK) otherwise Unexpected;
- let number_length: Number = cast<Number>(length) otherwise Unexpected;
+ const callbackfn: Callable =
+ cast<Callable>(callback) otherwise Unexpected;
+ const k: Smi = cast<Smi>(initialK) otherwise Unexpected;
+ const number_length: Number = cast<Number>(length) otherwise Unexpected;
return ArrayForEachTorqueContinuation(
- context, object, number_length, callbackfn, thisArg, k);
+ context, receiver, number_length, callbackfn, thisArg, k);
}
label Unexpected {
unreachable;
@@ -67,7 +76,7 @@ module array {
thisArg: Object): void labels
Bailout(Smi) {
let k: Smi = 0;
- let map: Map = a.map;
+ const map: Map = a.map;
try {
// Build a fast loop over the smi array.
@@ -78,7 +87,7 @@ module array {
if (k >= a.length) goto Slow;
try {
- let value: Object =
+ const value: Object =
LoadElementNoHole<FixedArrayType>(a, k) otherwise FoundHole;
Call(context, callbackfn, thisArg, value, k, a);
}
@@ -97,17 +106,17 @@ module array {
}
macro FastArrayForEach(
- context: Context, o: Object, len: Number, callbackfn: Callable,
+ context: Context, o: JSReceiver, len: Number, callbackfn: Callable,
thisArg: Object): Object labels
Bailout(Smi) {
let k: Smi = 0;
try {
- let smi_len: Smi = cast<Smi>(len) otherwise Slow;
- let a: JSArray = cast<JSArray>(o) otherwise Slow;
- let map: Map = a.map;
+ const smi_len: Smi = cast<Smi>(len) otherwise Slow;
+ const a: JSArray = cast<JSArray>(o) otherwise Slow;
+ const map: Map = a.map;
if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow;
- let elementsKind: ElementsKind = map.elements_kind;
+ const elementsKind: ElementsKind = map.elements_kind;
if (!IsFastElementsKind(elementsKind)) goto Slow;
if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
@@ -134,20 +143,20 @@ module array {
}
// 1. Let O be ? ToObject(this value).
- let o: Object = ToObject(context, receiver);
+ const o: JSReceiver = ToObject_Inline(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
- let len: Number = GetLengthProperty(context, o);
+ const len: Number = GetLengthProperty(context, o);
// 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
if (arguments.length == 0) {
goto TypeError;
}
- let callbackfn: Callable =
+ const callbackfn: Callable =
cast<Callable>(arguments[0]) otherwise TypeError;
// 4. If thisArg is present, let T be thisArg; else let T be undefined.
- let thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
// Special cases.
let k: Smi = 0;
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
new file mode 100644
index 0000000000..8db542ddef
--- /dev/null
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -0,0 +1,190 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module array {
+ macro LoadElement<ElementsAccessor : type, T : type>(
+ elements: FixedArrayBase, index: Smi): T;
+
+ LoadElement<FastPackedSmiElements, Smi>(
+ elements: FixedArrayBase, index: Smi): Smi {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ return unsafe_cast<Smi>(elems[index]);
+ }
+
+ LoadElement<FastPackedObjectElements, Object>(
+ elements: FixedArrayBase, index: Smi): Object {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ return elems[index];
+ }
+
+ LoadElement<FastPackedDoubleElements, float64>(
+ elements: FixedArrayBase, index: Smi): float64 {
+ try {
+ const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
+ return LoadDoubleWithHoleCheck(elems, index) otherwise Hole;
+ }
+ label Hole {
+ // This macro is only used for PACKED_DOUBLE, loading the hole should
+ // be impossible.
+ unreachable;
+ }
+ }
+
+ macro StoreElement<ElementsAccessor : type, T : type>(
+ elements: FixedArrayBase, index: Smi, value: T);
+
+ StoreElement<FastPackedSmiElements, Smi>(
+ elements: FixedArrayBase, index: Smi, value: Smi) {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
+ }
+
+ StoreElement<FastPackedObjectElements, Object>(
+ elements: FixedArrayBase, index: Smi, value: Object) {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ elems[index] = value;
+ }
+
+ StoreElement<FastPackedDoubleElements, float64>(
+ elements: FixedArrayBase, index: Smi, value: float64) {
+ const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
+
+ assert(value == Float64SilenceNaN(value));
+ StoreFixedDoubleArrayElementWithSmiIndex(elems, index, value);
+ }
+
+ // Fast-path for all PACKED_* elements kinds. These do not need to check
+ // whether a property is present, so we can simply swap them using fast
+ // FixedArray loads/stores.
+ macro FastPackedArrayReverse<Accessor : type, T : type>(
+ elements: FixedArrayBase, length: Smi) {
+ let lower: Smi = 0;
+ let upper: Smi = length - 1;
+
+ while (lower < upper) {
+ const lower_value: T = LoadElement<Accessor, T>(elements, lower);
+ const upper_value: T = LoadElement<Accessor, T>(elements, upper);
+ StoreElement<Accessor, T>(elements, lower, upper_value);
+ StoreElement<Accessor, T>(elements, upper, lower_value);
+ ++lower;
+ --upper;
+ }
+ }
+
+ macro GenericArrayReverse(context: Context, receiver: Object): Object {
+ // 1. Let O be ? ToObject(this value).
+ const object: JSReceiver = ToObject_Inline(context, receiver);
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ const length: Number = GetLengthProperty(context, object);
+
+ // 3. Let middle be floor(len / 2).
+ // 4. Let lower be 0.
+ // 5. Repeat, while lower != middle.
+ // a. Let upper be len - lower - 1.
+
+ // Instead of calculating the middle value, we simply initialize upper
+ // with len - 1 and decrement it after each iteration.
+ let lower: Number = 0;
+ let upper: Number = length - 1;
+
+ while (lower < upper) {
+ let lower_value: Object = Undefined;
+ let upper_value: Object = Undefined;
+
+ // b. Let upperP be ! ToString(upper).
+ // c. Let lowerP be ! ToString(lower).
+ // d. Let lowerExists be ? HasProperty(O, lowerP).
+ const lower_exists: Boolean = HasProperty(context, object, lower);
+
+ // e. If lowerExists is true, then.
+ if (lower_exists == True) {
+ // i. Let lowerValue be ? Get(O, lowerP).
+ lower_value = GetProperty(context, object, lower);
+ }
+
+ // f. Let upperExists be ? HasProperty(O, upperP).
+ const upper_exists: Boolean = HasProperty(context, object, upper);
+
+ // g. If upperExists is true, then.
+ if (upper_exists == True) {
+ // i. Let upperValue be ? Get(O, upperP).
+ upper_value = GetProperty(context, object, upper);
+ }
+
+ // h. If lowerExists is true and upperExists is true, then
+ if (lower_exists == True && upper_exists == True) {
+ // i. Perform ? Set(O, lowerP, upperValue, true).
+ SetProperty(context, object, lower, upper_value);
+
+ // ii. Perform ? Set(O, upperP, lowerValue, true).
+ SetProperty(context, object, upper, lower_value);
+ } else if (lower_exists == False && upper_exists == True) {
+ // i. Perform ? Set(O, lowerP, upperValue, true).
+ SetProperty(context, object, lower, upper_value);
+
+ // ii. Perform ? DeletePropertyOrThrow(O, upperP).
+ DeleteProperty(context, object, upper, kStrict);
+ } else if (lower_exists == True && upper_exists == False) {
+ // i. Perform ? DeletePropertyOrThrow(O, lowerP).
+ DeleteProperty(context, object, lower, kStrict);
+
+ // ii. Perform ? Set(O, upperP, lowerValue, true).
+ SetProperty(context, object, upper, lower_value);
+ }
+
+ // l. Increase lower by 1.
+ ++lower;
+ --upper;
+ }
+
+ // 6. Return O.
+ return object;
+ }
+
+ macro EnsureWriteableFastElements(array: JSArray) {
+ const elements: FixedArrayBase = array.elements;
+ if (elements.map != kCOWMap) return;
+
+ // There are no COW *_DOUBLE_ELEMENTS arrays, so we are allowed to always
+ // extract FixedArrays and don't have to worry about FixedDoubleArrays.
+ assert(IsFastSmiOrTaggedElementsKind(array.map.elements_kind));
+
+ const length: Smi = array.length_fast;
+ array.elements = ExtractFixedArray(
+ unsafe_cast<FixedArray>(elements), 0, length, length, kFixedArrays);
+ }
+
+ macro TryFastPackedArrayReverse(receiver: Object) labels Slow {
+ const array: JSArray = cast<JSArray>(receiver) otherwise Slow;
+ EnsureWriteableFastElements(array);
+ assert(array.elements.map != kCOWMap);
+
+ const kind: ElementsKind = array.map.elements_kind;
+ if (kind == PACKED_SMI_ELEMENTS) {
+ FastPackedArrayReverse<FastPackedSmiElements, Smi>(
+ array.elements, array.length_fast);
+ } else if (kind == PACKED_ELEMENTS) {
+ FastPackedArrayReverse<FastPackedObjectElements, Object>(
+ array.elements, array.length_fast);
+ } else if (kind == PACKED_DOUBLE_ELEMENTS) {
+ FastPackedArrayReverse<FastPackedDoubleElements, float64>(
+ array.elements, array.length_fast);
+ } else {
+ goto Slow;
+ }
+ }
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.reverse
+ javascript builtin ArrayPrototypeReverse(
+ context: Context, receiver: Object, ...arguments): Object {
+ try {
+ TryFastPackedArrayReverse(receiver) otherwise Baseline;
+ return receiver;
+ }
+ label Baseline {
+ return GenericArrayReverse(context, receiver);
+ }
+ }
+}
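GenericArrayReverse above swaps elements from both ends while honouring missing properties, exactly as the spec steps describe. A minimal TypeScript sketch of that generic path (illustrative only, not part of this patch) is:

// Sketch of the generic (non-fast-path) reverse loop implemented above.
function reverseSketch(o: Record<number, unknown> & { length: number }): object {
  let lower = 0;
  let upper = o.length - 1;
  while (lower < upper) {
    const lowerExists = lower in o;          // HasProperty(O, lowerP)
    const upperExists = upper in o;          // HasProperty(O, upperP)
    const lowerValue = lowerExists ? o[lower] : undefined;
    const upperValue = upperExists ? o[upper] : undefined;
    if (lowerExists && upperExists) {
      o[lower] = upperValue;
      o[upper] = lowerValue;
    } else if (!lowerExists && upperExists) {
      o[lower] = upperValue;                 // Set(O, lowerP, upperValue)
      delete o[upper];                       // DeletePropertyOrThrow(O, upperP)
    } else if (lowerExists && !upperExists) {
      delete o[lower];                       // DeletePropertyOrThrow(O, lowerP)
      o[upper] = lowerValue;                 // Set(O, upperP, lowerValue)
    }
    ++lower;
    --upper;
  }
  return o;
}

The PACKED_* fast paths in TryFastPackedArrayReverse skip all of the existence checks, since packed elements kinds guarantee there are no holes.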
diff --git a/deps/v8/src/builtins/array-sort.tq b/deps/v8/src/builtins/array-sort.tq
deleted file mode 100644
index 30bbf5ef74..0000000000
--- a/deps/v8/src/builtins/array-sort.tq
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-module array {
- // TODO(szuend): TimSort implementation will go here. Keeping the file around
- // after removing the QuickSort Torque implementation.
-}
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index edfe342ae3..590947dd44 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -3,6 +3,17 @@
// found in the LICENSE file.
module array {
+ // Naming convention from elements.cc. We have a similar intent but implement
+ // fastpaths using generics instead of using a class hierarchy for elements
+ // kinds specific implementations.
+ type GenericElementsAccessor;
+ type FastPackedSmiElements;
+ type FastPackedObjectElements;
+ type FastPackedDoubleElements;
+ type FastSmiOrObjectElements;
+ type FastDoubleElements;
+ type DictionaryElements;
+
macro GetLengthProperty(context: Context, o: Object): Number {
if (BranchIfFastJSArray(o, context)) {
let a: JSArray = unsafe_cast<JSArray>(o);
@@ -110,7 +121,7 @@ module array {
javascript builtin ArraySpliceTorque(
context: Context, receiver: Object, ...arguments): Object {
// 1. Let O be ? ToObject(this value).
- let o: Object = ToObject(context, receiver);
+ let o: JSReceiver = ToObject(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
let len: Number = GetLengthProperty(context, o);
@@ -178,8 +189,7 @@ module array {
let from: String = ToString_Inline(context, actualStart + k);
// b. Let fromPresent be ? HasProperty(O, from).
- let fromPresent: Oddball =
- HasPropertyObject(o, from, context, kHasProperty);
+ let fromPresent: Oddball = HasProperty(context, o, from);
// c. If fromPresent is true, then
if (fromPresent == True) {
@@ -195,7 +205,7 @@ module array {
}
// 12. Perform ? Set(A, "length", actualDeleteCount, true).
- SetProperty(context, a, 'length', actualDeleteCount, kStrict);
+ SetProperty(context, a, 'length', actualDeleteCount);
// 13. Let items be a List whose elements are, in left-to-right order,
// the portion of the actual argument list starting with the third
@@ -217,8 +227,7 @@ module array {
let to: String = ToString_Inline(context, k + itemCount);
// iii. Let fromPresent be ? HasProperty(O, from).
- let fromPresent: Oddball =
- HasPropertyObject(o, from, context, kHasProperty);
+ let fromPresent: Oddball = HasProperty(context, o, from);
// iv. If fromPresent is true, then
if (fromPresent == True) {
@@ -226,7 +235,7 @@ module array {
let fromValue: Object = GetProperty(context, o, from);
// 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue, kStrict);
+ SetProperty(context, o, to, fromValue);
// v. Else fromPresent is false,
} else {
@@ -261,8 +270,7 @@ module array {
let to: String = ToString_Inline(context, k + itemCount - 1);
// iii. Let fromPresent be ? HasProperty(O, from).
- let fromPresent: Oddball =
- HasPropertyObject(o, from, context, kHasProperty);
+ let fromPresent: Oddball = HasProperty(context, o, from);
// iv. If fromPresent is true, then
if (fromPresent == True) {
@@ -270,7 +278,7 @@ module array {
let fromValue: Object = GetProperty(context, o, from);
// 2. Perform ? Set(O, to, fromValue, true).
- SetProperty(context, o, to, fromValue, kStrict);
+ SetProperty(context, o, to, fromValue);
// v. Else fromPresent is false,
} else {
@@ -292,7 +300,7 @@ module array {
if (arguments.length > 2) {
for (let e: Object of arguments [2: ]) {
// b. Perform ? Set(O, ! ToString(k), E, true).
- SetProperty(context, o, ToString_Inline(context, k), e, kStrict);
+ SetProperty(context, o, ToString_Inline(context, k), e);
// c. Increase k by 1.
k = k + 1;
@@ -301,8 +309,7 @@ module array {
// 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
// true).
- SetProperty(
- context, o, 'length', len - actualDeleteCount + itemCount, kStrict);
+ SetProperty(context, o, 'length', len - actualDeleteCount + itemCount);
return a;
}
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 1c9acdd5c6..1b9d577f10 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -62,25 +62,25 @@ type ExtractFixedArrayFlags generates
'TNode<Smi>' constexpr 'ExtractFixedArrayFlags';
type ParameterMode generates 'TNode<Int32T>' constexpr 'ParameterMode';
type RootListIndex generates 'TNode<Int32T>' constexpr 'Heap::RootListIndex';
+type WriteBarrierMode generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
-type MessageTemplate constexpr 'MessageTemplate';
-type HasPropertyLookupMode constexpr 'HasPropertyLookupMode';
+type MessageTemplate constexpr 'MessageTemplate::Template';
type ToIntegerTruncationMode constexpr 'ToIntegerTruncationMode';
const NO_ELEMENTS: constexpr ElementsKind generates 'NO_ELEMENTS';
const PACKED_SMI_ELEMENTS: constexpr ElementsKind generates
- 'PACKED_SMI_ELEMENTS';
+'PACKED_SMI_ELEMENTS';
const HOLEY_SMI_ELEMENTS: constexpr ElementsKind generates 'HOLEY_SMI_ELEMENTS';
const PACKED_ELEMENTS: constexpr ElementsKind generates 'PACKED_ELEMENTS';
const HOLEY_ELEMENTS: constexpr ElementsKind generates 'HOLEY_ELEMENTS';
const PACKED_DOUBLE_ELEMENTS: constexpr ElementsKind generates
- 'PACKED_DOUBLE_ELEMENTS';
+'PACKED_DOUBLE_ELEMENTS';
const HOLEY_DOUBLE_ELEMENTS: constexpr ElementsKind generates
- 'HOLEY_DOUBLE_ELEMENTS';
+'HOLEY_DOUBLE_ELEMENTS';
const DICTIONARY_ELEMENTS: constexpr ElementsKind generates
- 'DICTIONARY_ELEMENTS';
+'DICTIONARY_ELEMENTS';
const UINT8_ELEMENTS: constexpr ElementsKind generates 'UINT8_ELEMENTS';
const INT8_ELEMENTS: constexpr ElementsKind generates 'INT8_ELEMENTS';
@@ -91,7 +91,7 @@ const INT32_ELEMENTS: constexpr ElementsKind generates 'INT32_ELEMENTS';
const FLOAT32_ELEMENTS: constexpr ElementsKind generates 'FLOAT32_ELEMENTS';
const FLOAT64_ELEMENTS: constexpr ElementsKind generates 'FLOAT64_ELEMENTS';
const UINT8_CLAMPED_ELEMENTS: constexpr ElementsKind generates
- 'UINT8_CLAMPED_ELEMENTS';
+'UINT8_CLAMPED_ELEMENTS';
const BIGUINT64_ELEMENTS: constexpr ElementsKind generates 'BIGUINT64_ELEMENTS';
const BIGINT64_ELEMENTS: constexpr ElementsKind generates 'BIGINT64_ELEMENTS';
@@ -109,6 +109,8 @@ type FixedBigInt64Array extends FixedTypedArray;
const kAllFixedArrays: constexpr ExtractFixedArrayFlags generates
'ExtractFixedArrayFlag::kAllFixedArrays';
+const kFixedArrays: constexpr ExtractFixedArrayFlags generates
+'ExtractFixedArrayFlag::kFixedArrays';
const kFixedCOWArrayMapRootIndex: constexpr RootListIndex generates
'Heap::kFixedCOWArrayMapRootIndex';
@@ -122,8 +124,6 @@ const kCalledNonCallable: constexpr MessageTemplate generates
const kCalledOnNullOrUndefined: constexpr MessageTemplate generates
'MessageTemplate::kCalledOnNullOrUndefined';
-const kHasProperty: constexpr HasPropertyLookupMode generates 'kHasProperty';
-
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
const kTruncateMinusZero: constexpr ToIntegerTruncationMode generates
@@ -147,6 +147,8 @@ extern macro NullConstant(): Oddball;
extern macro UndefinedConstant(): Oddball;
extern macro TrueConstant(): Boolean;
extern macro FalseConstant(): Boolean;
+extern macro Int32TrueConstant(): bool;
+extern macro Int32FalseConstant(): bool;
const Hole: Oddball = TheHoleConstant();
const Null: Oddball = NullConstant();
@@ -163,8 +165,14 @@ const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy';
const SMI_PARAMETERS: constexpr ParameterMode generates 'SMI_PARAMETERS';
const INTPTR_PARAMETERS: constexpr ParameterMode generates 'INTPTR_PARAMETERS';
+
+const SKIP_WRITE_BARRIER: constexpr WriteBarrierMode
+ generates 'SKIP_WRITE_BARRIER';
+
extern macro Is64(): constexpr bool;
+extern macro SelectBooleanConstant(bool): Boolean;
+
extern macro Print(constexpr string);
extern macro Print(constexpr string, Object);
extern macro Print(Object);
@@ -176,25 +184,26 @@ extern macro ToLength_Inline(Context, Object): Number;
extern macro ToNumber_Inline(Context, Object): Number;
extern macro ToString_Inline(Context, Object): String;
extern macro GetProperty(Context, Object, Object): Object;
-extern macro HasProperty(
- HeapObject, Object, Context, constexpr HasPropertyLookupMode): Oddball;
+extern builtin SetProperty(Context, Object, Object, Object);
+extern builtin DeleteProperty(Context, Object, Object, LanguageMode);
+extern builtin HasProperty(Context, JSReceiver, Object): Boolean;
+
extern macro ThrowRangeError(Context, constexpr MessageTemplate): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object): never;
-extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object, Object,
- Object): never;
+extern macro ThrowTypeError(
+ Context, constexpr MessageTemplate, Object, Object, Object): never;
extern macro ArraySpeciesCreate(Context, Object, Number): Object;
extern macro EnsureArrayPushable(Map): ElementsKind labels Bailout;
extern builtin ToObject(Context, Object): JSReceiver;
+extern macro ToObject_Inline(Context, Object): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
extern macro IsTheHole(Object): bool;
extern macro IsString(HeapObject): bool;
extern builtin ToString(Context, Object): String;
extern runtime CreateDataProperty(Context, Object, Object, Object);
-extern runtime SetProperty(Context, Object, Object, Object, LanguageMode);
-extern runtime DeleteProperty(Context, Object, Object, LanguageMode);
extern macro LoadRoot(constexpr RootListIndex): Object;
extern macro StoreRoot(constexpr RootListIndex, Object): Object;
@@ -257,7 +266,9 @@ extern operator '!=' macro WordNotEqual(Object, Object): bool;
extern operator '+' macro SmiAdd(Smi, Smi): Smi;
extern operator '-' macro SmiSub(Smi, Smi): Smi;
extern operator '&' macro SmiAnd(Smi, Smi): Smi;
+extern operator '|' macro SmiOr(Smi, Smi): Smi;
extern operator '>>>' macro SmiShr(Smi, constexpr int31): Smi;
+extern operator '<<' macro SmiShl(Smi, constexpr int31): Smi;
extern operator '+' macro IntPtrAdd(intptr, intptr): intptr;
extern operator '-' macro IntPtrSub(intptr, intptr): intptr;
@@ -287,11 +298,21 @@ extern operator '|' macro Word32Or(uint32, uint32): uint32;
extern operator '+' macro NumberAdd(Number, Number): Number;
extern operator '-' macro NumberSub(Number, Number): Number;
-extern operator 'min' macro NumberMin(Number, Number): Number;
-extern operator 'max' macro NumberMax(Number, Number): Number;
+extern macro NumberMin(Number, Number): Number;
+extern macro NumberMax(Number, Number): Number;
+macro min(x: Number, y: Number): Number {
+ return NumberMin(x, y);
+}
+macro max(x: Number, y: Number): Number {
+ return NumberMax(x, y);
+}
+
+extern macro SmiMax(Smi, Smi): Smi;
+extern macro SmiMin(Smi, Smi): Smi;
extern operator '!' macro ConstexprBoolNot(constexpr bool): constexpr bool;
extern operator '!' macro Word32BinaryNot(bool): bool;
+extern operator '!' macro IsFalse(Boolean): bool;
extern operator '.map' macro LoadMap(HeapObject): Map;
extern operator '.map=' macro StoreMap(HeapObject, Map);
@@ -308,42 +329,50 @@ extern operator 'is<Smi>' macro TaggedIsSmi(Object): bool;
extern operator 'isnt<Smi>' macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
-extern macro TaggedToJSDataView(Object): JSDataView labels CastError;
+extern macro HeapObjectToJSDataView(HeapObject): JSDataView labels CastError;
extern macro TaggedToHeapObject(Object): HeapObject labels CastError;
extern macro TaggedToSmi(Object): Smi labels CastError;
-extern macro TaggedToJSArray(Object): JSArray labels CastError;
-extern macro TaggedToCallable(Object): Callable labels CastError;
-extern macro ConvertFixedArrayBaseToFixedArray(FixedArrayBase):
+extern macro HeapObjectToJSArray(HeapObject): JSArray labels CastError;
+extern macro HeapObjectToCallable(HeapObject): Callable labels CastError;
+extern macro HeapObjectToFixedArray(HeapObject):
FixedArray labels CastError;
-extern macro ConvertFixedArrayBaseToFixedDoubleArray(FixedArrayBase):
+extern macro HeapObjectToFixedDoubleArray(HeapObject):
FixedDoubleArray labels CastError;
extern macro TaggedToNumber(Object): Number labels CastError;
-macro cast<A : type>(o: Object): A labels CastError;
-cast<Number>(o: Object): Number labels CastError {
- return TaggedToNumber(o) otherwise CastError;
+macro cast_HeapObject<A : type>(o : HeapObject) : A labels CastError;
+cast_HeapObject<HeapObject>(o : HeapObject) : HeapObject labels CastError { return o; }
+cast_HeapObject<FixedArray>(o: HeapObject): FixedArray labels CastError {
+ return HeapObjectToFixedArray(o) otherwise CastError;
}
-cast<HeapObject>(o: Object): HeapObject labels CastError {
- return TaggedToHeapObject(o) otherwise CastError;
+cast_HeapObject<FixedDoubleArray>(o: HeapObject): FixedDoubleArray labels CastError {
+ return HeapObjectToFixedDoubleArray(o) otherwise CastError;
}
-cast<Smi>(o: Object): Smi labels CastError {
- return TaggedToSmi(o) otherwise CastError;
+cast_HeapObject<JSDataView>(o: HeapObject): JSDataView labels CastError {
+ return HeapObjectToJSDataView(o) otherwise CastError;
+}
+cast_HeapObject<Callable>(o: HeapObject): Callable labels CastError {
+ return HeapObjectToCallable(o) otherwise CastError;
}
-cast<JSDataView>(o: Object): JSDataView labels CastError {
- return TaggedToJSDataView(o) otherwise CastError;
+cast_HeapObject<JSArray>(o: HeapObject): JSArray labels CastError {
+ return HeapObjectToJSArray(o) otherwise CastError;
}
-cast<Callable>(o: Object): Callable labels CastError {
- return TaggedToCallable(o) otherwise CastError;
+
+macro cast<A : type>(o: HeapObject): A labels CastError {
+ return cast_HeapObject<A>(o) otherwise CastError;
}
-cast<JSArray>(o: Object): JSArray labels CastError {
- return TaggedToJSArray(o) otherwise CastError;
+
+// cast_HeapObject allows this default-implementation to be non-recursive.
+// Otherwise the generated CSA code might run into infinite recursion.
+macro cast<A : type>(o: Object): A labels CastError {
+ return cast_HeapObject<A>(
+ TaggedToHeapObject(o) otherwise CastError) otherwise CastError;
}
-macro cast<A : type>(o: FixedArrayBase): A labels CastError;
-cast<FixedArray>(o: FixedArrayBase): FixedArray labels CastError {
- return ConvertFixedArrayBaseToFixedArray(o) otherwise CastError;
+cast<Smi>(o: Object): Smi labels CastError {
+ return TaggedToSmi(o) otherwise CastError;
}
-cast<FixedDoubleArray>(o: FixedArrayBase): FixedDoubleArray labels CastError {
- return ConvertFixedArrayBaseToFixedDoubleArray(o) otherwise CastError;
+cast<Number>(o: Object): Number labels CastError {
+ return TaggedToNumber(o) otherwise CastError;
}
extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
@@ -365,7 +394,7 @@ extern macro LoadHeapNumberValue(HeapNumber): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
extern macro ChangeFloat64ToUintPtr(float64): uintptr;
-extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
+extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
extern macro NumberConstant(constexpr float64): Number;
@@ -573,8 +602,8 @@ unsafe_cast<FixedArrayBase>(o: Object): FixedArrayBase {
}
const kCOWMap: Map = unsafe_cast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
-const kEmptyFixedArray: FixedArrayBase = unsafe_cast<FixedArrayBase>(
- LoadRoot(kEmptyFixedArrayRootIndex));
+const kEmptyFixedArray: FixedArrayBase =
+ unsafe_cast<FixedArrayBase>(LoadRoot(kEmptyFixedArrayRootIndex));
extern macro BranchIfFastJSArray(Object, Context): never labels Taken, NotTaken;
extern macro BranchIfNotFastJSArray(Object, Context): never labels Taken,
@@ -607,15 +636,18 @@ extern operator '.length' macro LoadFixedArrayBaseLength(FixedArrayBase): Smi;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, intptr): Object;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, Smi): Object;
extern operator
-'[]' macro LoadFixedArrayElementInt(FixedArray, constexpr int31): Object;
+'[]' macro LoadFixedArrayElement(FixedArray, constexpr int31): Object;
extern operator
'[]=' macro StoreFixedArrayElement(FixedArray, intptr, Object): void;
extern operator
-'[]=' macro StoreFixedArrayElementInt(
+'[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, Object): void;
extern operator
'[]=' macro StoreFixedArrayElementSmi(FixedArray, Smi, Object): void;
+extern macro StoreFixedArrayElementSmi(FixedArray, Smi, Object,
+ constexpr WriteBarrierMode): void;
+
extern operator '.instance_type' macro LoadMapInstanceType(Map): int32;
extern macro LoadFixedDoubleArrayElement(FixedDoubleArray, Smi): float64;
@@ -629,7 +661,8 @@ macro StoreFixedDoubleArrayElementWithSmiIndex(
}
extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr):
- Object labels NotData, IfHole;
+ Object labels NotData,
+ IfHole;
extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object)
labels NotData, IfHole, ReadOnly;
@@ -639,7 +672,8 @@ extern macro IsFastSmiOrTaggedElementsKind(ElementsKind): bool;
extern macro IsFastSmiElementsKind(ElementsKind): bool;
extern macro IsHoleyFastElementsKind(ElementsKind): bool;
-extern macro AllocateFixedArray(constexpr ElementsKind, intptr): FixedArray;
+extern macro AllocateZeroedFixedArray(intptr): FixedArray;
+extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray;
extern macro CopyFixedArrayElements(
constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray,
@@ -660,8 +694,10 @@ extern macro Call(Context, Callable, Object): Object;
extern macro Call(Context, Callable, Object, Object): Object;
extern macro Call(Context, Callable, Object, Object, Object): Object;
extern macro Call(Context, Callable, Object, Object, Object, Object): Object;
-extern macro Call(Context, Callable, Object, Object, Object, Object, Object): Object;
-extern macro Call(Context, Callable, Object, Object, Object, Object, Object, Object): Object;
+extern macro Call(
+ Context, Callable, Object, Object, Object, Object, Object): Object;
+extern macro Call(
+ Context, Callable, Object, Object, Object, Object, Object, Object): Object;
extern macro ExtractFixedArray(
FixedArray, Smi, Smi, Smi, constexpr ExtractFixedArrayFlags): FixedArray;
@@ -699,32 +735,26 @@ labels IfHole {
}
}
-macro HasPropertyObject(
- o: Object, p: Object, c: Context,
- f: constexpr HasPropertyLookupMode): Oddball {
- try {
- return HasProperty((cast<HeapObject>(o) otherwise CastError), p, c, f);
- }
- label CastError {
- return False;
- }
-}
-
extern macro IsCallable(HeapObject): bool;
extern macro IsJSArray(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
+extern macro IsFixedArray(HeapObject): bool;
extern macro IsExtensibleMap(Map): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
extern macro Typeof(Object): Object;
// Return true iff number is NaN.
macro NumberIsNaN(number: Number): bool {
- if (TaggedIsSmi(number)) return false;
-
- let value: float64 = convert<float64>(unsafe_cast<HeapNumber>(number));
- return value != value;
+ typeswitch(number) {
+ case (Smi) {
+ return false;
+ } case (hn : HeapNumber) {
+ let value: float64 = convert<float64>(hn);
+ return value != value;
+ }
+ }
}
extern macro BranchIfToBooleanIsTrue(Object): never labels Taken, NotTaken;
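The rewritten NumberIsNaN macro above relies on NaN being the only value that compares unequal to itself; Smis can never be NaN, so the typeswitch returns false for them without loading a float64. An equivalent check in TypeScript (illustrative only, not part of this patch):

// NaN is the only number for which x !== x holds.
function numberIsNaN(x: number): boolean {
  return x !== x;
}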
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index d77bc79238..95f5f2ebbd 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
-
#include "src/api-arguments-inl.h"
#include "src/api-natives.h"
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
+#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/log.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 09c725fe37..c82cef3919 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -305,8 +305,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
JSSloppyArgumentsObject::kSize);
StoreObjectFieldNoWriteBarrier(
argument_object, JSSloppyArgumentsObject::kCalleeOffset, function);
- StoreFixedArrayElement(map_array, 0, context, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(map_array, 1, elements, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(CAST(map_array), 0, context, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(CAST(map_array), 1, elements, SKIP_WRITE_BARRIER);
Comment("Fill in non-mapped parameters");
Node* argument_offset =
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index fd08639c72..d61a516422 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -366,8 +366,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Goto(&done);
BIND(&slow);
- CallRuntime(Runtime::kSetProperty, context(), a(), k, mapped_value,
- SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(context(), CAST(a()), CAST(k), CAST(mapped_value));
Goto(&done);
BIND(&detached);
@@ -381,6 +380,28 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
void ArrayBuiltinsAssembler::NullPostLoopAction() {}
+ void ArrayBuiltinsAssembler::FillFixedArrayWithSmiZero(
+ TNode<FixedArray> array, TNode<Smi> smi_length) {
+ CSA_ASSERT(this, Word32BinaryNot(IsFixedDoubleArray(array)));
+
+ TNode<IntPtrT> length = SmiToIntPtr(smi_length);
+ TNode<WordT> byte_length = TimesPointerSize(length);
+ CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+
+ static const int32_t fa_base_data_offset =
+ FixedArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> backing_store = IntPtrAdd(
+ BitcastTaggedToWord(array), IntPtrConstant(fa_base_data_offset));
+
+ // Call out to memset to perform initialization.
+ TNode<ExternalReference> memset =
+ ExternalConstant(ExternalReference::libc_memset_function());
+ STATIC_ASSERT(kSizetSize == kIntptrSize);
+ CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memset,
+ backing_store, IntPtrConstant(0), byte_length);
+ }
+
void ArrayBuiltinsAssembler::ReturnFromBuiltin(Node* value) {
if (argc_ == nullptr) {
Return(value);
@@ -417,7 +438,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O)
- o_ = ToObject(context(), receiver());
+ o_ = ToObject_Inline(context(), receiver());
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -500,8 +521,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
- Label throw_not_typed_array(this, Label::kDeferred),
- throw_detached(this, Label::kDeferred);
+ Label throw_not_typed_array(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
GotoIfNot(HasInstanceType(CAST(receiver_), JS_TYPED_ARRAY_TYPE),
@@ -510,9 +530,8 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
TNode<JSTypedArray> typed_array = CAST(receiver_);
o_ = typed_array;
- Node* array_buffer =
- LoadObjectField(typed_array, JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);
+ TNode<JSArrayBuffer> array_buffer = LoadArrayBufferViewBuffer(typed_array);
+ ThrowIfArrayBufferIsDetached(context_, array_buffer, name_);
len_ = LoadTypedArrayLength(typed_array);
@@ -525,9 +544,6 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&throw_not_typed_array);
ThrowTypeError(context_, MessageTemplate::kNotTypedArray);
- BIND(&throw_detached);
- ThrowTypeError(context_, MessageTemplate::kDetachedOperation, name_);
-
BIND(&throw_not_callable);
ThrowTypeError(context_, MessageTemplate::kCalledNonCallable, callbackfn_);
@@ -536,7 +552,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Unreachable();
std::vector<int32_t> instance_types = {
-#define INSTANCE_TYPE(Type, type, TYPE, ctype, size) FIXED_##TYPE##_ARRAY_TYPE,
+#define INSTANCE_TYPE(Type, type, TYPE, ctype) FIXED_##TYPE##_ARRAY_TYPE,
TYPED_ARRAYS(INSTANCE_TYPE)
#undef INSTANCE_TYPE
};
@@ -608,7 +624,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
TNode<Oddball> k_present =
- HasProperty(o(), k(), context(), kHasProperty);
+ HasProperty(context(), o(), k(), kHasProperty);
// d. If kPresent is true, then
GotoIf(IsFalse(k_present), &done_element);
@@ -643,8 +659,8 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
ElementsKind ArrayBuiltinsAssembler::ElementsKindForInstanceType(
InstanceType type) {
switch (type) {
-#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
+#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
return TYPE##_ELEMENTS;
TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENTS_KIND)
@@ -995,8 +1011,8 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&fast_elements);
{
- Node* value = LoadFixedArrayElement(elements, new_length);
- StoreFixedArrayElement(elements, new_length, TheHoleConstant());
+ Node* value = LoadFixedArrayElement(CAST(elements), new_length);
+ StoreFixedArrayElement(CAST(elements), new_length, TheHoleConstant());
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
args.PopAndReturn(value);
}
@@ -1065,8 +1081,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
- SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(context, array_receiver, CAST(length), CAST(arg));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
@@ -1112,8 +1127,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
Node* length = LoadJSArrayLength(array_receiver);
// TODO(danno): Use the KeyedStoreGeneric stub here when possible,
// calling into the runtime to do the elements transition is overkill.
- CallRuntime(Runtime::kSetProperty, context, array_receiver, length, arg,
- SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(context, array_receiver, CAST(length), CAST(arg));
Increment(&arg_index);
// The runtime SetProperty call could have converted the array to dictionary
// mode, which must be detected to abort the fast-path.
@@ -1132,8 +1146,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
args.ForEach(
[this, array_receiver, context](Node* arg) {
Node* length = LoadJSArrayLength(array_receiver);
- CallRuntime(Runtime::kSetProperty, context, array_receiver, length,
- arg, SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(context, array_receiver, CAST(length), CAST(arg));
},
arg_index.value());
args.PopAndReturn(LoadJSArrayLength(array_receiver));
@@ -1198,7 +1211,7 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
GotoIf(WordNotEqual(map, fast_aliasted_arguments_map), &try_simple_slice);
- Node* sloppy_elements = LoadElements(array);
+ TNode<SloppyArgumentsElements> sloppy_elements = CAST(LoadElements(array));
TNode<Smi> sloppy_elements_length =
LoadFixedArrayBaseLength(sloppy_elements);
TNode<Smi> parameter_map_length =
@@ -1217,8 +1230,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
TNode<Smi> end = SmiAdd(CAST(from), CAST(count));
- Node* unmapped_elements = LoadFixedArrayElement(
- sloppy_elements, SloppyArgumentsElements::kArgumentsIndex);
+ TNode<FixedArray> unmapped_elements = CAST(LoadFixedArrayElement(
+ sloppy_elements, SloppyArgumentsElements::kArgumentsIndex));
TNode<Smi> unmapped_elements_length =
LoadFixedArrayBaseLength(unmapped_elements);
@@ -1229,7 +1242,7 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
nullptr, SMI_PARAMETERS));
index_out.Bind(IntPtrConstant(0));
- Node* result_elements = LoadElements(result.value());
+ TNode<FixedArray> result_elements = CAST(LoadElements(result.value()));
TNode<Smi> from_mapped = SmiMin(parameter_map_length, CAST(from));
TNode<Smi> to = SmiMin(parameter_map_length, end);
Node* arguments_context = LoadFixedArrayElement(
@@ -1293,7 +1306,7 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
Variable& n) {
// b. Let kPresent be HasProperty(O, Pk).
// c. ReturnIfAbrupt(kPresent).
- TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty);
+ TNode<Oddball> k_present = HasProperty(context, o, p_k, kHasProperty);
// d. If kPresent is true, then
Label done_element(this);
@@ -1375,7 +1388,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&generic_length);
// 1. Let O be ToObject(this value).
// 2. ReturnIfAbrupt(O).
- o = ToObject(context, receiver);
+ o = ToObject_Inline(context, receiver);
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
@@ -1480,10 +1493,8 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
// 16. Let setStatus be Set(A, "length", n, true).
// 17. ReturnIfAbrupt(setStatus).
- CallRuntime(Runtime::kSetProperty, context, a,
- HeapConstant(isolate()->factory()->length_string()), n.value(),
- SmiConstant(static_cast<int>(LanguageMode::kStrict)));
-
+ SetPropertyStrict(context, CAST(a), CodeStubAssembler::LengthStringConstant(),
+ CAST(n.value()));
args.PopAndReturn(a);
}
@@ -1601,35 +1612,39 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
BIND(&fast_elements_tagged);
{
- Node* value = LoadFixedArrayElement(elements, 0);
- BuildFastLoop(IntPtrConstant(0), new_length,
- [&](Node* index) {
- StoreFixedArrayElement(
- elements, index,
- LoadFixedArrayElement(
- elements, IntPtrAdd(index, IntPtrConstant(1))));
- },
- 1, ParameterMode::INTPTR_PARAMETERS,
- IndexAdvanceMode::kPost);
- StoreFixedArrayElement(elements, new_length, TheHoleConstant());
+ TNode<FixedArray> elements_fixed_array = CAST(elements);
+ Node* value = LoadFixedArrayElement(elements_fixed_array, 0);
+ BuildFastLoop(
+ IntPtrConstant(0), new_length,
+ [&](Node* index) {
+ StoreFixedArrayElement(
+ elements_fixed_array, index,
+ LoadFixedArrayElement(elements_fixed_array,
+ IntPtrAdd(index, IntPtrConstant(1))));
+ },
+ 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ StoreFixedArrayElement(elements_fixed_array, new_length,
+ TheHoleConstant());
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
args.PopAndReturn(value);
}
BIND(&fast_elements_smi);
{
- Node* value = LoadFixedArrayElement(elements, 0);
- BuildFastLoop(IntPtrConstant(0), new_length,
- [&](Node* index) {
- StoreFixedArrayElement(
- elements, index,
- LoadFixedArrayElement(
- elements, IntPtrAdd(index, IntPtrConstant(1))),
- SKIP_WRITE_BARRIER);
- },
- 1, ParameterMode::INTPTR_PARAMETERS,
- IndexAdvanceMode::kPost);
- StoreFixedArrayElement(elements, new_length, TheHoleConstant());
+ TNode<FixedArray> elements_fixed_array = CAST(elements);
+ Node* value = LoadFixedArrayElement(elements_fixed_array, 0);
+ BuildFastLoop(
+ IntPtrConstant(0), new_length,
+ [&](Node* index) {
+ StoreFixedArrayElement(
+ elements_fixed_array, index,
+ LoadFixedArrayElement(elements_fixed_array,
+ IntPtrAdd(index, IntPtrConstant(1))),
+ SKIP_WRITE_BARRIER);
+ },
+ 1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ StoreFixedArrayElement(elements_fixed_array, new_length,
+ TheHoleConstant());
GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
args.PopAndReturn(value);
}
@@ -1995,9 +2010,8 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
BIND(&runtime);
{
- CallRuntime(Runtime::kSetProperty, context, static_cast<Node*>(array),
- CodeStubAssembler::LengthStringConstant(), length,
- SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(context, array,
+ CodeStubAssembler::LengthStringConstant(), length);
Goto(&done);
}
@@ -2034,7 +2048,7 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
TNode<Object> items = args.GetOptionalArgumentValue(0);
// The spec doesn't require ToObject to be called directly on the iterable
// branch, but it's part of GetMethod that is in the spec.
- TNode<JSReceiver> array_like = ToObject(context, items);
+ TNode<JSReceiver> array_like = ToObject_Inline(context, items);
TVARIABLE(Object, array);
TVARIABLE(Number, length);
@@ -3097,7 +3111,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
{
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(WordEqual(element_k, search_element), &return_found);
Increment(&index_var);
@@ -3109,7 +3123,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(IsUndefined(element_k), &return_found);
GotoIf(IsTheHole(element_k), &return_found);
@@ -3128,7 +3142,8 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
Label continue_loop(this), not_smi(this);
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k =
+ LoadFixedArrayElement(CAST(elements), index_var.value());
GotoIfNot(TaggedIsSmi(element_k), &not_smi);
Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
&return_found, &continue_loop);
@@ -3149,7 +3164,8 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
Label continue_loop(this);
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k =
+ LoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIfNot(IsHeapNumber(CAST(element_k)), &continue_loop);
BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
@@ -3172,7 +3188,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
BIND(&next_iteration);
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIf(WordEqual(search_element_string, element_k), &return_found);
Node* element_k_type = LoadInstanceType(element_k);
@@ -3200,7 +3216,7 @@ void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Node* element_k = LoadFixedArrayElement(CAST(elements), index_var.value());
Label continue_loop(this);
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIfNot(IsBigInt(CAST(element_k)), &continue_loop);
@@ -3475,7 +3491,7 @@ TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Return(CreateArrayIterator(context, ToObject(context, receiver),
+ Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kValues));
}
@@ -3483,7 +3499,7 @@ TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Return(CreateArrayIterator(context, ToObject(context, receiver),
+ Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kEntries));
}
@@ -3491,7 +3507,7 @@ TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Return(CreateArrayIterator(context, ToObject(context, receiver),
+ Return(CreateArrayIterator(context, ToObject_Inline(context, receiver),
IterationKind::kKeys));
}
@@ -3502,147 +3518,122 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* iterator = Parameter(Descriptor::kReceiver);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_done, MachineRepresentation::kTagged);
+ VARIABLE(var_done, MachineRepresentation::kTagged, TrueConstant());
+ VARIABLE(var_value, MachineRepresentation::kTagged, UndefinedConstant());
- // Required, or else `throw_bad_receiver` fails a DCHECK due to these
- // variables not being bound along all paths, despite not being used.
- var_done.Bind(TrueConstant());
- var_value.Bind(UndefinedConstant());
-
- Label throw_bad_receiver(this, Label::kDeferred);
- Label set_done(this);
Label allocate_entry_if_needed(this);
Label allocate_iterator_result(this);
+ Label if_typedarray(this), if_other(this, Label::kDeferred), if_array(this),
+ if_generic(this, Label::kDeferred);
+ Label set_done(this, Label::kDeferred);
// If O does not have all of the internal slots of an Array Iterator Instance
// (22.1.5.3), throw a TypeError exception
- GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
- GotoIfNot(IsJSArrayIterator(CAST(iterator)), &throw_bad_receiver);
+ ThrowIfNotInstanceType(context, iterator, JS_ARRAY_ITERATOR_TYPE,
+ method_name);
// Let a be O.[[IteratedObject]].
- Node* array =
- LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset);
+ TNode<JSReceiver> array =
+ CAST(LoadObjectField(iterator, JSArrayIterator::kIteratedObjectOffset));
// Let index be O.[[ArrayIteratorNextIndex]].
- Node* index = LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
- Node* array_map = LoadMap(array);
-
- Label if_detached(this, Label::kDeferred);
-
- Label if_typedarray(this), if_other(this, Label::kDeferred), if_array(this),
- if_generic(this, Label::kDeferred);
+ TNode<Number> index =
+ CAST(LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset));
+ CSA_ASSERT(this, IsNumberNonNegativeSafeInteger(index));
- Node* array_type = LoadInstanceType(array);
+ // Dispatch based on the type of the {array}.
+ TNode<Map> array_map = LoadMap(array);
+ TNode<Int32T> array_type = LoadMapInstanceType(array_map);
GotoIf(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_array);
Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_typedarray,
&if_other);
BIND(&if_array);
{
- // We can only handle fast elements here.
- Node* elements_kind = LoadMapElementsKind(array_map);
- GotoIfNot(IsFastElementsKind(elements_kind), &if_other);
-
- TNode<Smi> length = CAST(LoadJSArrayLength(array));
+ // If {array} is a JSArray, then the {index} must be in Unsigned32 range.
+ CSA_ASSERT(this, IsNumberArrayIndex(index));
+
+ // Check that the {index} is within range for the {array}. We handle all
+    // kinds of JSArrays here, so we do the computation on Uint32.
+ TNode<Uint32T> index32 = ChangeNumberToUint32(index);
+ TNode<Uint32T> length32 =
+ ChangeNumberToUint32(LoadJSArrayLength(CAST(array)));
+ GotoIfNot(Uint32LessThan(index32, length32), &set_done);
+ StoreObjectField(
+ iterator, JSArrayIterator::kNextIndexOffset,
+ ChangeUint32ToTagged(Unsigned(Int32Add(index32, Int32Constant(1)))));
- GotoIfNot(SmiBelow(CAST(index), length), &set_done);
-
- var_value.Bind(index);
- TNode<Smi> one = SmiConstant(1);
- StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
- SmiAdd(CAST(index), one));
var_done.Bind(FalseConstant());
+ var_value.Bind(index);
GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
iterator, JSArrayIterator::kKindOffset),
Int32Constant(static_cast<int>(IterationKind::kKeys))),
&allocate_iterator_result);
- Node* elements = LoadElements(array);
- Label if_packed(this), if_holey(this), if_packed_double(this),
- if_holey_double(this), if_unknown_kind(this, Label::kDeferred);
- int32_t kinds[] = {// Handled by if_packed.
- PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
- // Handled by if_holey.
- HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
- // Handled by if_packed_double.
- PACKED_DOUBLE_ELEMENTS,
- // Handled by if_holey_double.
- HOLEY_DOUBLE_ELEMENTS};
- Label* labels[] = {// PACKED_{SMI,}_ELEMENTS
- &if_packed, &if_packed,
- // HOLEY_{SMI,}_ELEMENTS
- &if_holey, &if_holey,
- // PACKED_DOUBLE_ELEMENTS
- &if_packed_double,
- // HOLEY_DOUBLE_ELEMENTS
- &if_holey_double};
- Switch(elements_kind, &if_unknown_kind, kinds, labels, arraysize(kinds));
-
- BIND(&if_packed);
- {
- var_value.Bind(LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
-
- BIND(&if_holey);
- {
- Node* element = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
- var_value.Bind(element);
- GotoIfNot(WordEqual(element, TheHoleConstant()),
- &allocate_entry_if_needed);
- GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
- var_value.Bind(UndefinedConstant());
- Goto(&allocate_entry_if_needed);
- }
-
- BIND(&if_packed_double);
- {
- Node* value = LoadFixedDoubleArrayElement(
- elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
- var_value.Bind(AllocateHeapNumberWithValue(value));
- Goto(&allocate_entry_if_needed);
- }
+ Label if_hole(this, Label::kDeferred);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
+ TNode<FixedArrayBase> elements = LoadElements(CAST(array));
+ var_value.Bind(LoadFixedArrayBaseElementAsTagged(
+ elements, Signed(ChangeUint32ToWord(index32)), elements_kind,
+ &if_generic, &if_hole));
+ Goto(&allocate_entry_if_needed);
- BIND(&if_holey_double);
+ BIND(&if_hole);
{
- Label if_hole(this, Label::kDeferred);
- Node* value = LoadFixedDoubleArrayElement(
- elements, index, MachineType::Float64(), 0, SMI_PARAMETERS, &if_hole);
- var_value.Bind(AllocateHeapNumberWithValue(value));
- Goto(&allocate_entry_if_needed);
- BIND(&if_hole);
GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
var_value.Bind(UndefinedConstant());
Goto(&allocate_entry_if_needed);
}
-
- BIND(&if_unknown_kind);
- Unreachable();
}
BIND(&if_other);
{
- // If a is undefined, return CreateIterResultObject(undefined, true)
- GotoIf(IsUndefined(array), &allocate_iterator_result);
+    // We cannot enter here with either JSArrays or JSTypedArrays.
+ CSA_ASSERT(this, Word32BinaryNot(IsJSArray(array)));
+ CSA_ASSERT(this, Word32BinaryNot(IsJSTypedArray(array)));
- Node* length =
+    // Check that the {index} is within the bounds of the {array}'s "length".
+ TNode<Number> length = CAST(
CallBuiltin(Builtins::kToLength, context,
- GetProperty(context, array, factory()->length_string()));
-
+ GetProperty(context, array, factory()->length_string())));
GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
-
- var_value.Bind(index);
StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
NumberInc(index));
+
var_done.Bind(FalseConstant());
+ var_value.Bind(index);
- GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ Branch(Word32Equal(LoadAndUntagToWord32ObjectField(
iterator, JSArrayIterator::kKindOffset),
Int32Constant(static_cast<int>(IterationKind::kKeys))),
- &allocate_iterator_result);
- Goto(&if_generic);
+ &allocate_iterator_result, &if_generic);
+ }
+
+ BIND(&set_done);
+ {
+ // Change the [[ArrayIteratorNextIndex]] such that the {iterator} will
+ // never produce values anymore, because it will always fail the bounds
+ // check. Note that this is different from what the specification does,
+ // which is changing the [[IteratedObject]] to undefined, because leaving
+ // [[IteratedObject]] alone helps TurboFan to generate better code with
+ // the inlining in JSCallReducer::ReduceArrayIteratorPrototypeNext().
+ //
+ // The terminal value we chose here depends on the type of the {array},
+    // for JSArrays we use kMaxUInt32 so that TurboFan can always use
+    // Word32 representation for fast-path indices (and this is safe since
+    // the "length" of JSArrays is limited to Unsigned32 range). For other
+    // JSReceivers we have to use kMaxSafeInteger, since the "length" can
+    // be any arbitrary value in the safe integer range.
+    //
+    // Note specifically that JSTypedArrays will never take this path, so
+    // we don't need to worry about their maximum value.
+ CSA_ASSERT(this, Word32BinaryNot(IsJSTypedArray(array)));
+ TNode<Number> max_length =
+ SelectConstant(IsJSArray(array), NumberConstant(kMaxUInt32),
+ NumberConstant(kMaxSafeInteger));
+ StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset, max_length);
+ Goto(&allocate_iterator_result);
}
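A minimal standalone sketch of the terminal-index idea described in the comment above, assuming a simplified iterator over a std::vector; the struct and member names below are illustrative, not V8 API:

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Sketch only: once exhausted, keep the backing object but park the index
    // at a value that can never pass the bounds check again, so the hot path
    // needs no separate "exhausted" flag or null check.
    struct SketchArrayIterator {
      const std::vector<int>* iterated_object;  // stand-in for [[IteratedObject]]
      uint64_t next_index = 0;  // stand-in for [[ArrayIteratorNextIndex]]

      std::optional<int> Next() {
        if (next_index >= iterated_object->size()) {
          next_index = UINT32_MAX;  // analogue of kMaxUInt32 for JSArray lengths
          return std::nullopt;
        }
        return (*iterated_object)[next_index++];
      }
    };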
BIND(&if_generic);
@@ -3653,77 +3644,41 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_typedarray);
{
- Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), &if_detached);
+ // If {array} is a JSTypedArray, the {index} must always be a Smi.
+ CSA_ASSERT(this, TaggedIsSmi(index));
- TNode<Smi> length = LoadTypedArrayLength(CAST(array));
+    // Check that the {array}'s buffer wasn't neutered.
+ ThrowIfArrayBufferViewBufferIsDetached(context, CAST(array), method_name);
- GotoIfNot(SmiBelow(CAST(index), length), &set_done);
-
- var_value.Bind(index);
- TNode<Smi> one = SmiConstant(1);
+ // If we go outside of the {length}, we don't need to update the
+ // [[ArrayIteratorNextIndex]] anymore, since a JSTypedArray's
+ // length cannot change anymore, so this {iterator} will never
+    // produce values again.
+ TNode<Smi> length = LoadTypedArrayLength(CAST(array));
+ GotoIfNot(SmiBelow(CAST(index), length), &allocate_iterator_result);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
- SmiAdd(CAST(index), one));
+ SmiInc(CAST(index)));
+
var_done.Bind(FalseConstant());
+ var_value.Bind(index);
GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
iterator, JSArrayIterator::kKindOffset),
Int32Constant(static_cast<int>(IterationKind::kKeys))),
&allocate_iterator_result);
- Node* elements_kind = LoadMapElementsKind(array_map);
- Node* elements = LoadElements(array);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
+ Node* elements = LoadElements(CAST(array));
Node* base_ptr =
LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
Node* external_ptr =
LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
MachineType::Pointer());
- Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
-
- Label if_unknown_type(this, Label::kDeferred);
- int32_t elements_kinds[] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- Label if_##type##array(this);
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- Label* elements_kind_labels[] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- };
- STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
-
- Switch(elements_kind, &if_unknown_type, elements_kinds,
- elements_kind_labels, arraysize(elements_kinds));
-
- BIND(&if_unknown_type);
- Unreachable();
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- BIND(&if_##type##array); \
- { \
- var_value.Bind(LoadFixedTypedArrayElementAsTagged( \
- data_ptr, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \
- Goto(&allocate_entry_if_needed); \
- }
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- BIND(&if_detached);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
- }
-
- BIND(&set_done);
- {
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kIteratedObjectOffset, UndefinedConstant());
- Goto(&allocate_iterator_result);
+ TNode<WordT> data_ptr =
+ IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index),
+ elements_kind));
+ Goto(&allocate_entry_if_needed);
}
BIND(&allocate_entry_if_needed);
@@ -3733,48 +3688,17 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Int32Constant(static_cast<int>(IterationKind::kValues))),
&allocate_iterator_result);
- Node* elements = AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
- StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(elements, 1, var_value.value(), SKIP_WRITE_BARRIER);
-
- Node* entry = Allocate(JSArray::kSize);
- Node* map = LoadContextElement(LoadNativeContext(context),
- Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
-
- StoreMapNoWriteBarrier(entry, map);
- StoreObjectFieldRoot(entry, JSArray::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset, elements);
- StoreObjectFieldNoWriteBarrier(entry, JSArray::kLengthOffset,
- SmiConstant(2));
-
- var_value.Bind(entry);
- Goto(&allocate_iterator_result);
+ Node* result =
+ AllocateJSIteratorResultForEntry(context, index, var_value.value());
+ Return(result);
}
BIND(&allocate_iterator_result);
{
- Node* result = Allocate(JSIteratorResult::kSize);
- Node* map = LoadContextElement(LoadNativeContext(context),
- Context::ITERATOR_RESULT_MAP_INDEX);
- StoreMapNoWriteBarrier(result, map);
- StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOrHashOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
- var_value.value());
- StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
- var_done.value());
+ Node* result =
+ AllocateJSIteratorResult(context, var_value.value(), var_done.value());
Return(result);
}
-
- BIND(&throw_bad_receiver);
- {
- // The {receiver} is not a valid JSArrayIterator.
- ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
- StringConstant(method_name), iterator);
- }
}
namespace {
@@ -3817,7 +3741,7 @@ class ArrayFlattenAssembler : public CodeStubAssembler {
CSA_ASSERT(this,
SmiGreaterThanOrEqual(CAST(source_index), SmiConstant(0)));
Node* const exists =
- HasProperty(source, source_index, context, kHasProperty);
+ HasProperty(context, source, source_index, kHasProperty);
// c. If exists is true, then
Label next(this);
@@ -3959,7 +3883,7 @@ TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
Node* const depth = args.GetOptionalArgumentValue(0);
// 1. Let O be ? ToObject(this value).
- Node* const o = ToObject(context, receiver);
+ Node* const o = ToObject_Inline(CAST(context), CAST(receiver));
// 2. Let sourceLen be ? ToLength(? Get(O, "length")).
Node* const source_length =
@@ -4002,7 +3926,7 @@ TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
Node* const mapper_function = args.GetOptionalArgumentValue(0);
// 1. Let O be ? ToObject(this value).
- Node* const o = ToObject(context, receiver);
+ Node* const o = ToObject_Inline(CAST(context), CAST(receiver));
// 2. Let sourceLen be ? ToLength(? Get(O, "length")).
Node* const source_length =
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 92b32115a0..a73c072cee 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -68,14 +68,9 @@ class ArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
void NullPostLoopAction();
- // TODO(szuend): Remove once overload resolution is fixed in Torque.
- TNode<Object> LoadFixedArrayElementInt(TNode<FixedArray> array, int index) {
- return LoadFixedArrayElement(array, index);
- }
- Node* StoreFixedArrayElementInt(TNode<FixedArray> array, int index,
- TNode<Object> value) {
- return StoreFixedArrayElement(array, index, value);
- }
+ // Uses memset to effectively initialize the given FixedArray with Smi zeroes.
+ void FillFixedArrayWithSmiZero(TNode<FixedArray> array,
+ TNode<Smi> smi_length);
protected:
TNode<Context> context() { return context_; }
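The FillFixedArrayWithSmiZero helper declared above can use memset because the Smi encoding of 0 is the all-zero bit pattern (the integer payload sits above the tag bits and the Smi tag is 0). A small sketch of that invariant, assuming a 1-bit tag purely for illustration; this is not V8 code:

    #include <cstdint>
    #include <cstring>

    using Tagged = intptr_t;
    constexpr int kSmiTagSizeSketch = 1;  // assumed tag width, for illustration

    constexpr Tagged SmiFromInt(int value) {
      return static_cast<Tagged>(value) << kSmiTagSizeSketch;
    }

    int main() {
      Tagged backing[8];
      std::memset(backing, 0, sizeof(backing));  // every slot now encodes Smi 0
      static_assert(SmiFromInt(0) == 0, "Smi zero is the all-zero bit pattern");
      return backing[0] == SmiFromInt(0) ? 0 : 1;
    }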
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 5154b904f5..ceeee5f37d 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -2,19 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/builtins/builtins-utils.h"
-
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/contexts.h"
#include "src/counters.h"
-#include "src/elements.h"
+#include "src/debug/debug.h"
+#include "src/elements-inl.h"
#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/prototype.h"
namespace v8 {
@@ -79,11 +80,14 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
}
// Returns |false| if not applicable.
+// TODO(szuend): Refactor this function because it is getting hard to
+// understand what each call-site actually checks.
V8_WARN_UNUSED_RESULT
inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
Handle<Object> receiver,
BuiltinArguments* args,
- int first_added_arg) {
+ int first_arg_index,
+ int num_arguments) {
if (!receiver->IsJSArray()) return false;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ElementsKind origin_kind = array->GetElementsKind();
@@ -102,13 +106,14 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
- if (first_added_arg >= args_length) return true;
+ if (first_arg_index >= args_length) return true;
if (IsObjectElementsKind(origin_kind)) return true;
ElementsKind target_kind = origin_kind;
{
DisallowHeapAllocation no_gc;
- for (int i = first_added_arg; i < args_length; i++) {
+ int last_arg_index = std::min(first_arg_index + num_arguments, args_length);
+ for (int i = first_arg_index; i < last_arg_index; i++) {
Object* arg = (*args)[i];
if (arg->IsHeapObject()) {
if (arg->IsHeapNumber()) {
@@ -142,6 +147,164 @@ V8_WARN_UNUSED_RESULT static Object* CallJsIntrinsic(
Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
}
+// If |index| is Undefined, returns init_if_undefined.
+// If |index| is negative, returns length + index.
+// If |index| is positive, returns index.
+// Returned value is guaranteed to be in the interval of [0, length].
+V8_WARN_UNUSED_RESULT Maybe<double> GetRelativeIndex(Isolate* isolate,
+ double length,
+ Handle<Object> index,
+ double init_if_undefined) {
+ double relative_index = init_if_undefined;
+ if (!index->IsUndefined()) {
+ Handle<Object> relative_index_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, relative_index_obj,
+ Object::ToInteger(isolate, index),
+ Nothing<double>());
+ relative_index = relative_index_obj->Number();
+ }
+
+ if (relative_index < 0) {
+ return Just(std::max(length + relative_index, 0.0));
+ }
+
+ return Just(std::min(relative_index, length));
+}
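Restated as plain C++, the clamping rule GetRelativeIndex implements (a sketch that assumes undefined has already been resolved to the caller's default; RelativeIndexSketch is an illustrative name, not part of V8):

    #include <algorithm>
    #include <cassert>

    // Sketch only: mirrors the negative-index and clamping behaviour above.
    static double RelativeIndexSketch(double length, double relative_index) {
      if (relative_index < 0) return std::max(length + relative_index, 0.0);
      return std::min(relative_index, length);
    }

    int main() {
      assert(RelativeIndexSketch(5, -2) == 3);  // e.g. [1,2,3,4,5].fill(x, -2)
      assert(RelativeIndexSketch(5, 7) == 5);   // clamped to length
      assert(RelativeIndexSketch(5, -9) == 0);  // clamped to zero
      return 0;
    }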
+
+// Returns "length", has "fast-path" for JSArrays.
+V8_WARN_UNUSED_RESULT Maybe<double> GetLengthProperty(
+ Isolate* isolate, Handle<JSReceiver> receiver) {
+ if (receiver->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ double length = array->length()->Number();
+ DCHECK(0 <= length && length <= kMaxSafeInteger);
+
+ return Just(length);
+ }
+
+ Handle<Object> raw_length_number;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, raw_length_number,
+ Object::GetLengthFromArrayLike(isolate, receiver), Nothing<double>());
+ return Just(raw_length_number->Number());
+}
+
+V8_WARN_UNUSED_RESULT Object* GenericArrayFill(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> value,
+ double start, double end) {
+ // 7. Repeat, while k < final.
+ while (start < end) {
+ // a. Let Pk be ! ToString(k).
+ Handle<String> index = isolate->factory()->NumberToString(
+ isolate->factory()->NewNumber(start));
+
+ // b. Perform ? Set(O, Pk, value, true).
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Object::SetPropertyOrElement(isolate, receiver, index, value,
+ LanguageMode::kStrict));
+
+ // c. Increase k by 1.
+ ++start;
+ }
+
+ // 8. Return O.
+ return *receiver;
+}
+
+V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
+ Isolate* isolate, BuiltinArguments* args, Handle<JSReceiver> receiver,
+ Handle<Object> value, double start_index, double end_index) {
+ // If indices are too large, use generic path since they are stored as
+ // properties, not in the element backing store.
+ if (end_index > kMaxUInt32) return false;
+ if (!receiver->IsJSObject()) return false;
+
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, args, 1, 1)) {
+ return false;
+ }
+
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+
+ // If no argument was provided, we fill the array with 'undefined'.
+ // EnsureJSArrayWith... does not handle that case so we do it here.
+ // TODO(szuend): Pass target elements kind to EnsureJSArrayWith... when
+ // it gets refactored.
+ if (args->length() == 1 && array->GetElementsKind() != PACKED_ELEMENTS) {
+ // Use a short-lived HandleScope to avoid creating several copies of the
+ // elements handle which would cause issues when left-trimming later-on.
+ HandleScope scope(isolate);
+ JSObject::TransitionElementsKind(array, PACKED_ELEMENTS);
+ }
+
+ DCHECK_LE(start_index, kMaxUInt32);
+ DCHECK_LE(end_index, kMaxUInt32);
+
+ uint32_t start, end;
+ CHECK(DoubleToUint32IfEqualToSelf(start_index, &start));
+ CHECK(DoubleToUint32IfEqualToSelf(end_index, &end));
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ accessor->Fill(array, value, start, end);
+ return true;
+}
+} // namespace
+
+BUILTIN(ArrayPrototypeFill) {
+ HandleScope scope(isolate);
+
+ if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
+ if (!isolate->debug()->PerformSideEffectCheckForObject(args.receiver())) {
+ return ReadOnlyRoots(isolate).exception();
+ }
+ }
+
+ // 1. Let O be ? ToObject(this value).
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ToObject(isolate, args.receiver()));
+
+ // 2. Let len be ? ToLength(? Get(O, "length")).
+ double length;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, length, GetLengthProperty(isolate, receiver));
+
+ // 3. Let relativeStart be ? ToInteger(start).
+ // 4. If relativeStart < 0, let k be max((len + relativeStart), 0);
+ // else let k be min(relativeStart, len).
+ Handle<Object> start = args.atOrUndefined(isolate, 2);
+
+ double start_index;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, start_index, GetRelativeIndex(isolate, length, start, 0));
+
+ // 5. If end is undefined, let relativeEnd be len;
+ // else let relativeEnd be ? ToInteger(end).
+ // 6. If relativeEnd < 0, let final be max((len + relativeEnd), 0);
+ // else let final be min(relativeEnd, len).
+ Handle<Object> end = args.atOrUndefined(isolate, 3);
+
+ double end_index;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, end_index, GetRelativeIndex(isolate, length, end, length));
+
+ if (start_index >= end_index) return *receiver;
+
+ // Ensure indexes are within array bounds
+ DCHECK_LE(0, start_index);
+ DCHECK_LE(start_index, end_index);
+ DCHECK_LE(end_index, length);
+
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+ if (TryFastArrayFill(isolate, &args, receiver, value, start_index,
+ end_index)) {
+ return *receiver;
+ }
+ return GenericArrayFill(isolate, receiver, value, start_index, end_index);
+}
+
+namespace {
V8_WARN_UNUSED_RESULT Object* GenericArrayPush(Isolate* isolate,
BuiltinArguments* args) {
// 1. Let O be ? ToObject(this value).
@@ -153,7 +316,7 @@ V8_WARN_UNUSED_RESULT Object* GenericArrayPush(Isolate* isolate,
Handle<Object> raw_length_number;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, raw_length_number,
- Object::GetLengthFromArrayLike(isolate, Handle<Object>::cast(receiver)));
+ Object::GetLengthFromArrayLike(isolate, receiver));
// 3. Let args be a List whose elements are, in left to right order,
// the arguments that were passed to this function invocation.
@@ -210,7 +373,8 @@ V8_WARN_UNUSED_RESULT Object* GenericArrayPush(Isolate* isolate,
BUILTIN(ArrayPush) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1,
+ args.length() - 1)) {
return GenericArrayPush(isolate, &args);
}
@@ -293,7 +457,8 @@ V8_WARN_UNUSED_RESULT Object* GenericArrayPop(Isolate* isolate,
BUILTIN(ArrayPop) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0)) {
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0,
+ 0)) {
return GenericArrayPop(isolate, &args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -324,7 +489,8 @@ BUILTIN(ArrayShift) {
HandleScope scope(isolate);
Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0) ||
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0,
+ 0) ||
!IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
return CallJsIntrinsic(isolate, isolate->array_shift(), args);
}
@@ -344,7 +510,8 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1,
+ args.length() - 1)) {
return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -367,7 +534,8 @@ BUILTIN(ArraySplice) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (V8_UNLIKELY(
- !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3) ||
+ !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3,
+ args.length() - 3) ||
// If this is a subclass of Array, then call out to JS.
!Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
// If anything with @@species has been messed with, call out to JS.
@@ -662,7 +830,7 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
}
break;
}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -733,7 +901,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
});
break;
}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -958,7 +1126,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
}
case NO_ELEMENTS:
break;
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
return IterateElementsSlow(isolate, receiver, length, visitor);
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 92b003a4e2..808c34e43b 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
+#include "src/maybe-handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
@@ -61,20 +63,20 @@ BUILTIN(ArrayBufferConstructor) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
handle(target->shared()->Name(), isolate)));
- } else { // [[Construct]]
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> length = args.atOrUndefined(isolate, 1);
-
- Handle<Object> number_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
- Object::ToInteger(isolate, length));
- if (number_length->Number() < 0.0) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+ // [[Construct]]
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> length = args.atOrUndefined(isolate, 1);
+
+ Handle<Object> number_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
+ Object::ToInteger(isolate, length));
+ if (number_length->Number() < 0.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
return ConstructBuffer(isolate, target, new_target, number_length, true);
- }
}
// This is a helper to construct an ArrayBuffer with uninitialized memory.
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 7fbfbdd494..cf5e18f6a0 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index c7e3c5cdeb..4568507a9f 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -123,8 +123,8 @@ Node* AsyncBuiltinsAssembler::Await(
// than a meaningful catch handler
Node* const key =
HeapConstant(factory()->promise_forwarding_handler_symbol());
- CallRuntime(Runtime::kSetProperty, context, on_reject, key,
- TrueConstant(), SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(CAST(context), CAST(on_reject), CAST(key),
+ TrueConstant());
GotoIf(IsFalse(is_predicted_as_caught), &common);
PromiseSetHandledHint(value);
@@ -137,8 +137,8 @@ Node* AsyncBuiltinsAssembler::Await(
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
- SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(CAST(context), CAST(throwaway), CAST(key),
+ CAST(outer_promise));
}
Goto(&do_perform_promise_then);
@@ -237,8 +237,8 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(
// than a meaningful catch handler
Node* const key =
HeapConstant(factory()->promise_forwarding_handler_symbol());
- CallRuntime(Runtime::kSetProperty, context, on_reject, key,
- TrueConstant(), SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(CAST(context), CAST(on_reject), CAST(key),
+ TrueConstant());
GotoIf(IsFalse(is_predicted_as_caught), &common);
PromiseSetHandledHint(value);
@@ -251,8 +251,8 @@ Node* AsyncBuiltinsAssembler::AwaitOptimized(
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
- CallRuntime(Runtime::kSetProperty, context, throwaway, key, outer_promise,
- SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(CAST(context), CAST(throwaway), CAST(key),
+ CAST(outer_promise));
}
Goto(&do_perform_promise_then);
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 251ff1ee40..bbb2571691 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -8,6 +8,7 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/frames-inl.h"
+#include "src/objects/js-generator.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 6bc1a911f7..2800441ffc 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
@@ -13,26 +13,26 @@ namespace internal {
BUILTIN(BigIntConstructor) {
HandleScope scope(isolate);
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- Handle<Object> value = args.atOrUndefined(isolate, 1);
-
- if (value->IsJSReceiver()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
- ToPrimitiveHint::kNumber));
- }
-
- if (value->IsNumber()) {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, value));
- } else {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, value));
- }
- } else { // [[Construct]]
+ if (!args.new_target()->IsUndefined(isolate)) { // [[Construct]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotConstructor,
isolate->factory()->BigInt_string()));
}
+ // [[Call]]
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+ if (value->IsJSReceiver()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
+ ToPrimitiveHint::kNumber));
+ }
+
+ if (value->IsNumber()) {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, value));
+ } else {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, value));
+ }
}
BUILTIN(BigIntAsUintN) {
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index e0e1bb738c..52645cbaa0 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/objects-inl.h"
@@ -19,19 +19,18 @@ BUILTIN(BooleanConstructor) {
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
Handle<Object> value = args.atOrUndefined(isolate, 1);
return isolate->heap()->ToBoolean(value->BooleanValue(isolate));
- } else { // [[Construct]]
- HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- DCHECK(*target == target->native_context()->boolean_function());
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- Handle<JSValue>::cast(result)->set_value(
- isolate->heap()->ToBoolean(value->BooleanValue(isolate)));
- return *result;
}
+ // [[Construct]]
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ DCHECK(*target == target->native_context()->boolean_function());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ Handle<JSValue>::cast(result)->set_value(
+ isolate->heap()->ToBoolean(value->BooleanValue(isolate)));
+ return *result;
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 35aaee5ec2..4defe28cb7 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -237,7 +237,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
TNode<Object> target, SloppyTNode<Object> new_target,
TNode<FixedDoubleArray> elements, TNode<Int32T> length,
TNode<Int32T> args_count, TNode<Context> context, TNode<Int32T> kind) {
- Label if_holey_double(this), if_packed_double(this), if_done(this);
+ Label if_done(this);
const ElementsKind new_kind = PACKED_ELEMENTS;
const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
@@ -245,27 +245,22 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
CSA_ASSERT(this, WordNotEqual(intptr_length, IntPtrConstant(0)));
// Allocate a new FixedArray of Objects.
- TNode<FixedArray> new_elements = AllocateFixedArray(
- new_kind, intptr_length, CodeStubAssembler::kAllowLargeObjectAllocation);
+ TNode<FixedArray> new_elements = CAST(AllocateFixedArray(
+ new_kind, intptr_length, CodeStubAssembler::kAllowLargeObjectAllocation));
Branch(Word32Equal(kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
- &if_holey_double, &if_packed_double);
-
- BIND(&if_holey_double);
- {
- // Fill the FixedArray with pointers to HeapObjects.
- CopyFixedArrayElements(HOLEY_DOUBLE_ELEMENTS, elements, new_kind,
- new_elements, intptr_length, intptr_length,
- barrier_mode);
- Goto(&if_done);
- }
-
- BIND(&if_packed_double);
- {
- CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
- new_elements, intptr_length, intptr_length,
- barrier_mode);
- Goto(&if_done);
- }
+ [&] {
+ // Fill the FixedArray with pointers to HeapObjects.
+ CopyFixedArrayElements(HOLEY_DOUBLE_ELEMENTS, elements, new_kind,
+ new_elements, intptr_length, intptr_length,
+ barrier_mode);
+ Goto(&if_done);
+ },
+ [&] {
+ CopyFixedArrayElements(PACKED_DOUBLE_ELEMENTS, elements, new_kind,
+ new_elements, intptr_length, intptr_length,
+ barrier_mode);
+ Goto(&if_done);
+ });
BIND(&if_done);
{
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index ff04fa2dbe..c41626dfd4 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/builtins/builtins-utils.h"
-
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
-#include "src/string-builder.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 6cab828e2e..5808d2a98c 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -119,7 +119,7 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
// Loads an element from a fixed array. If the element is the hole, returns
// `undefined`.
- TNode<Object> LoadAndNormalizeFixedArrayElement(TNode<HeapObject> elements,
+ TNode<Object> LoadAndNormalizeFixedArrayElement(TNode<FixedArray> elements,
TNode<IntPtrT> index);
// Loads an element from a fixed double array. If the element is the hole,
@@ -164,8 +164,7 @@ void BaseCollectionsAssembler::AddConstructorEntries(
Variant variant, TNode<Context> context, TNode<Context> native_context,
TNode<Object> collection, TNode<Object> initial_entries) {
TVARIABLE(BoolT, use_fast_loop,
- IsFastJSArrayWithNoCustomIteration(initial_entries, context,
- native_context));
+ IsFastJSArrayWithNoCustomIteration(initial_entries, context));
TNode<IntPtrT> at_least_space_for =
EstimatedInitialSize(initial_entries, use_fast_loop.value());
Label allocate_table(this, &use_fast_loop), exit(this), fast_loop(this),
@@ -186,8 +185,8 @@ void BaseCollectionsAssembler::AddConstructorEntries(
TNode<JSArray> initial_entries_jsarray =
UncheckedCast<JSArray>(initial_entries);
#if DEBUG
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(
- initial_entries_jsarray, context, native_context));
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(initial_entries_jsarray,
+ context));
TNode<Map> original_initial_entries_map = LoadMap(initial_entries_jsarray);
#endif
@@ -228,8 +227,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
CSA_ASSERT(
this,
WordEqual(GetAddFunction(variant, native_context, collection), add_func));
- CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(fast_jsarray, context,
- native_context));
+ CSA_ASSERT(this, IsFastJSArrayWithNoCustomIteration(fast_jsarray, context));
TNode<IntPtrT> length = SmiUntag(LoadFastJSArrayLength(fast_jsarray));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, IntPtrConstant(0)));
CSA_ASSERT(
@@ -247,7 +245,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
{
auto set_entry = [&](Node* index) {
TNode<Object> element = LoadAndNormalizeFixedArrayElement(
- elements, UncheckedCast<IntPtrT>(index));
+ CAST(elements), UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, element,
if_may_have_side_effects);
};
@@ -491,7 +489,7 @@ TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
}
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
- TNode<HeapObject> elements, TNode<IntPtrT> index) {
+ TNode<FixedArray> elements, TNode<IntPtrT> index) {
TNode<Object> element = LoadFixedArrayElement(elements, index);
return Select<Object>(IsTheHole(element), [=] { return UndefinedConstant(); },
[=] { return element; });
@@ -547,14 +545,18 @@ void BaseCollectionsAssembler::LoadKeyValue(
}
BIND(&if_one);
{
- *key = LoadAndNormalizeFixedArrayElement(elements, IntPtrConstant(0));
+ *key = LoadAndNormalizeFixedArrayElement(CAST(elements),
+ IntPtrConstant(0));
*value = UndefinedConstant();
Goto(&exit);
}
BIND(&if_two);
{
- *key = LoadAndNormalizeFixedArrayElement(elements, IntPtrConstant(0));
- *value = LoadAndNormalizeFixedArrayElement(elements, IntPtrConstant(1));
+ TNode<FixedArray> elements_fixed_array = CAST(elements);
+ *key = LoadAndNormalizeFixedArrayElement(elements_fixed_array,
+ IntPtrConstant(0));
+ *value = LoadAndNormalizeFixedArrayElement(elements_fixed_array,
+ IntPtrConstant(1));
Goto(&exit);
}
}
@@ -636,14 +638,15 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
typedef std::function<void(Node* const table, Node* const index)>
UpdateInTransition;
template <typename TableType>
- std::tuple<Node*, Node*> Transition(
- Node* const table, Node* const index,
+ std::pair<TNode<TableType>, TNode<IntPtrT>> Transition(
+ TNode<TableType> const table, TNode<IntPtrT> const index,
UpdateInTransition const& update_in_transition);
template <typename IteratorType, typename TableType>
- std::tuple<Node*, Node*> TransitionAndUpdate(Node* const iterator);
+ std::pair<TNode<TableType>, TNode<IntPtrT>> TransitionAndUpdate(
+ TNode<IteratorType> const iterator);
template <typename TableType>
- std::tuple<Node*, Node*, Node*> NextSkipHoles(Node* table, Node* index,
- Label* if_end);
+ std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>> NextSkipHoles(
+ TNode<TableType> table, TNode<IntPtrT> index, Label* if_end);
// Specialization for Smi.
// The {result} variable will contain the entry index if the key was found,
@@ -708,12 +711,13 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
Label* if_not_found);
Node* NormalizeNumberKey(Node* key);
- void StoreOrderedHashMapNewEntry(Node* const table, Node* const key,
- Node* const value, Node* const hash,
+ void StoreOrderedHashMapNewEntry(TNode<OrderedHashMap> const table,
+ Node* const key, Node* const value,
+ Node* const hash,
Node* const number_of_buckets,
Node* const occupancy);
- void StoreOrderedHashSetNewEntry(Node* const table, Node* const key,
- Node* const hash,
+ void StoreOrderedHashSetNewEntry(TNode<OrderedHashSet> const table,
+ Node* const key, Node* const hash,
Node* const number_of_buckets,
Node* const occupancy);
};
@@ -1020,7 +1024,8 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
Node* i = var_i.value();
GotoIfNot(IntPtrLessThan(i, number_of_deleted_elements), &return_index);
TNode<Smi> removed_index = CAST(LoadFixedArrayElement(
- table, i, OrderedHashTableBase::kRemovedHolesIndex * kPointerSize));
+ CAST(table), i,
+ OrderedHashTableBase::kRemovedHolesIndex * kPointerSize));
GotoIf(SmiGreaterThanOrEqual(removed_index, index), &return_index);
Decrement(&var_index, 1, SMI_PARAMETERS);
Increment(&var_i);
@@ -1035,11 +1040,12 @@ TF_BUILTIN(OrderedHashTableHealIndex, CollectionsBuiltinsAssembler) {
}
template <typename TableType>
-std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
- Node* const table, Node* const index,
+std::pair<TNode<TableType>, TNode<IntPtrT>>
+CollectionsBuiltinsAssembler::Transition(
+ TNode<TableType> const table, TNode<IntPtrT> const index,
UpdateInTransition const& update_in_transition) {
- VARIABLE(var_index, MachineType::PointerRepresentation(), index);
- VARIABLE(var_table, MachineRepresentation::kTagged, table);
+ TVARIABLE(IntPtrT, var_index, index);
+ TVARIABLE(TableType, var_table, table);
Label if_done(this), if_transition(this, Label::kDeferred);
Branch(TaggedIsSmi(
LoadObjectField(var_table.value(), TableType::kNextTableOffset)),
@@ -1051,16 +1057,17 @@ std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
Goto(&loop);
BIND(&loop);
{
- Node* table = var_table.value();
- Node* index = var_index.value();
+ TNode<TableType> table = var_table.value();
+ TNode<IntPtrT> index = var_index.value();
- Node* next_table = LoadObjectField(table, TableType::kNextTableOffset);
+ TNode<Object> next_table =
+ LoadObjectField(table, TableType::kNextTableOffset);
GotoIf(TaggedIsSmi(next_table), &done_loop);
- var_table.Bind(next_table);
- var_index.Bind(SmiUntag(
+ var_table = CAST(next_table);
+ var_index = SmiUntag(
CAST(CallBuiltin(Builtins::kOrderedHashTableHealIndex,
- NoContextConstant(), table, SmiTag(index)))));
+ NoContextConstant(), table, SmiTag(index))));
Goto(&loop);
}
BIND(&done_loop);
@@ -1071,14 +1078,15 @@ std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::Transition(
}
BIND(&if_done);
- return std::tuple<Node*, Node*>(var_table.value(), var_index.value());
+ return {var_table.value(), var_index.value()};
}
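The Transition helper above chases next_table links after a rehash and heals the saved iterator index via OrderedHashTableHealIndex, which subtracts the number of removed entries recorded below that index. A simplified sketch of the healing step, with illustrative names only:

    #include <cstddef>
    #include <vector>

    // Sketch only: shift a saved iterator index down by the number of removed
    // entries that sat below it (removed_indices is assumed sorted ascending).
    static size_t HealIndexSketch(size_t index,
                                  const std::vector<size_t>& removed_indices) {
      size_t healed = index;
      for (size_t removed : removed_indices) {
        if (removed >= index) break;
        --healed;
      }
      return healed;
    }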
template <typename IteratorType, typename TableType>
-std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::TransitionAndUpdate(
- Node* const iterator) {
+std::pair<TNode<TableType>, TNode<IntPtrT>>
+CollectionsBuiltinsAssembler::TransitionAndUpdate(
+ TNode<IteratorType> const iterator) {
return Transition<TableType>(
- LoadObjectField(iterator, IteratorType::kTableOffset),
+ CAST(LoadObjectField(iterator, IteratorType::kTableOffset)),
LoadAndUntagObjectField(iterator, IteratorType::kIndexOffset),
[this, iterator](Node* const table, Node* const index) {
// Update the {iterator} with the new state.
@@ -1089,21 +1097,23 @@ std::tuple<Node*, Node*> CollectionsBuiltinsAssembler::TransitionAndUpdate(
}
template <typename TableType>
-std::tuple<Node*, Node*, Node*> CollectionsBuiltinsAssembler::NextSkipHoles(
- Node* table, Node* index, Label* if_end) {
+std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>>
+CollectionsBuiltinsAssembler::NextSkipHoles(TNode<TableType> table,
+ TNode<IntPtrT> index,
+ Label* if_end) {
// Compute the used capacity for the {table}.
- Node* number_of_buckets =
+ TNode<IntPtrT> number_of_buckets =
LoadAndUntagObjectField(table, TableType::kNumberOfBucketsOffset);
- Node* number_of_elements =
+ TNode<IntPtrT> number_of_elements =
LoadAndUntagObjectField(table, TableType::kNumberOfElementsOffset);
- Node* number_of_deleted_elements =
+ TNode<IntPtrT> number_of_deleted_elements =
LoadAndUntagObjectField(table, TableType::kNumberOfDeletedElementsOffset);
- Node* used_capacity =
+ TNode<IntPtrT> used_capacity =
IntPtrAdd(number_of_elements, number_of_deleted_elements);
- Node* entry_key;
- Node* entry_start_position;
- VARIABLE(var_index, MachineType::PointerRepresentation(), index);
+ TNode<Object> entry_key;
+ TNode<IntPtrT> entry_start_position;
+ TVARIABLE(IntPtrT, var_index, index);
Label loop(this, &var_index), done_loop(this);
Goto(&loop);
BIND(&loop);
@@ -1120,8 +1130,8 @@ std::tuple<Node*, Node*, Node*> CollectionsBuiltinsAssembler::NextSkipHoles(
}
BIND(&done_loop);
- return std::tuple<Node*, Node*, Node*>(entry_key, entry_start_position,
- var_index.value());
+ return std::tuple<TNode<Object>, TNode<IntPtrT>, TNode<IntPtrT>>{
+ entry_key, entry_start_position, var_index.value()};
}
TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
@@ -1141,7 +1151,7 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) {
BIND(&if_found);
Return(LoadFixedArrayElement(
- table, SmiUntag(index),
+ CAST(table), SmiUntag(index),
(OrderedHashMap::kHashTableStartIndex + OrderedHashMap::kValueOffset) *
kPointerSize));
@@ -1198,7 +1208,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
key = NormalizeNumberKey(key);
- Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ TNode<OrderedHashMap> const table =
+ CAST(LoadObjectField(receiver, JSMap::kTableOffset));
VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
IntPtrConstant(0));
@@ -1232,7 +1243,7 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
BIND(&add_entry);
VARIABLE(number_of_buckets, MachineType::PointerRepresentation());
VARIABLE(occupancy, MachineType::PointerRepresentation());
- VARIABLE(table_var, MachineRepresentation::kTaggedPointer, table);
+ TVARIABLE(OrderedHashMap, table_var, table);
{
// Check we have enough space for the entry.
number_of_buckets.Bind(SmiUntag(CAST(
@@ -1250,7 +1261,7 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
// We do not have enough space, grow the table and reload the relevant
// fields.
CallRuntime(Runtime::kMapGrow, context, receiver);
- table_var.Bind(LoadObjectField(receiver, JSMap::kTableOffset));
+ table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
table_var.value(), OrderedHashMap::kNumberOfBucketsIndex))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
@@ -1269,8 +1280,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
}
void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
- Node* const table, Node* const key, Node* const value, Node* const hash,
- Node* const number_of_buckets, Node* const occupancy) {
+ TNode<OrderedHashMap> const table, Node* const key, Node* const value,
+ Node* const hash, Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
Node* const bucket_entry = LoadFixedArrayElement(
@@ -1308,7 +1319,8 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE,
"Map.prototype.delete");
- Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ TNode<OrderedHashMap> const table =
+ CAST(LoadObjectField(receiver, JSMap::kTableOffset));
VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
IntPtrConstant(0));
@@ -1368,7 +1380,8 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
key = NormalizeNumberKey(key);
- Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ TNode<OrderedHashSet> const table =
+ CAST(LoadObjectField(receiver, JSMap::kTableOffset));
VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
IntPtrConstant(0));
@@ -1398,7 +1411,7 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
BIND(&add_entry);
VARIABLE(number_of_buckets, MachineType::PointerRepresentation());
VARIABLE(occupancy, MachineType::PointerRepresentation());
- VARIABLE(table_var, MachineRepresentation::kTaggedPointer, table);
+ TVARIABLE(OrderedHashSet, table_var, table);
{
// Check we have enough space for the entry.
number_of_buckets.Bind(SmiUntag(CAST(
@@ -1416,7 +1429,7 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
// We do not have enough space, grow the table and reload the relevant
// fields.
CallRuntime(Runtime::kSetGrow, context, receiver);
- table_var.Bind(LoadObjectField(receiver, JSMap::kTableOffset));
+ table_var = CAST(LoadObjectField(receiver, JSMap::kTableOffset));
number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
table_var.value(), OrderedHashSet::kNumberOfBucketsIndex))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
@@ -1435,7 +1448,7 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
}
void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
- Node* const table, Node* const key, Node* const hash,
+ TNode<OrderedHashSet> const table, Node* const key, Node* const hash,
Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
@@ -1471,7 +1484,8 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE,
"Set.prototype.delete");
- Node* const table = LoadObjectField(receiver, JSMap::kTableOffset);
+ TNode<OrderedHashSet> const table =
+ CAST(LoadObjectField(receiver, JSMap::kTableOffset));
VARIABLE(entry_start_position_or_hash, MachineType::PointerRepresentation(),
IntPtrConstant(0));
@@ -1552,23 +1566,23 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
GotoIf(TaggedIsSmi(callback), &callback_not_callable);
GotoIfNot(IsCallable(callback), &callback_not_callable);
- VARIABLE(var_index, MachineType::PointerRepresentation(), IntPtrConstant(0));
- VARIABLE(var_table, MachineRepresentation::kTagged,
- LoadObjectField(receiver, JSMap::kTableOffset));
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ TVARIABLE(OrderedHashMap, var_table,
+ CAST(LoadObjectField(receiver, JSMap::kTableOffset)));
Label loop(this, {&var_index, &var_table}), done_loop(this);
Goto(&loop);
BIND(&loop);
{
// Transition {table} and {index} if there was any modification to
// the {receiver} while we're iterating.
- Node* index = var_index.value();
- Node* table = var_table.value();
+ TNode<IntPtrT> index = var_index.value();
+ TNode<OrderedHashMap> table = var_table.value();
std::tie(table, index) =
Transition<OrderedHashMap>(table, index, [](Node*, Node*) {});
// Read the next entry from the {table}, skipping holes.
- Node* entry_key;
- Node* entry_start_position;
+ TNode<Object> entry_key;
+ TNode<IntPtrT> entry_start_position;
std::tie(entry_key, entry_start_position, index) =
NextSkipHoles<OrderedHashMap>(table, index, &done_loop);
@@ -1584,8 +1598,8 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) {
entry_value, entry_key, receiver);
// Continue with the next entry.
- var_index.Bind(index);
- var_table.Bind(table);
+ var_index = index;
+ var_table = table;
Goto(&loop);
}
@@ -1644,14 +1658,14 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
return_end(this, Label::kDeferred);
// Transition the {receiver} table if necessary.
- Node* table;
- Node* index;
+ TNode<OrderedHashMap> table;
+ TNode<IntPtrT> index;
std::tie(table, index) =
- TransitionAndUpdate<JSMapIterator, OrderedHashMap>(receiver);
+ TransitionAndUpdate<JSMapIterator, OrderedHashMap>(CAST(receiver));
// Read the next entry from the {table}, skipping holes.
- Node* entry_key;
- Node* entry_start_position;
+ TNode<Object> entry_key;
+ TNode<IntPtrT> entry_start_position;
std::tie(entry_key, entry_start_position, index) =
NextSkipHoles<OrderedHashMap>(table, index, &return_end);
StoreObjectFieldNoWriteBarrier(receiver, JSMapIterator::kIndexOffset,
@@ -1783,17 +1797,17 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
GotoIf(TaggedIsSmi(callback), &callback_not_callable);
GotoIfNot(IsCallable(callback), &callback_not_callable);
- VARIABLE(var_index, MachineType::PointerRepresentation(), IntPtrConstant(0));
- VARIABLE(var_table, MachineRepresentation::kTagged,
- LoadObjectField(receiver, JSSet::kTableOffset));
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ TVARIABLE(OrderedHashSet, var_table,
+ CAST(LoadObjectField(receiver, JSSet::kTableOffset)));
Label loop(this, {&var_index, &var_table}), done_loop(this);
Goto(&loop);
BIND(&loop);
{
// Transition {table} and {index} if there was any modification to
// the {receiver} while we're iterating.
- Node* index = var_index.value();
- Node* table = var_table.value();
+ TNode<IntPtrT> index = var_index.value();
+ TNode<OrderedHashSet> table = var_table.value();
std::tie(table, index) =
Transition<OrderedHashSet>(table, index, [](Node*, Node*) {});
@@ -1808,8 +1822,8 @@ TF_BUILTIN(SetPrototypeForEach, CollectionsBuiltinsAssembler) {
entry_key, receiver);
// Continue with the next entry.
- var_index.Bind(index);
- var_table.Bind(table);
+ var_index = index;
+ var_table = table;
Goto(&loop);
}
@@ -1858,10 +1872,10 @@ TF_BUILTIN(SetIteratorPrototypeNext, CollectionsBuiltinsAssembler) {
return_end(this, Label::kDeferred);
// Transition the {receiver} table if necessary.
- Node* table;
- Node* index;
+ TNode<OrderedHashSet> table;
+ TNode<IntPtrT> index;
std::tie(table, index) =
- TransitionAndUpdate<JSSetIterator, OrderedHashSet>(receiver);
+ TransitionAndUpdate<JSSetIterator, OrderedHashSet>(CAST(receiver));
// Read the next entry from the {table}, skipping holes.
Node* entry_key;
@@ -1968,7 +1982,7 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
: BaseCollectionsAssembler(state) {}
protected:
- void AddEntry(TNode<HeapObject> table, TNode<IntPtrT> key_index,
+ void AddEntry(TNode<EphemeronHashTable> table, TNode<IntPtrT> key_index,
TNode<Object> key, TNode<Object> value,
TNode<IntPtrT> number_of_elements);
@@ -2007,12 +2021,14 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
TNode<IntPtrT> number_of_deleted);
TNode<IntPtrT> KeyIndexFromEntry(TNode<IntPtrT> entry);
- TNode<IntPtrT> LoadNumberOfElements(TNode<HeapObject> table, int offset);
- TNode<IntPtrT> LoadNumberOfDeleted(TNode<HeapObject> table, int offset = 0);
- TNode<HeapObject> LoadTable(SloppyTNode<HeapObject> collection);
- TNode<IntPtrT> LoadTableCapacity(TNode<HeapObject> table);
+ TNode<IntPtrT> LoadNumberOfElements(TNode<EphemeronHashTable> table,
+ int offset);
+ TNode<IntPtrT> LoadNumberOfDeleted(TNode<EphemeronHashTable> table,
+ int offset = 0);
+ TNode<EphemeronHashTable> LoadTable(TNode<JSWeakCollection> collection);
+ TNode<IntPtrT> LoadTableCapacity(TNode<EphemeronHashTable> table);
- void RemoveEntry(TNode<HeapObject> table, TNode<IntPtrT> key_index,
+ void RemoveEntry(TNode<EphemeronHashTable> table, TNode<IntPtrT> key_index,
TNode<IntPtrT> number_of_elements);
TNode<BoolT> ShouldRehash(TNode<IntPtrT> number_of_elements,
TNode<IntPtrT> number_of_deleted);
@@ -2022,8 +2038,8 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
};
void WeakCollectionsBuiltinsAssembler::AddEntry(
- TNode<HeapObject> table, TNode<IntPtrT> key_index, TNode<Object> key,
- TNode<Object> value, TNode<IntPtrT> number_of_elements) {
+ TNode<EphemeronHashTable> table, TNode<IntPtrT> key_index,
+ TNode<Object> key, TNode<Object> value, TNode<IntPtrT> number_of_elements) {
// See EphemeronHashTable::AddEntry().
TNode<IntPtrT> value_index = ValueIndexFromKeyIndex(key_index);
StoreFixedArrayElement(table, key_index, key);
@@ -2044,8 +2060,8 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::AllocateTable(
// See HashTable::NewInternal().
TNode<IntPtrT> length = KeyIndexFromEntry(capacity);
- TNode<FixedArray> table =
- AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation);
+ TNode<FixedArray> table = CAST(
+ AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation));
Heap::RootListIndex map_root_index = static_cast<Heap::RootListIndex>(
EphemeronHashTableShape::GetMapRootIndex());
@@ -2097,7 +2113,7 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
TNode<IntPtrT> key_index;
{
key_index = KeyIndexFromEntry(var_entry.value());
- TNode<Object> entry_key = LoadFixedArrayElement(table, key_index);
+ TNode<Object> entry_key = LoadFixedArrayElement(CAST(table), key_index);
key_compare(entry_key, &if_found);
@@ -2146,26 +2162,26 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::KeyIndexFromEntry(
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfElements(
- TNode<HeapObject> table, int offset) {
+ TNode<EphemeronHashTable> table, int offset) {
TNode<IntPtrT> number_of_elements = SmiUntag(CAST(LoadFixedArrayElement(
table, EphemeronHashTable::kNumberOfElementsIndex)));
return IntPtrAdd(number_of_elements, IntPtrConstant(offset));
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfDeleted(
- TNode<HeapObject> table, int offset) {
+ TNode<EphemeronHashTable> table, int offset) {
TNode<IntPtrT> number_of_deleted = SmiUntag(CAST(LoadFixedArrayElement(
table, EphemeronHashTable::kNumberOfDeletedElementsIndex)));
return IntPtrAdd(number_of_deleted, IntPtrConstant(offset));
}
-TNode<HeapObject> WeakCollectionsBuiltinsAssembler::LoadTable(
- SloppyTNode<HeapObject> collection) {
+TNode<EphemeronHashTable> WeakCollectionsBuiltinsAssembler::LoadTable(
+ TNode<JSWeakCollection> collection) {
return CAST(LoadObjectField(collection, JSWeakCollection::kTableOffset));
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadTableCapacity(
- TNode<HeapObject> table) {
+ TNode<EphemeronHashTable> table) {
return SmiUntag(
CAST(LoadFixedArrayElement(table, EphemeronHashTable::kCapacityIndex)));
}
@@ -2189,7 +2205,7 @@ TNode<Word32T> WeakCollectionsBuiltinsAssembler::InsufficientCapacityToAdd(
}
void WeakCollectionsBuiltinsAssembler::RemoveEntry(
- TNode<HeapObject> table, TNode<IntPtrT> key_index,
+ TNode<EphemeronHashTable> table, TNode<IntPtrT> key_index,
TNode<IntPtrT> number_of_elements) {
// See EphemeronHashTable::RemoveEntry().
TNode<IntPtrT> value_index = ValueIndexFromKeyIndex(key_index);
@@ -2256,7 +2272,7 @@ TF_BUILTIN(WeakSetConstructor, WeakCollectionsBuiltinsAssembler) {
}
TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) {
- TNode<HeapObject> table = CAST(Parameter(Descriptor::kTable));
+ TNode<EphemeronHashTable> table = CAST(Parameter(Descriptor::kTable));
TNode<Object> key = CAST(Parameter(Descriptor::kKey));
Label if_not_found(this);
@@ -2283,9 +2299,9 @@ TF_BUILTIN(WeakMapGet, WeakCollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
"WeakMap.prototype.get");
- Node* const table = LoadTable(receiver);
- Node* const index =
- CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
+ TNode<EphemeronHashTable> const table = LoadTable(CAST(receiver));
+ TNode<Smi> const index =
+ CAST(CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key));
GotoIf(WordEqual(index, SmiConstant(-1)), &return_undefined);
@@ -2305,7 +2321,7 @@ TF_BUILTIN(WeakMapHas, WeakCollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_WEAK_MAP_TYPE,
"WeakMap.prototype.has");
- Node* const table = LoadTable(receiver);
+ TNode<EphemeronHashTable> const table = LoadTable(CAST(receiver));
Node* const index =
CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
@@ -2321,7 +2337,7 @@ TF_BUILTIN(WeakMapHas, WeakCollectionsBuiltinsAssembler) {
// (EphemeronHashTable) of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> collection = CAST(Parameter(Descriptor::kCollection));
+ TNode<JSWeakCollection> collection = CAST(Parameter(Descriptor::kCollection));
TNode<Object> key = CAST(Parameter(Descriptor::kKey));
Label call_runtime(this), if_not_found(this);
@@ -2329,7 +2345,7 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
GotoIfNotJSReceiver(key, &if_not_found);
TNode<IntPtrT> hash = LoadJSReceiverIdentityHash(key, &if_not_found);
- TNode<HeapObject> table = LoadTable(collection);
+ TNode<EphemeronHashTable> table = LoadTable(collection);
TNode<IntPtrT> capacity = LoadTableCapacity(table);
TNode<IntPtrT> key_index =
FindKeyIndexForKey(table, key, hash, EntryMask(capacity), &if_not_found);
@@ -2351,7 +2367,7 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) {
// of a WeakMap or WeakSet.
TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<HeapObject> collection = CAST(Parameter(Descriptor::kCollection));
+ TNode<JSWeakCollection> collection = CAST(Parameter(Descriptor::kCollection));
TNode<JSReceiver> key = CAST(Parameter(Descriptor::kKey));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
@@ -2359,7 +2375,7 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) {
Label call_runtime(this), if_no_hash(this), if_not_found(this);
- TNode<HeapObject> table = LoadTable(collection);
+ TNode<EphemeronHashTable> table = LoadTable(collection);
TNode<IntPtrT> capacity = LoadTableCapacity(table);
TNode<IntPtrT> entry_mask = EntryMask(capacity);
@@ -2469,7 +2485,7 @@ TF_BUILTIN(WeakSetHas, WeakCollectionsBuiltinsAssembler) {
ThrowIfNotInstanceType(context, receiver, JS_WEAK_SET_TYPE,
"WeakSet.prototype.has");
- Node* const table = LoadTable(receiver);
+ Node* const table = LoadTable(CAST(receiver));
Node* const index =
CallBuiltin(Builtins::kWeakMapLookupHashIndex, context, table, key);
diff --git a/deps/v8/src/builtins/builtins-collections.cc b/deps/v8/src/builtins/builtins-collections.cc
index 9a642e7d3b..f9b1ebc0ac 100644
--- a/deps/v8/src/builtins/builtins-collections.cc
+++ b/deps/v8/src/builtins/builtins-collections.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/objects-inl.h"
#include "src/objects/js-collection-inl.h"
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index d87183c716..e9f252cb6a 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/api-inl.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-
-#include "src/api.h"
#include "src/debug/interface-types.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 76c28bc869..8e54c4c369 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -233,7 +233,8 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
Node* size = GetFixedArrayAllocationSize(length, PACKED_ELEMENTS, mode);
// Create a new closure from the given function info in new space
- Node* function_context = AllocateInNewSpace(size);
+ TNode<Context> function_context =
+ UncheckedCast<Context>(AllocateInNewSpace(size));
Heap::RootListIndex context_type;
switch (scope_type) {
@@ -730,6 +731,12 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
}
}
+TF_BUILTIN(GenericConstructorLazyDeoptContinuation,
+ ConstructorBuiltinsAssembler) {
+ Node* result = Parameter(Descriptor::kResult);
+ Return(result);
+}
+
// https://tc39.github.io/ecma262/#sec-string-constructor
TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-constructor.h b/deps/v8/src/builtins/builtins-constructor.h
index bb6d13e4b4..0978bf3245 100644
--- a/deps/v8/src/builtins/builtins-constructor.h
+++ b/deps/v8/src/builtins/builtins-constructor.h
@@ -8,6 +8,7 @@
#include "src/contexts.h"
#include "src/objects.h"
#include "src/objects/dictionary.h"
+#include "src/objects/js-array.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index b1441adc37..7bdc2759c4 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -142,6 +142,14 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) {
Return(ToNumber(context, input));
}
+// Like ToNumber, but also converts BigInts.
+TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* input = Parameter(Descriptor::kArgument);
+
+ Return(ToNumber(context, input, BigIntHandling::kConvertToNumber));
+}
+
// ES section #sec-tostring-applied-to-the-number-type
TF_BUILTIN(NumberToString, CodeStubAssembler) {
TNode<Number> input = CAST(Parameter(Descriptor::kArgument));
@@ -368,7 +376,7 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
Goto(&if_wrapjsvalue);
BIND(&if_wrapjsvalue);
- Node* native_context = LoadNativeContext(context);
+ TNode<Context> native_context = LoadNativeContext(context);
Node* constructor = LoadFixedArrayElement(
native_context, constructor_function_index_var.value());
Node* initial_map =
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 07aa4eb48b..72ea685982 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
@@ -24,84 +25,84 @@ BUILTIN(DataViewConstructor) {
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
isolate->factory()->NewStringFromAsciiChecked(
"DataView")));
- } else { // [[Construct]]
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> buffer = args.atOrUndefined(isolate, 1);
- Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
- Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
-
- // 2. If Type(buffer) is not Object, throw a TypeError exception.
- // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
- // TypeError exception.
- if (!buffer->IsJSArrayBuffer()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
- }
- Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
+ }
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> buffer = args.atOrUndefined(isolate, 1);
+ Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
+ Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
+
+ // 2. If Type(buffer) is not Object, throw a TypeError exception.
+ // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
+ // TypeError exception.
+ if (!buffer->IsJSArrayBuffer()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
+ }
+ Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
- // 4. Let offset be ? ToIndex(byteOffset).
- Handle<Object> offset;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, offset,
- Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
+ // 4. Let offset be ? ToIndex(byteOffset).
+ Handle<Object> offset;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, offset,
+ Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- // We currently violate the specification at this point. TODO: Fix that.
+ // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // We currently violate the specification at this point. TODO: Fix that.
- // 6. Let bufferByteLength be the value of buffer's
- // [[ArrayBufferByteLength]] internal slot.
- double const buffer_byte_length = array_buffer->byte_length()->Number();
+ // 6. Let bufferByteLength be the value of buffer's
+ // [[ArrayBufferByteLength]] internal slot.
+ double const buffer_byte_length = array_buffer->byte_length()->Number();
- // 7. If offset > bufferByteLength, throw a RangeError exception.
- if (offset->Number() > buffer_byte_length) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
- }
+ // 7. If offset > bufferByteLength, throw a RangeError exception.
+ if (offset->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
+ }
- Handle<Object> view_byte_length;
- if (byte_length->IsUndefined(isolate)) {
- // 8. If byteLength is either not present or undefined, then
- // a. Let viewByteLength be bufferByteLength - offset.
- view_byte_length =
- isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
- } else {
- // 9. Else,
- // a. Let viewByteLength be ? ToIndex(byteLength).
- // b. If offset+viewByteLength > bufferByteLength, throw a
- // RangeError exception.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, view_byte_length,
- Object::ToIndex(isolate, byte_length,
- MessageTemplate::kInvalidDataViewLength));
- if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
- }
+ Handle<Object> view_byte_length;
+ if (byte_length->IsUndefined(isolate)) {
+ // 8. If byteLength is either not present or undefined, then
+ // a. Let viewByteLength be bufferByteLength - offset.
+ view_byte_length =
+ isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+ } else {
+ // 9. Else,
+ // a. Let viewByteLength be ? ToIndex(byteLength).
+ // b. If offset+viewByteLength > bufferByteLength, throw a
+ // RangeError exception.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, view_byte_length,
+ Object::ToIndex(isolate, byte_length,
+ MessageTemplate::kInvalidDataViewLength));
+ if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
}
+ }
- // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
- // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
- // [[ByteLength]], [[ByteOffset]]»).
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
- Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::kZero);
- }
+ // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+ // [[ByteLength]], [[ByteOffset]]»).
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
+ Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::kZero);
+ }
- // 11. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
- Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+ // 11. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
+ Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
- // 12. Set O's [[ByteLength]] internal slot to viewByteLength.
- Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+ // 12. Set O's [[ByteLength]] internal slot to viewByteLength.
+ Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
- // 13. Set O's [[ByteOffset]] internal slot to offset.
- Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+ // 13. Set O's [[ByteOffset]] internal slot to offset.
+ Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
- // 14. Return O.
- return *result;
- }
+ // 14. Return O.
+ return *result;
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 0669963a09..569a5807e2 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
@@ -195,86 +195,84 @@ BUILTIN(DateConstructor) {
ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
- } else {
- int const argc = args.length() - 1;
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- double time_val;
- if (argc == 0) {
- time_val = JSDate::CurrentTimeValue(isolate);
- } else if (argc == 1) {
- Handle<Object> value = args.at(1);
- if (value->IsJSDate()) {
- time_val = Handle<JSDate>::cast(value)->value()->Number();
+ }
+ // [[Construct]]
+ int const argc = args.length() - 1;
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ double time_val;
+ if (argc == 0) {
+ time_val = JSDate::CurrentTimeValue(isolate);
+ } else if (argc == 1) {
+ Handle<Object> value = args.at(1);
+ if (value->IsJSDate()) {
+ time_val = Handle<JSDate>::cast(value)->value()->Number();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToPrimitive(value));
+ if (value->IsString()) {
+ time_val = ParseDateTimeString(isolate, Handle<String>::cast(value));
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToPrimitive(value));
- if (value->IsString()) {
- time_val = ParseDateTimeString(isolate, Handle<String>::cast(value));
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToNumber(isolate, value));
- time_val = value->Number();
- }
+ Object::ToNumber(isolate, value));
+ time_val = value->Number();
}
- } else {
- Handle<Object> year_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(isolate, args.at(1)));
- Handle<Object> month_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(isolate, args.at(2)));
- double year = year_object->Number();
- double month = month_object->Number();
- double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
- if (argc >= 3) {
- Handle<Object> date_object;
+ }
+ } else {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(isolate, args.at(1)));
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(isolate, args.at(2)));
+ double year = year_object->Number();
+ double month = month_object->Number();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(isolate, args.at(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, date_object, Object::ToNumber(isolate, args.at(3)));
- date = date_object->Number();
- if (argc >= 4) {
- Handle<Object> hours_object;
+ isolate, hours_object, Object::ToNumber(isolate, args.at(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, hours_object, Object::ToNumber(isolate, args.at(4)));
- hours = hours_object->Number();
- if (argc >= 5) {
- Handle<Object> minutes_object;
+ isolate, minutes_object, Object::ToNumber(isolate, args.at(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, minutes_object, Object::ToNumber(isolate, args.at(5)));
- minutes = minutes_object->Number();
- if (argc >= 6) {
- Handle<Object> seconds_object;
+ isolate, seconds_object, Object::ToNumber(isolate, args.at(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, seconds_object,
- Object::ToNumber(isolate, args.at(6)));
- seconds = seconds_object->Number();
- if (argc >= 7) {
- Handle<Object> ms_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(isolate, args.at(7)));
- ms = ms_object->Number();
- }
+ isolate, ms_object, Object::ToNumber(isolate, args.at(7)));
+ ms = ms_object->Number();
}
}
}
}
- if (!std::isnan(year)) {
- double const y = DoubleToInteger(year);
- if (0.0 <= y && y <= 99) year = 1900 + y;
- }
- double const day = MakeDay(year, month, date);
- double const time = MakeTime(hours, minutes, seconds, ms);
- time_val = MakeDate(day, time);
- if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
- time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
- time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
- } else {
- time_val = std::numeric_limits<double>::quiet_NaN();
- }
}
- RETURN_RESULT_OR_FAILURE(isolate,
- JSDate::New(target, new_target, time_val));
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ time_val = MakeDate(day, time);
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
}
+ RETURN_RESULT_OR_FAILURE(isolate, JSDate::New(target, new_target, time_val));
}
// ES6 section 20.3.3.1 Date.now ( )
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 2d7c780c70..62765b802f 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -5,6 +5,8 @@
#ifndef V8_BUILTINS_BUILTINS_DEFINITIONS_H_
#define V8_BUILTINS_BUILTINS_DEFINITIONS_H_
+#include "src/interpreter/bytecodes.h"
+
// include generated header
#include "torque-generated/builtin-definitions-from-dsl.h"
@@ -23,6 +25,8 @@ namespace internal {
// Args: name, interface descriptor, return_size
// TFH: Handlers in Turbofan, with CodeStub linkage.
// Args: name, interface descriptor
+// BCH: Bytecode Handlers, with bytecode dispatch linkage.
+// Args: name
// ASM: Builtin in platform-dependent assembly.
// Args: name
@@ -186,6 +190,7 @@ namespace internal {
TFC(NonNumberToNumber, TypeConversion, 1) \
TFC(NonNumberToNumeric, TypeConversion, 1) \
TFC(ToNumber, TypeConversion, 1) \
+ TFC(ToNumberConvertBigInt, TypeConversion, 1) \
TFC(ToNumeric, TypeConversion, 1) \
TFC(NumberToString, TypeConversion, 1) \
TFC(ToString, TypeConversion, 1) \
@@ -219,7 +224,7 @@ namespace internal {
TFC(RunMicrotasks, RunMicrotasks, 1) \
\
/* Object property helpers */ \
- TFS(HasProperty, kKey, kObject) \
+ TFS(HasProperty, kObject, kKey) \
TFS(DeleteProperty, kObject, kKey, kLanguageMode) \
\
/* Abort */ \
@@ -282,6 +287,8 @@ namespace internal {
CPP(ArrayConcat) \
/* ES6 #sec-array.isarray */ \
TFJ(ArrayIsArray, 1, kReceiver, kArg) \
+ /* ES6 #sec-array.prototype.fill */ \
+ CPP(ArrayPrototypeFill) \
/* ES6 #sec-array.from */ \
TFJ(ArrayFrom, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.of */ \
@@ -637,6 +644,7 @@ namespace internal {
TFH(LoadGlobalICInsideTypeof, LoadGlobalWithVector) \
TFH(LoadGlobalICTrampoline, LoadGlobal) \
TFH(LoadGlobalICInsideTypeofTrampoline, LoadGlobal) \
+ TFH(CloneObjectIC, CloneObjectWithVector) \
\
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
@@ -1129,7 +1137,7 @@ namespace internal {
/* ES #sec-typedarray-constructors */ \
TFS(CreateTypedArray, kTarget, kNewTarget, kArg1, kArg2, kArg3) \
TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
- TFJ(TypedArrayConstructorLazyDeoptContinuation, 1, kReceiver, kResult) \
+ TFJ(GenericConstructorLazyDeoptContinuation, 1, kReceiver, kResult) \
TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
@@ -1207,6 +1215,7 @@ namespace internal {
TFC(WasmToNumber, TypeConversion, 1) \
TFS(ThrowWasmTrapUnreachable) \
TFS(ThrowWasmTrapMemOutOfBounds) \
+ TFS(ThrowWasmTrapUnalignedAccess) \
TFS(ThrowWasmTrapDivByZero) \
TFS(ThrowWasmTrapDivUnrepresentable) \
TFS(ThrowWasmTrapRemByZero) \
@@ -1308,6 +1317,7 @@ namespace internal {
ASM(CallApiGetter) \
ASM(DoubleToI) \
TFC(GetProperty, GetProperty, 1) \
+ TFS(SetProperty, kReceiver, kKey, kValue) \
ASM(MathPowInternal) \
\
/* Trace */ \
@@ -1315,10 +1325,9 @@ namespace internal {
CPP(Trace)
#ifdef V8_INTL_SUPPORT
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- \
+#define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
+ /* ecma402 #sec-intl.collator */ \
+ CPP(CollatorConstructor) \
TFS(StringToLowerCaseIntl, kString) \
/* ES #sec-string.prototype.tolowercase */ \
TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \
@@ -1331,6 +1340,16 @@ namespace internal {
/* ecma402 #sec-intl.datetimeformat.prototype.formattoparts */ \
CPP(DateTimeFormatPrototypeFormatToParts) \
/* ecma402 #new proposal */ \
+ /* ecma402 #sec-intl-listformat-constructor */ \
+ CPP(ListFormatConstructor) \
+ /* ecma402 #sec-intl.listformat.prototype.resolvedoptions */ \
+ CPP(ListFormatPrototypeResolvedOptions) \
+ /* ecma402 #sec-intl-list-format.prototype.format */ \
+ TFJ(ListFormatPrototypeFormat, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ecma402 #sec-intl-list-format.prototype.formattoparts */ \
+ TFJ(ListFormatPrototypeFormatToParts, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ecma402 #sec-intl-locale-constructor */ \
CPP(LocaleConstructor) \
CPP(LocalePrototypeLanguage) \
@@ -1344,31 +1363,64 @@ namespace internal {
CPP(LocalePrototypeNumeric) \
CPP(LocalePrototypeNumberingSystem) \
CPP(LocalePrototypeToString) \
+ /* ecma402 #sec-Intl.Locale.prototype.maximize */ \
+ CPP(LocalePrototypeMaximize) \
+ /* ecma402 #sec-Intl.Locale.prototype.minimize */ \
+ CPP(LocalePrototypeMinimize) \
/* ecma402 #sec-number-format-functions */ \
CPP(NumberFormatInternalFormatNumber) \
/* ecma402 #sec-intl.numberformat.prototype.format */ \
CPP(NumberFormatPrototypeFormatNumber) \
- /* ecma402 #sec-intl-relativetimeformat-constructor */ \
+ /* ecma402 #sec-datetime-format-functions */ \
+ CPP(DateTimeFormatInternalFormat) \
+ /* ecma402 #sec-intl.datetimeformat.prototype.format */ \
+ CPP(DateTimeFormatPrototypeFormat) \
+ /* ecma402 #sec-intl.pluralrules */ \
+ CPP(PluralRulesConstructor) \
+ /* ecma402 #sec-intl.RelativeTimeFormat.constructor */ \
CPP(RelativeTimeFormatConstructor) \
- /* ecma402 #sec-intl.relativetimeformat.prototype.resolvedoptions */ \
- CPP(RelativeTimeFormatPrototypeResolvedOptions)
+ /* ecma402 #sec-intl.RelativeTimeFormat.prototype.resolvedOptions */ \
+ CPP(RelativeTimeFormatPrototypeResolvedOptions) \
+ /* ecma402 #sec-intl.RelativeTimeFormat.prototype.format */ \
+ CPP(RelativeTimeFormatPrototypeFormat) \
+ /* ecma402 #sec-intl.RelativeTimeFormat.prototype.formatToParts */ \
+ CPP(RelativeTimeFormatPrototypeFormatToParts) \
+ /* ecma402 #sup-string.prototype.tolocalelowercase */ \
+ CPP(StringPrototypeToLocaleLowerCase) \
+ /* ecma402 #sup-string.prototype.tolocaleuppercase */ \
+ CPP(StringPrototypeToLocaleUpperCase) \
+ /* ecma402 #sec-intl.collator.prototype.compare */ \
+ CPP(CollatorPrototypeCompare) \
+ /* ecma402 #sec-collator-compare-functions */ \
+ CPP(CollatorInternalCompare) \
+ CPP(BreakIteratorInternalAdoptText) \
+ CPP(BreakIteratorPrototypeAdoptText)
#else
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- \
- /* no-op fallback version */ \
- CPP(StringPrototypeNormalize) \
- /* same as toLowercase; fallback version */ \
- CPP(StringPrototypeToLocaleLowerCase) \
- /* same as toUppercase; fallback version */ \
- CPP(StringPrototypeToLocaleUpperCase) \
- /* (obsolete) Unibrow version */ \
- CPP(StringPrototypeToLowerCase) \
- /* (obsolete) Unibrow version */ \
+#define BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
+ /* no-op fallback version */ \
+ CPP(StringPrototypeNormalize) \
+ /* same as toLowercase; fallback version */ \
+ CPP(StringPrototypeToLocaleLowerCase) \
+ /* same as toUppercase; fallback version */ \
+ CPP(StringPrototypeToLocaleUpperCase) \
+ /* (obsolete) Unibrow version */ \
+ CPP(StringPrototypeToLowerCase) \
+ /* (obsolete) Unibrow version */ \
CPP(StringPrototypeToUpperCase)
#endif // V8_INTL_SUPPORT
+#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
+#define BUILTIN_LIST_BYTECODE_HANDLERS(BCH) BYTECODE_LIST(BCH)
+#else
+#define BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
+#endif // V8_EMBEDDED_BYTECODE_HANDLERS
+
+#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM) \
+ BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
+ BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
+
// The exceptions thrown in the following builtins are caught
// internally and result in a promise rejection.
#define BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(V) \
@@ -1392,33 +1444,43 @@ namespace internal {
V(PromiseRace) \
V(ResolvePromise)
+// Convenience macro listing all wasm runtime stubs. Note that the first few
+// elements of the list coincide with {compiler::TrapId}, order matters.
+#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
+ FOREACH_WASM_TRAPREASON(VTRAP) \
+ V(WasmAllocateHeapNumber) \
+ V(WasmArgumentsAdaptor) \
+ V(WasmCallJavaScript) \
+ V(WasmGrowMemory) \
+ V(WasmStackGuard) \
+ V(WasmToNumber) \
+ V(DoubleToI)
+
// The exceptions thrown in the following builtins are caught internally and will
// not be propagated further or re-thrown
#define BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(V) V(PromiseRejectReactionJob)
#define IGNORE_BUILTIN(...)
-#define BUILTIN_LIST_ALL(V) BUILTIN_LIST(V, V, V, V, V, V, V)
-
#define BUILTIN_LIST_C(V) \
BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_A(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, V)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V)
#define BUILTIN_LIST_TFS(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- V, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_TFJ(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_TFC(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index abc5d58e27..97b85bc295 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -43,7 +43,7 @@ namespace internal {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR,
DEFINE_TFC_INTERFACE_DESCRIPTOR, DEFINE_TFS_INTERFACE_DESCRIPTOR,
- DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN)
+ DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN, IGNORE_BUILTIN)
#undef DEFINE_TFJ_INTERFACE_DESCRIPTOR
#undef DEFINE_TFC_INTERFACE_DESCRIPTOR
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 0043c42810..6f07618172 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
-#include "src/builtins/builtins-utils.h"
-
#include "src/accessors.h"
+#include "src/builtins/builtins-utils-inl.h"
+#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/property-descriptor.h"
-#include "src/string-builder.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 4924d4c0c4..eb4ace31e4 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -123,8 +123,8 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments);
TNode<IntPtrT> elements_length =
Signed(ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1)))));
- Node* elements = AllocateFixedArray(PACKED_ELEMENTS, elements_length,
- kAllowLargeObjectAllocation);
+ TNode<FixedArray> elements = CAST(AllocateFixedArray(
+ PACKED_ELEMENTS, elements_length, kAllowLargeObjectAllocation));
VARIABLE(index, MachineType::PointerRepresentation());
index.Bind(IntPtrConstant(0));
VariableList foreach_vars({&index}, zone());
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 663eedc29a..c97bd8a587 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/compiler.h"
@@ -11,7 +11,7 @@
#include "src/lookup.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
-#include "src/string-builder.h"
+#include "src/string-builder-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index b7f296ac4d..04b378db3a 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -8,6 +8,7 @@
#include "src/code-stub-assembler.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
namespace v8 {
namespace internal {
@@ -28,11 +29,8 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
CodeStubArguments* args, Node* receiver, Node* value, Node* context,
JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
// Check if the {receiver} is actually a JSGeneratorObject.
- Label if_receiverisincompatible(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible);
- Node* receiver_instance_type = LoadInstanceType(receiver);
- GotoIfNot(InstanceTypeEqual(receiver_instance_type, JS_GENERATOR_OBJECT_TYPE),
- &if_receiverisincompatible);
+ ThrowIfNotInstanceType(context, receiver, JS_GENERATOR_OBJECT_TYPE,
+ method_name);
// Check if the {receiver} is running or already closed.
TNode<Smi> receiver_continuation =
@@ -81,13 +79,6 @@ void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
result, TrueConstant()));
}
- BIND(&if_receiverisincompatible);
- {
- // The {receiver} is not a valid JSGeneratorObject.
- ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
- StringConstant(method_name), receiver);
- }
-
BIND(&if_receiverisclosed);
{
// The {receiver} is closed already.
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 3c71a322f9..83820de135 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/compiler.h"
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 7ee50345aa..bbfabc7a0d 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -35,6 +35,7 @@ IC_BUILTIN(StoreICTrampoline)
IC_BUILTIN(KeyedStoreIC)
IC_BUILTIN(KeyedStoreICTrampoline)
IC_BUILTIN(StoreInArrayLiteralIC)
+IC_BUILTIN(CloneObjectIC)
IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index c2a16e3570..7ff88c5a53 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -8,6 +8,7 @@
#include "src/code-stub-assembler.h"
#include "src/heap/heap-inl.h"
#include "src/ic/accessor-assembler.h"
+#include "src/ic/keyed-store-generic.h"
#include "src/macro-assembler.h"
#include "src/objects/debug-objects.h"
#include "src/objects/shared-function-info.h"
@@ -100,7 +101,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
BIND(&if_notempty);
{
// Allocate a FixedArray in new space.
- Node* result = AllocateFixedArray(kind, length);
+ TNode<FixedArray> result = CAST(AllocateFixedArray(kind, length));
// The elements might be used to back mapped arguments. In that case fill
// the mapped elements (i.e. the first {mapped_count}) with the hole, but
@@ -109,14 +110,13 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
Node* the_hole = TheHoleConstant();
// Fill the first elements up to {number_of_holes} with the hole.
- VARIABLE(var_index, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
Label loop1(this, &var_index), done_loop1(this);
- var_index.Bind(IntPtrConstant(0));
Goto(&loop1);
BIND(&loop1);
{
// Load the current {index}.
- Node* index = var_index.value();
+ TNode<IntPtrT> index = var_index.value();
// Check if we are done.
GotoIf(WordEqual(index, number_of_holes), &done_loop1);
@@ -125,13 +125,13 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
StoreFixedArrayElement(result, index, the_hole, SKIP_WRITE_BARRIER);
// Continue with next {index}.
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop1);
}
BIND(&done_loop1);
// Compute the effective {offset} into the {frame}.
- Node* offset = IntPtrAdd(length, IntPtrConstant(1));
+ TNode<IntPtrT> offset = IntPtrAdd(length, IntPtrConstant(1));
// Copy the parameters from {frame} (starting at {offset}) to {result}.
Label loop2(this, &var_index), done_loop2(this);
@@ -139,20 +139,21 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
BIND(&loop2);
{
// Load the current {index}.
- Node* index = var_index.value();
+ TNode<IntPtrT> index = var_index.value();
// Check if we are done.
GotoIf(WordEqual(index, length), &done_loop2);
// Load the parameter at the given {index}.
- Node* value = Load(MachineType::AnyTagged(), frame,
- TimesPointerSize(IntPtrSub(offset, index)));
+ TNode<Object> value =
+ CAST(Load(MachineType::AnyTagged(), frame,
+ TimesPointerSize(IntPtrSub(offset, index))));
// Store the {value} into the {result}.
StoreFixedArrayElement(result, index, value, SKIP_WRITE_BARRIER);
// Continue with next {index}.
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop2);
}
BIND(&done_loop2);
@@ -185,8 +186,8 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
// Check break-at-entry flag on the debug info.
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
- TNode<Object> maybe_heap_object_or_smi = LoadObjectField(
- shared, SharedFunctionInfo::kFunctionIdentifierOrDebugInfoOffset);
+ TNode<Object> maybe_heap_object_or_smi =
+ LoadObjectField(shared, SharedFunctionInfo::kScriptOrDebugInfoOffset);
TNode<HeapObject> maybe_debug_info =
TaggedToHeapObject(maybe_heap_object_or_smi, &tailcall_to_shared);
GotoIfNot(HasInstanceType(maybe_debug_info, InstanceType::DEBUG_INFO_TYPE),
@@ -221,7 +222,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
Node* IsPageFlagSet(Node* object, int mask) {
- Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
+ Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
Node* flags = Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kFlagsOffset));
return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
@@ -241,7 +242,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
void GetMarkBit(Node* object, Node** cell, Node** mask) {
- Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
+ Node* page = WordAnd(object, IntPtrConstant(~kPageAlignmentMask));
{
// Temp variable to calculate cell offset in bitmap.
@@ -249,7 +250,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
Bitmap::kBytesPerCellLog2;
r0 = WordShr(object, IntPtrConstant(shift));
- r0 = WordAnd(r0, IntPtrConstant((Page::kPageAlignmentMask >> shift) &
+ r0 = WordAnd(r0, IntPtrConstant((kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
*cell = IntPtrAdd(IntPtrAdd(page, r0),
IntPtrConstant(MemoryChunk::kHeaderSize));
@@ -332,7 +333,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
store_buffer_top_addr, new_store_buffer_top);
Node* test = WordAnd(new_store_buffer_top,
- IntPtrConstant(StoreBuffer::kStoreBufferMask));
+ IntPtrConstant(Heap::store_buffer_mask_constant()));
Label overflow(this);
Branch(WordEqual(test, IntPtrConstant(0)), &overflow, next);
@@ -579,7 +580,7 @@ TF_BUILTIN(ForInFilter, CodeStubAssembler) {
CSA_ASSERT(this, IsString(key));
Label if_true(this), if_false(this);
- TNode<Oddball> result = HasProperty(object, key, context, kForInHasProperty);
+ TNode<Oddball> result = HasProperty(context, object, key, kForInHasProperty);
Branch(IsTrue(result), &if_true, &if_false);
BIND(&if_true);
@@ -844,7 +845,7 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
// This is the likely case where the new queue fits into new space,
// and thus we don't need any write barriers for initializing it.
TNode<FixedArray> new_queue =
- AllocateFixedArray(PACKED_ELEMENTS, new_queue_length);
+ CAST(AllocateFixedArray(PACKED_ELEMENTS, new_queue_length));
CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks,
SKIP_WRITE_BARRIER);
StoreFixedArrayElement(new_queue, num_tasks, microtask,
@@ -858,9 +859,9 @@ TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) {
BIND(&if_lospace);
{
// The fallback case where the new queue ends up in large object space.
- TNode<FixedArray> new_queue = AllocateFixedArray(
+ TNode<FixedArray> new_queue = CAST(AllocateFixedArray(
PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS,
- AllocationFlag::kAllowLargeObjectAllocation);
+ AllocationFlag::kAllowLargeObjectAllocation));
CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks);
StoreFixedArrayElement(new_queue, num_tasks, microtask);
FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks,
@@ -1258,5 +1259,16 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
Return(var_result.value());
}
+// ES6 [[Set]] operation.
+TF_BUILTIN(SetProperty, CodeStubAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> key = CAST(Parameter(Descriptor::kKey));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+
+ KeyedStoreGenericGenerator::SetProperty(state(), context, receiver, key,
+ value, LanguageMode::kStrict);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 6d8636949e..77e2e81a6c 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -6,16 +6,30 @@
#error Internationalization is expected to be enabled.
#endif // V8_INTL_SUPPORT
+#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/objects/js-list-format-inl.h"
+#include "src/objects/js-list-format.h"
namespace v8 {
namespace internal {
+template <class T>
+using TNode = compiler::TNode<T>;
+
class IntlBuiltinsAssembler : public CodeStubAssembler {
public:
explicit IntlBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
+
+ void ListFormatCommon(TNode<Context> context, TNode<Int32T> argc,
+ Runtime::FunctionId format_func_id,
+ const char* method_name);
+
+ TNode<JSArray> AllocateEmptyJSArray(TNode<Context> context);
};
TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
@@ -129,5 +143,71 @@ TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
Return(CallBuiltin(Builtins::kStringToLowerCaseIntl, context, string));
}
+void IntlBuiltinsAssembler::ListFormatCommon(TNode<Context> context,
+ TNode<Int32T> argc,
+ Runtime::FunctionId format_func_id,
+ const char* method_name) {
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+
+ // Label has_list(this);
+ // 1. Let lf be this value.
+ // 2. If Type(lf) is not Object, throw a TypeError exception.
+ TNode<Object> receiver = args.GetReceiver();
+
+ // 3. If lf does not have an [[InitializedListFormat]] internal slot, throw a
+ // TypeError exception.
+ ThrowIfNotInstanceType(context, receiver, JS_INTL_LIST_FORMAT_TYPE,
+ method_name);
+ TNode<JSListFormat> list_format = CAST(receiver);
+
+ // 4. If list is not provided or is undefined, then
+ TNode<Object> list = args.GetOptionalArgumentValue(0);
+ Label has_list(this);
+ {
+ GotoIfNot(IsUndefined(list), &has_list);
+ if (format_func_id == Runtime::kFormatList) {
+ // a. Return an empty String.
+ args.PopAndReturn(EmptyStringConstant());
+ } else {
+ DCHECK_EQ(format_func_id, Runtime::kFormatListToParts);
+ // a. Return an empty Array.
+ args.PopAndReturn(AllocateEmptyJSArray(context));
+ }
+ }
+ BIND(&has_list);
+ {
+ // 5. Let x be ? IterableToList(list).
+ IteratorBuiltinsAssembler iterator_assembler(state());
+ // TODO(adamk): Consider exposing IterableToList as a builtin and calling
+ // it from here instead of inlining the operation.
+ TNode<JSArray> x = iterator_assembler.IterableToList(context, list);
+
+ // 6. Return ? FormatList(lf, x).
+ args.PopAndReturn(CallRuntime(format_func_id, context, list_format, x));
+ }
+}
+
+TNode<JSArray> IntlBuiltinsAssembler::AllocateEmptyJSArray(
+ TNode<Context> context) {
+ return CAST(CodeStubAssembler::AllocateJSArray(
+ PACKED_ELEMENTS,
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, LoadNativeContext(context)),
+ SmiConstant(0), SmiConstant(0)));
+}
+
+TF_BUILTIN(ListFormatPrototypeFormat, IntlBuiltinsAssembler) {
+ ListFormatCommon(
+ CAST(Parameter(Descriptor::kContext)),
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)),
+ Runtime::kFormatList, "Intl.ListFormat.prototype.format");
+}
+
+TF_BUILTIN(ListFormatPrototypeFormatToParts, IntlBuiltinsAssembler) {
+ ListFormatCommon(
+ CAST(Parameter(Descriptor::kContext)),
+ UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount)),
+ Runtime::kFormatListToParts, "Intl.ListFormat.prototype.formatToParts");
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index e6664950d0..1d54d0da80 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -6,26 +6,38 @@
#error Internationalization is expected to be enabled.
#endif // V8_INTL_SUPPORT
+#include <cmath>
+#include <list>
+#include <memory>
+
#include "src/builtins/builtins-intl.h"
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/date.h"
+#include "src/elements.h"
#include "src/intl.h"
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collator-inl.h"
+#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/js-relative-time-format-inl.h"
#include "unicode/datefmt.h"
#include "unicode/decimfmt.h"
#include "unicode/fieldpos.h"
#include "unicode/fpositer.h"
+#include "unicode/listformatter.h"
#include "unicode/normalizer2.h"
#include "unicode/numfmt.h"
+#include "unicode/reldatefmt.h"
#include "unicode/smpdtfmt.h"
#include "unicode/udat.h"
#include "unicode/ufieldpositer.h"
#include "unicode/unistr.h"
+#include "unicode/ureldatefmt.h"
#include "unicode/ustring.h"
namespace v8 {
@@ -35,7 +47,7 @@ BUILTIN(StringPrototypeToUpperCaseIntl) {
HandleScope scope(isolate);
TO_THIS_STRING(string, "String.prototype.toUpperCase");
string = String::Flatten(isolate, string);
- return ConvertCase(string, true, isolate);
+ RETURN_RESULT_OR_FAILURE(isolate, ConvertCase(string, true, isolate));
}
BUILTIN(StringPrototypeNormalizeIntl) {
@@ -106,7 +118,8 @@ BUILTIN(StringPrototypeNormalizeIntl) {
}
if (U_FAILURE(status)) {
- return ReadOnlyRoots(isolate).undefined_value();
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(MessageTemplate::kIcuError));
}
RETURN_RESULT_OR_FAILURE(
@@ -212,30 +225,6 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
}
}
-bool AddElement(Handle<JSArray> array, int index,
- Handle<String> field_type_string,
- const icu::UnicodeString& formatted, int32_t begin, int32_t end,
- Isolate* isolate) {
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
- Handle<JSObject> element = factory->NewJSObject(isolate->object_function());
- Handle<String> value;
- JSObject::AddProperty(isolate, element, factory->type_string(),
- field_type_string, NONE);
-
- icu::UnicodeString field(formatted.tempSubStringBetween(begin, end));
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- factory->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(field.getBuffer()),
- field.length())),
- false);
-
- JSObject::AddProperty(isolate, element, factory->value_string(), value, NONE);
- JSObject::AddDataElement(array, index, element, NONE);
- return true;
-}
-
bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
const NumberFormatSpan& b) {
// Regions that start earlier should be encountered earlier.
@@ -251,19 +240,21 @@ bool cmp_NumberFormatSpan(const NumberFormatSpan& a,
return a.field_id < b.field_id;
}
-Object* FormatNumberToParts(Isolate* isolate, icu::NumberFormat* fmt,
- double number) {
+MaybeHandle<Object> FormatNumberToParts(Isolate* isolate,
+ icu::NumberFormat* fmt, double number) {
Factory* factory = isolate->factory();
icu::UnicodeString formatted;
icu::FieldPositionIterator fp_iter;
UErrorCode status = U_ZERO_ERROR;
fmt->format(number, formatted, &fp_iter, status);
- if (U_FAILURE(status)) return ReadOnlyRoots(isolate).undefined_value();
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
+ }
Handle<JSArray> result = factory->NewJSArray(0);
int32_t length = formatted.length();
- if (length == 0) return *result;
+ if (length == 0) return result;
std::vector<NumberFormatSpan> regions;
// Add a "literal" backdrop for the entire string. This will be used if no
@@ -289,19 +280,21 @@ Object* FormatNumberToParts(Isolate* isolate, icu::NumberFormat* fmt,
part.field_id == -1
? isolate->factory()->literal_string()
: IcuNumberFieldIdToNumberType(part.field_id, number, isolate);
- if (!AddElement(result, index, field_type_string, formatted, part.begin_pos,
- part.end_pos, isolate)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ Handle<String> substring;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos),
+ Object);
+ Intl::AddElement(isolate, result, index, field_type_string, substring);
++index;
}
JSObject::ValidateElements(*result);
- return *result;
+ return result;
}
-Object* FormatDateToParts(Isolate* isolate, icu::DateFormat* format,
- double date_value) {
+MaybeHandle<Object> FormatDateToParts(Isolate* isolate, icu::DateFormat* format,
+ double date_value) {
Factory* factory = isolate->factory();
icu::UnicodeString formatted;
@@ -309,41 +302,48 @@ Object* FormatDateToParts(Isolate* isolate, icu::DateFormat* format,
icu::FieldPosition fp;
UErrorCode status = U_ZERO_ERROR;
format->format(date_value, formatted, &fp_iter, status);
- if (U_FAILURE(status)) return ReadOnlyRoots(isolate).undefined_value();
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
+ }
Handle<JSArray> result = factory->NewJSArray(0);
int32_t length = formatted.length();
- if (length == 0) return *result;
+ if (length == 0) return result;
int index = 0;
int32_t previous_end_pos = 0;
+ Handle<String> substring;
while (fp_iter.next(fp)) {
int32_t begin_pos = fp.getBeginIndex();
int32_t end_pos = fp.getEndIndex();
if (previous_end_pos < begin_pos) {
- if (!AddElement(result, index, IcuDateFieldIdToDateType(-1, isolate),
- formatted, previous_end_pos, begin_pos, isolate)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, previous_end_pos, begin_pos),
+ Object);
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring);
++index;
}
- if (!AddElement(result, index,
- IcuDateFieldIdToDateType(fp.getField(), isolate), formatted,
- begin_pos, end_pos, isolate)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, begin_pos, end_pos), Object);
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(fp.getField(), isolate),
+ substring);
previous_end_pos = end_pos;
++index;
}
if (previous_end_pos < length) {
- if (!AddElement(result, index, IcuDateFieldIdToDateType(-1, isolate),
- formatted, previous_end_pos, length, isolate)) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, previous_end_pos, length), Object);
+ Intl::AddElement(isolate, result, index,
+ IcuDateFieldIdToDateType(-1, isolate), substring);
}
JSObject::ValidateElements(*result);
- return *result;
+ return result;
}
} // namespace
@@ -461,11 +461,11 @@ BUILTIN(NumberFormatPrototypeFormatToParts) {
}
icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ NumberFormat::UnpackNumberFormat(number_format_holder);
CHECK_NOT_NULL(number_format);
- Object* result = FormatNumberToParts(isolate, number_format, x->Number());
- return result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, FormatNumberToParts(isolate, number_format, x->Number()));
}
BUILTIN(DateTimeFormatPrototypeFormatToParts) {
@@ -497,10 +497,11 @@ BUILTIN(DateTimeFormatPrototypeFormatToParts) {
}
icu::SimpleDateFormat* date_format =
- DateFormat::UnpackDateFormat(isolate, date_format_holder);
+ DateFormat::UnpackDateFormat(date_format_holder);
CHECK_NOT_NULL(date_format);
- return FormatDateToParts(isolate, date_format, date_value);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ FormatDateToParts(isolate, date_format, date_value));
}
BUILTIN(NumberFormatPrototypeFormatNumber) {
@@ -531,8 +532,8 @@ BUILTIN(NumberFormatPrototypeFormatNumber) {
return *bound_format;
}
- Handle<Context> native_context =
- Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<NativeContext> native_context(isolate->context()->native_context(),
+ isolate);
Handle<Context> context = isolate->factory()->NewBuiltinContext(
native_context, NumberFormat::ContextSlot::kLength);
@@ -593,93 +594,402 @@ BUILTIN(NumberFormatInternalFormatNumber) {
isolate, number_format_holder, number));
}
-// Intl.Locale implementation
-BUILTIN(LocaleConstructor) {
+BUILTIN(DateTimeFormatPrototypeFormat) {
+ const char* const method = "get Intl.DateTimeFormat.prototype.format";
HandleScope scope(isolate);
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked(
- "Intl.Locale")));
- } else { // [[Construct]]
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
+ // 1. Let dtf be this value.
+ // 2. If Type(dtf) is not Object, throw a TypeError exception.
+ CHECK_RECEIVER(JSReceiver, receiver, method);
- Handle<Object> tag = args.atOrUndefined(isolate, 1);
- Handle<Object> options = args.atOrUndefined(isolate, 2);
+ // 3. Let dtf be ? UnwrapDateTimeFormat(dtf).
+ Handle<JSObject> date_format_holder;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, date_format_holder,
+ DateFormat::Unwrap(isolate, receiver, method));
+ DCHECK(Intl::IsObjectOfType(isolate, date_format_holder,
+ Intl::Type::kDateTimeFormat));
- // First parameter is a locale, as a string/object. Can't be empty.
- if (!tag->IsName() && !tag->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kLocaleNotEmpty));
- }
+ Handle<Object> bound_format = Handle<Object>(
+ date_format_holder->GetEmbedderField(DateFormat::kBoundFormatIndex),
+ isolate);
- Handle<String> locale_string;
- if (tag->IsJSLocale() &&
- Handle<JSLocale>::cast(tag)->locale()->IsString()) {
- locale_string =
- Handle<String>(Handle<JSLocale>::cast(tag)->locale(), isolate);
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, locale_string,
- Object::ToString(isolate, tag));
- }
+ // 4. If dtf.[[BoundFormat]] is undefined, then
+ if (!bound_format->IsUndefined(isolate)) {
+ DCHECK(bound_format->IsJSFunction());
+ // 5. Return dtf.[[BoundFormat]].
+ return *bound_format;
+ }
- Handle<JSReceiver> options_object;
- if (options->IsNullOrUndefined(isolate)) {
- // Make empty options bag.
- options_object = isolate->factory()->NewJSObjectWithNullProto();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, options_object,
- Object::ToObject(isolate, options));
- }
+ Handle<NativeContext> native_context(isolate->context()->native_context(),
+ isolate);
+ Handle<Context> context = isolate->factory()->NewBuiltinContext(
+ native_context, DateFormat::ContextSlot::kLength);
- RETURN_RESULT_OR_FAILURE(
- isolate,
- JSLocale::InitializeLocale(isolate, Handle<JSLocale>::cast(result),
- locale_string, options_object));
- }
+ // 4.b. Set F.[[DateTimeFormat]] to dtf.
+ context->set(DateFormat::ContextSlot::kDateFormat, *date_format_holder);
+
+ Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
+ native_context->date_format_internal_format_shared_fun(), isolate);
+ Handle<Map> map = isolate->strict_function_without_prototype_map();
+
+ // 4.a. Let F be a new built-in function object as defined in DateTime Format
+ // Functions (12.1.5).
+ Handle<JSFunction> new_bound_format_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+
+ // 4.c. Set dtf.[[BoundFormat]] to F.
+ date_format_holder->SetEmbedderField(DateFormat::kBoundFormatIndex,
+ *new_bound_format_function);
+
+ // 5. Return dtf.[[BoundFormat]].
+ return *new_bound_format_function;
}
-BUILTIN(RelativeTimeFormatConstructor) {
+BUILTIN(DateTimeFormatInternalFormat) {
+ HandleScope scope(isolate);
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ // 1. Let dtf be F.[[DateTimeFormat]].
+ Handle<JSObject> date_format_holder = Handle<JSObject>(
+ JSObject::cast(context->get(DateFormat::ContextSlot::kDateFormat)),
+ isolate);
+
+ // 2. Assert: Type(dtf) is Object and dtf has an [[InitializedDateTimeFormat]]
+ // internal slot.
+ DCHECK(Intl::IsObjectOfType(isolate, date_format_holder,
+ Intl::Type::kDateTimeFormat));
+
+ Handle<Object> date = args.atOrUndefined(isolate, 1);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, DateFormat::DateTimeFormat(isolate, date_format_holder, date));
+}
+
+BUILTIN(ListFormatConstructor) {
HandleScope scope(isolate);
// 1. If NewTarget is undefined, throw a TypeError exception.
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
isolate->factory()->NewStringFromStaticChars(
- "Intl.RelativeTimeFormat")));
+ "Intl.ListFormat")));
}
// [[Construct]]
Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
Handle<JSObject> result;
- // 2. Let relativeTimeFormat be
- // ! OrdinaryCreateFromConstructor(NewTarget,
- // "%RelativeTimeFormatPrototype%").
+ // 2. Let listFormat be OrdinaryCreateFromConstructor(NewTarget,
+ // "%ListFormatPrototype%").
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
JSObject::New(target, new_target));
+ Handle<JSListFormat> format = Handle<JSListFormat>::cast(result);
+ format->set_flags(0);
Handle<Object> locales = args.atOrUndefined(isolate, 1);
Handle<Object> options = args.atOrUndefined(isolate, 2);
- // 3. Return ? InitializeRelativeTimeFormat(relativeTimeFormat, locales,
- // options).
+ // 3. Return InitializeListFormat(listFormat, locales, options).
+ RETURN_RESULT_OR_FAILURE(isolate, JSListFormat::InitializeListFormat(
+ isolate, format, locales, options));
+}
+
+BUILTIN(ListFormatPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSListFormat, format_holder,
+ "Intl.ListFormat.prototype.resolvedOptions");
+ return *JSListFormat::ResolvedOptions(isolate, format_holder);
+}
+
+namespace {
+
+MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ Handle<Object> tag, Handle<Object> options) {
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ JSObject::New(constructor, new_target), JSLocale);
+
+ // First parameter is a locale, as a string/object. Can't be empty.
+ if (!tag->IsString() && !tag->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kLocaleNotEmpty),
+ JSLocale);
+ }
+
+ Handle<String> locale_string;
+ if (tag->IsJSLocale() && Handle<JSLocale>::cast(tag)->locale()->IsString()) {
+ locale_string =
+ Handle<String>(Handle<JSLocale>::cast(tag)->locale(), isolate);
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, locale_string,
+ Object::ToString(isolate, tag), JSLocale);
+ }
+
+ Handle<JSReceiver> options_object;
+ if (options->IsNullOrUndefined(isolate)) {
+ // Make empty options bag.
+ options_object = isolate->factory()->NewJSObjectWithNullProto();
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options_object,
+ Object::ToObject(isolate, options), JSLocale);
+ }
+
+ return JSLocale::InitializeLocale(isolate, Handle<JSLocale>::cast(result),
+ locale_string, options_object);
+}
+
+} // namespace
+
+// Intl.Locale implementation
+BUILTIN(LocaleConstructor) {
+ HandleScope scope(isolate);
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Intl.Locale")));
+ }
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+
+ Handle<Object> tag = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
RETURN_RESULT_OR_FAILURE(
- isolate, JSRelativeTimeFormat::InitializeRelativeTimeFormat(
- isolate, Handle<JSRelativeTimeFormat>::cast(result), locales,
- options));
+ isolate, CreateLocale(isolate, target, new_target, tag, options));
}
-BUILTIN(RelativeTimeFormatPrototypeResolvedOptions) {
+BUILTIN(LocalePrototypeMaximize) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.maximize");
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_locale_function(), isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ CreateLocale(isolate, constructor, constructor,
+ JSLocale::Maximize(isolate, locale_holder->locale()),
+ isolate->factory()->NewJSObjectWithNullProto()));
+}
+
+BUILTIN(LocalePrototypeMinimize) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale_holder, "Intl.Locale.prototype.minimize");
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_locale_function(), isolate);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ CreateLocale(isolate, constructor, constructor,
+ JSLocale::Minimize(isolate, locale_holder->locale()),
+ isolate->factory()->NewJSObjectWithNullProto()));
+}
+
+namespace {
+
+MaybeHandle<JSArray> GenerateRelativeTimeFormatParts(
+ Isolate* isolate, icu::UnicodeString formatted,
+ icu::UnicodeString integer_part, Handle<String> unit) {
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ int32_t found = formatted.indexOf(integer_part);
+
+ Handle<String> substring;
+ if (found < 0) {
+ // Cannot find the integer_part in the formatted string.
+ // Return [{'type': 'literal', 'value': formatted}]
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
+ Intl::ToString(isolate, formatted), JSArray);
+ Intl::AddElement(isolate, array,
+ 0, // index
+ factory->literal_string(), // field_type_string
+ substring);
+ } else {
+ // Found the formatted integer in the result.
+ int index = 0;
+
+ // array.push({
+ // 'type': 'literal',
+ // 'value': formatted.substring(0, found)})
+ if (found > 0) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
+ Intl::ToString(isolate, formatted, 0, found),
+ JSArray);
+ Intl::AddElement(isolate, array, index++,
+ factory->literal_string(), // field_type_string
+ substring);
+ }
+
+ // array.push({
+ // 'type': 'integer',
+ // 'value': formatted.substring(found, found + integer_part.length),
+ // 'unit': unit})
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, substring,
+ Intl::ToString(isolate, formatted, found,
+ found + integer_part.length()),
+ JSArray);
+ Intl::AddElement(isolate, array, index++,
+ factory->integer_string(), // field_type_string
+ substring, factory->unit_string(), unit);
+
+ // array.push({
+ // 'type': 'literal',
+ // 'value': formatted.substring(
+ // found + integer_part.length, formatted.length)})
+ if (found + integer_part.length() < formatted.length()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, found + integer_part.length(),
+ formatted.length()),
+ JSArray);
+ Intl::AddElement(isolate, array, index,
+ factory->literal_string(), // field_type_string
+ substring);
+ }
+ }
+ return array;
+}
+
+bool GetURelativeDateTimeUnit(Handle<String> unit,
+ URelativeDateTimeUnit* unit_enum) {
+ std::unique_ptr<char[]> unit_str = unit->ToCString();
+ if ((strcmp("second", unit_str.get()) == 0) ||
+ (strcmp("seconds", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_SECOND;
+ } else if ((strcmp("minute", unit_str.get()) == 0) ||
+ (strcmp("minutes", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_MINUTE;
+ } else if ((strcmp("hour", unit_str.get()) == 0) ||
+ (strcmp("hours", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_HOUR;
+ } else if ((strcmp("day", unit_str.get()) == 0) ||
+ (strcmp("days", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_DAY;
+ } else if ((strcmp("week", unit_str.get()) == 0) ||
+ (strcmp("weeks", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_WEEK;
+ } else if ((strcmp("month", unit_str.get()) == 0) ||
+ (strcmp("months", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_MONTH;
+ } else if ((strcmp("quarter", unit_str.get()) == 0) ||
+ (strcmp("quarters", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_QUARTER;
+ } else if ((strcmp("year", unit_str.get()) == 0) ||
+ (strcmp("years", unit_str.get()) == 0)) {
+ *unit_enum = UDAT_REL_UNIT_YEAR;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+MaybeHandle<Object> RelativeTimeFormatPrototypeFormatCommon(
+ BuiltinArguments args, Isolate* isolate,
+ Handle<JSRelativeTimeFormat> format_holder, const char* func_name,
+ bool to_parts) {
+ Factory* factory = isolate->factory();
+ Handle<Object> value_obj = args.atOrUndefined(isolate, 1);
+ Handle<Object> unit_obj = args.atOrUndefined(isolate, 2);
+
+ // 3. Let value be ? ToNumber(value).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(isolate, value_obj), Object);
+ double number = value->Number();
+ // 4. Let unit be ? ToString(unit).
+ Handle<String> unit;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, unit, Object::ToString(isolate, unit_obj),
+ Object);
+
+ // 4. If isFinite(value) is false, then throw a RangeError exception.
+ if (!std::isfinite(number)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kNotFiniteNumber,
+ isolate->factory()->NewStringFromAsciiChecked(func_name)),
+ Object);
+ }
+
+ icu::RelativeDateTimeFormatter* formatter =
+ JSRelativeTimeFormat::UnpackFormatter(format_holder);
+ CHECK_NOT_NULL(formatter);
+
+ URelativeDateTimeUnit unit_enum;
+ if (!GetURelativeDateTimeUnit(unit, &unit_enum)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidUnit,
+ isolate->factory()->NewStringFromAsciiChecked(func_name),
+ unit),
+ Object);
+ }
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString formatted;
+ if (unit_enum == UDAT_REL_UNIT_QUARTER) {
+ // ICU has not yet implemented UDAT_REL_UNIT_QUARTER.
+ } else {
+ if (format_holder->numeric() == JSRelativeTimeFormat::Numeric::ALWAYS) {
+ formatter->formatNumeric(number, unit_enum, formatted, status);
+ } else {
+ DCHECK_EQ(JSRelativeTimeFormat::Numeric::AUTO, format_holder->numeric());
+ formatter->format(number, unit_enum, formatted, status);
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), Object);
+ }
+
+ if (to_parts) {
+ icu::UnicodeString integer;
+ icu::FieldPosition pos;
+ formatter->getNumberFormat().format(std::abs(number), integer, pos, status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError),
+ Object);
+ }
+
+ Handle<JSArray> elements;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, elements,
+ GenerateRelativeTimeFormatParts(isolate, formatted, integer, unit),
+ Object);
+ return elements;
+ }
+
+ return factory->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(formatted.getBuffer()),
+ formatted.length()));
+}
+
+} // namespace
+
+BUILTIN(RelativeTimeFormatPrototypeFormat) {
HandleScope scope(isolate);
+ // 1. Let relativeTimeFormat be the this value.
+ // 2. If Type(relativeTimeFormat) is not Object or relativeTimeFormat does not
+ // have an [[InitializedRelativeTimeFormat]] internal slot whose value is
+ // true, throw a TypeError exception.
CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
- "Intl.RelativeTimeFormat.prototype.resolvedOptions");
- return *JSRelativeTimeFormat::ResolvedOptions(isolate, format_holder);
+ "Intl.RelativeTimeFormat.prototype.format");
+ RETURN_RESULT_OR_FAILURE(isolate,
+ RelativeTimeFormatPrototypeFormatCommon(
+ args, isolate, format_holder, "format", false));
+}
+
+BUILTIN(RelativeTimeFormatPrototypeFormatToParts) {
+ HandleScope scope(isolate);
+ // 1. Let relativeTimeFormat be the this value.
+ // 2. If Type(relativeTimeFormat) is not Object or relativeTimeFormat does not
+ // have an [[InitializedRelativeTimeFormat]] internal slot whose value is
+ // true, throw a TypeError exception.
+ CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
+ "Intl.RelativeTimeFormat.prototype.formatToParts");
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RelativeTimeFormatPrototypeFormatCommon(
+ args, isolate, format_holder, "formatToParts", true));
}
// Locale getters.
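
The hunk above adds the lazily created bound function behind the Intl.DateTimeFormat.prototype.format getter (the [[BoundFormat]] slot) and the Intl.RelativeTimeFormat format/formatToParts builtins, which share RelativeTimeFormatPrototypeFormatCommon and ICU's RelativeDateTimeFormatter. A rough TypeScript-level illustration of that behavior, assuming a build with these features enabled; concrete strings depend on ICU data and the local time zone.

  // The format getter returns a cached bound function, so it can be detached.
  const dtf = new Intl.DateTimeFormat("en-US");
  const format = dtf.format;
  format(Date.UTC(2018, 8, 21));  // e.g. "9/21/2018"

  const rtf = new Intl.RelativeTimeFormat("en", { numeric: "always" });
  rtf.format(-1, "day");          // "1 day ago"
  rtf.formatToParts(-1, "day");
  // -> [{ type: "integer", value: "1", unit: "day" },
  //     { type: "literal", value: " day ago" }]

  // Non-finite values and unknown units throw RangeError, matching the
  // kNotFiniteNumber / kInvalidUnit paths above:
  // rtf.format(Infinity, "day");   // RangeError
  // rtf.format(1, "century");      // RangeError
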
@@ -762,5 +1072,262 @@ BUILTIN(LocalePrototypeToString) {
return locale_holder->locale();
}
+BUILTIN(RelativeTimeFormatConstructor) {
+ HandleScope scope(isolate);
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromStaticChars(
+ "Intl.RelativeTimeFormat")));
+ }
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+
+ Handle<JSObject> result;
+ // 2. Let relativeTimeFormat be
+ // ! OrdinaryCreateFromConstructor(NewTarget,
+ // "%RelativeTimeFormatPrototype%").
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ Handle<JSRelativeTimeFormat> format =
+ Handle<JSRelativeTimeFormat>::cast(result);
+ format->set_flags(0);
+
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ // 3. Return ? InitializeRelativeTimeFormat(relativeTimeFormat, locales,
+ // options).
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSRelativeTimeFormat::InitializeRelativeTimeFormat(
+ isolate, format, locales, options));
+}
+
+BUILTIN(RelativeTimeFormatPrototypeResolvedOptions) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSRelativeTimeFormat, format_holder,
+ "Intl.RelativeTimeFormat.prototype.resolvedOptions");
+ return *JSRelativeTimeFormat::ResolvedOptions(isolate, format_holder);
+}
+
+BUILTIN(StringPrototypeToLocaleLowerCase) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.toLocaleLowerCase");
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::StringLocaleConvertCase(isolate, string, false,
+ args.atOrUndefined(isolate, 1)));
+}
+
+BUILTIN(StringPrototypeToLocaleUpperCase) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.toLocaleUpperCase");
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::StringLocaleConvertCase(isolate, string, true,
+ args.atOrUndefined(isolate, 1)));
+}
+
+BUILTIN(PluralRulesConstructor) {
+ HandleScope scope(isolate);
+
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromStaticChars(
+ "Intl.PluralRules")));
+ }
+
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ // 2. Let pluralRules be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%PluralRulesPrototype%", « [[InitializedPluralRules]],
+ // [[Locale]], [[Type]], [[MinimumIntegerDigits]],
+ // [[MinimumFractionDigits]], [[MaximumFractionDigits]],
+ // [[MinimumSignificantDigits]], [[MaximumSignificantDigits]] »).
+ Handle<JSObject> plural_rules_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, plural_rules_obj,
+ JSObject::New(target, new_target));
+ Handle<JSPluralRules> plural_rules =
+ Handle<JSPluralRules>::cast(plural_rules_obj);
+
+ // 3. Return ? InitializePluralRules(pluralRules, locales, options).
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSPluralRules::InitializePluralRules(isolate, plural_rules,
+ locales, options));
+}
+
+BUILTIN(CollatorConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSReceiver> new_target;
+ // 1. If NewTarget is undefined, let newTarget be the active
+ // function object, else let newTarget be NewTarget.
+ if (args.new_target()->IsUndefined(isolate)) {
+ new_target = args.target();
+ } else {
+ new_target = Handle<JSReceiver>::cast(args.new_target());
+ }
+
+ // [[Construct]]
+ Handle<JSFunction> target = args.target();
+
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+ Handle<Object> options = args.atOrUndefined(isolate, 2);
+
+ // 5. Let collator be ? OrdinaryCreateFromConstructor(newTarget,
+ // "%CollatorPrototype%", internalSlotsList).
+ Handle<JSObject> collator_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, collator_obj,
+ JSObject::New(target, new_target));
+ Handle<JSCollator> collator = Handle<JSCollator>::cast(collator_obj);
+ collator->set_flags(0);
+
+ // 6. Return ? InitializeCollator(collator, locales, options).
+ RETURN_RESULT_OR_FAILURE(isolate, JSCollator::InitializeCollator(
+ isolate, collator, locales, options));
+}
+
+BUILTIN(CollatorPrototypeCompare) {
+ const char* const method = "get Intl.Collator.prototype.compare";
+ HandleScope scope(isolate);
+
+ // 1. Let collator be this value.
+ // 2. If Type(collator) is not Object, throw a TypeError exception.
+ // 3. If collator does not have an [[InitializedCollator]] internal slot,
+ // throw a TypeError exception.
+ CHECK_RECEIVER(JSCollator, collator, method);
+
+ // 4. If collator.[[BoundCompare]] is undefined, then
+ Handle<Object> bound_compare(collator->bound_compare(), isolate);
+ if (!bound_compare->IsUndefined(isolate)) {
+ DCHECK(bound_compare->IsJSFunction());
+ // 5. Return collator.[[BoundCompare]].
+ return *bound_compare;
+ }
+
+ Handle<NativeContext> native_context(isolate->context()->native_context(),
+ isolate);
+ Handle<Context> context = isolate->factory()->NewBuiltinContext(
+ native_context, JSCollator::ContextSlot::kLength);
+
+ // 4.b. Set F.[[Collator]] to collator.
+ context->set(JSCollator::ContextSlot::kCollator, *collator);
+
+ Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
+ native_context->collator_internal_compare_shared_fun(), isolate);
+ Handle<Map> map = isolate->strict_function_without_prototype_map();
+
+ // 4.a. Let F be a new built-in function object as defined in 10.3.3.1.
+ Handle<JSFunction> new_bound_compare_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+
+ // 4.c. Set collator.[[BoundCompare]] to F.
+ collator->set_bound_compare(*new_bound_compare_function);
+
+ // 5. Return collator.[[BoundCompare]].
+ return *new_bound_compare_function;
+}
+
+BUILTIN(CollatorInternalCompare) {
+ HandleScope scope(isolate);
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ // 1. Let collator be F.[[Collator]].
+ // 2. Assert: Type(collator) is Object and collator has an
+ // [[InitializedCollator]] internal slot.
+ Handle<JSCollator> collator_holder = Handle<JSCollator>(
+ JSCollator::cast(context->get(JSCollator::ContextSlot::kCollator)),
+ isolate);
+
+ // 3. If x is not provided, let x be undefined.
+ Handle<Object> x = args.atOrUndefined(isolate, 1);
+ // 4. If y is not provided, let y be undefined.
+ Handle<Object> y = args.atOrUndefined(isolate, 2);
+
+ // 5. Let X be ? ToString(x).
+ Handle<String> string_x;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string_x,
+ Object::ToString(isolate, x));
+ // 6. Let Y be ? ToString(y).
+ Handle<String> string_y;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string_y,
+ Object::ToString(isolate, y));
+
+ // 7. Return CompareStrings(collator, X, Y).
+ return *Intl::CompareStrings(isolate, collator_holder, string_x, string_y);
+}
+
+BUILTIN(BreakIteratorPrototypeAdoptText) {
+ const char* const method = "get Intl.v8BreakIterator.prototype.adoptText";
+ HandleScope scope(isolate);
+
+ CHECK_RECEIVER(JSObject, break_iterator_holder, method);
+ if (!Intl::IsObjectOfType(isolate, break_iterator_holder,
+ Intl::Type::kBreakIterator)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(method),
+ break_iterator_holder));
+ }
+
+ Handle<Object> bound_adopt_text =
+ Handle<Object>(break_iterator_holder->GetEmbedderField(
+ V8BreakIterator::kBoundAdoptTextIndex),
+ isolate);
+
+ if (!bound_adopt_text->IsUndefined(isolate)) {
+ DCHECK(bound_adopt_text->IsJSFunction());
+ return *bound_adopt_text;
+ }
+
+ Handle<NativeContext> native_context(isolate->context()->native_context(),
+ isolate);
+ Handle<Context> context = isolate->factory()->NewBuiltinContext(
+ native_context, static_cast<int>(V8BreakIterator::ContextSlot::kLength));
+
+ context->set(static_cast<int>(V8BreakIterator::ContextSlot::kV8BreakIterator),
+ *break_iterator_holder);
+
+ Handle<SharedFunctionInfo> info = Handle<SharedFunctionInfo>(
+ native_context->break_iterator_internal_adopt_text_shared_fun(), isolate);
+ Handle<Map> map = isolate->strict_function_without_prototype_map();
+
+ Handle<JSFunction> new_bound_adopt_text_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(map, info, context);
+
+ break_iterator_holder->SetEmbedderField(V8BreakIterator::kBoundAdoptTextIndex,
+ *new_bound_adopt_text_function);
+
+ return *new_bound_adopt_text_function;
+}
+
+BUILTIN(BreakIteratorInternalAdoptText) {
+ HandleScope scope(isolate);
+ Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+
+ Handle<JSObject> break_iterator_holder = Handle<JSObject>(
+ JSObject::cast(context->get(
+ static_cast<int>(V8BreakIterator::ContextSlot::kV8BreakIterator))),
+ isolate);
+
+ DCHECK(Intl::IsObjectOfType(isolate, break_iterator_holder,
+ Intl::Type::kBreakIterator));
+
+ Handle<Object> input_text = args.atOrUndefined(isolate, 1);
+ Handle<String> text;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, text,
+ Object::ToString(isolate, input_text));
+
+ V8BreakIterator::AdoptText(isolate, break_iterator_holder, text);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
} // namespace internal
} // namespace v8
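
The remaining builtins added to this file cover Intl.PluralRules, Intl.Collator (with the bound compare function cached in a builtin context, mirroring the DateTimeFormat pattern), locale-sensitive case conversion, and the v8BreakIterator adoptText getter. A hedged TypeScript sketch of the user-visible behavior; the locale-dependent results are examples only.

  new Intl.PluralRules("en").select(1);  // "one"
  new Intl.PluralRules("en").select(2);  // "other"

  const collator = new Intl.Collator("de");
  const compare = collator.compare;      // getter creates and caches a bound function
  ["z", "ä", "a"].sort(compare);         // ["a", "ä", "z"]

  "istanbul".toLocaleUpperCase("tr");    // "İSTANBUL" (Turkish dotted capital I)
  "I".toLocaleLowerCase("tr");           // "ı" (Turkish dotless small i)
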
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 57702556a9..1e16a6b1de 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/builtins/builtins-iterator-gen.h"
+#include "src/builtins/growable-fixed-array-gen.h"
#include "src/heap/factory-inl.h"
@@ -11,8 +12,8 @@ namespace internal {
using compiler::Node;
-Node* IteratorBuiltinsAssembler::GetIteratorMethod(Node* context,
- Node* object) {
+TNode<Object> IteratorBuiltinsAssembler::GetIteratorMethod(Node* context,
+ Node* object) {
return GetProperty(context, object, factory()->iterator_symbol());
}
@@ -194,5 +195,65 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(
Unreachable();
}
+TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
+ TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn) {
+ Label fast_path(this), slow_path(this), done(this);
+
+ TVARIABLE(JSArray, created_list);
+
+ Branch(IsFastJSArrayWithNoCustomIteration(iterable, context), &fast_path,
+ &slow_path);
+
+ // This is a fast path that ignores the iterator and clones the array directly.
+ BIND(&fast_path);
+ {
+ TNode<JSArray> input_array = CAST(iterable);
+ created_list = CAST(CloneFastJSArray(context, input_array));
+ Goto(&done);
+ }
+
+ BIND(&slow_path);
+ {
+ // 1. Let iteratorRecord be ? GetIterator(items, method).
+ IteratorRecord iterator_record =
+ GetIterator(context, iterable, iterator_fn);
+
+ // 2. Let values be a new empty List.
+ GrowableFixedArray values(state());
+
+ Variable* vars[] = {values.var_array(), values.var_length(),
+ values.var_capacity()};
+ Label loop_start(this, 3, vars), loop_end(this);
+ Goto(&loop_start);
+ // 3. Let next be true.
+ // 4. Repeat, while next is not false
+ BIND(&loop_start);
+ {
+ // a. Set next to ? IteratorStep(iteratorRecord).
+ TNode<Object> next =
+ CAST(IteratorStep(context, iterator_record, &loop_end));
+ // b. If next is not false, then
+ // i. Let nextValue be ? IteratorValue(next).
+ TNode<Object> next_value = CAST(IteratorValue(context, next));
+ // ii. Append nextValue to the end of the List values.
+ values.Push(next_value);
+ Goto(&loop_start);
+ }
+ BIND(&loop_end);
+
+ created_list = values.ToJSArray(context);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return created_list.value();
+}
+
+TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
+ TNode<Context> context, TNode<Object> iterable) {
+ TNode<Object> method = GetIteratorMethod(context, iterable);
+ return IterableToList(context, iterable, method);
+}
+
} // namespace internal
} // namespace v8
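
IterableToList above follows the spec steps quoted in its comments, with a fast path that clones fast JSArrays whose iteration behavior has not been customized. For orientation, a JS-level sketch of the slow path in TypeScript; the helper name is illustrative and is not a V8 API.

  // Sketch of the #sec-iterabletolist steps; not the actual CSA implementation.
  function iterableToList<T>(iterable: Iterable<T>): T[] {
    // 1. Let iteratorRecord be ? GetIterator(items, method).
    const iterator = iterable[Symbol.iterator]();
    // 2. Let values be a new empty List.
    const values: T[] = [];
    // 3./4. Repeat, while next is not false:
    while (true) {
      // a. Set next to ? IteratorStep(iteratorRecord).
      const next = iterator.next();
      if (next.done) break;
      // b. Append ? IteratorValue(next) to the end of values.
      values.push(next.value);
    }
    return values;
  }

  iterableToList(new Set(["a", "b"]));  // ["a", "b"]
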
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 13464516d6..71d4b9753c 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -18,7 +18,7 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
// Returns object[Symbol.iterator].
- Node* GetIteratorMethod(Node* context, Node* object);
+ TNode<Object> GetIteratorMethod(Node* context, Node* object);
// https://tc39.github.io/ecma262/#sec-getiterator --- never used for
// @@asyncIterator.
@@ -53,6 +53,11 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
Label* if_exception, Variable* exception);
void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
Variable* exception);
+
+ // /#sec-iterabletolist
+ TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable,
+ TNode<Object> iterator_fn);
+ TNode<JSArray> IterableToList(TNode<Context> context, TNode<Object> iterable);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-json.cc b/deps/v8/src/builtins/builtins-json.cc
index 3c317d5b88..c3f6672b0f 100644
--- a/deps/v8/src/builtins/builtins-json.cc
+++ b/deps/v8/src/builtins/builtins-json.cc
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/builtins/builtins-utils.h"
-
#include "src/counters.h"
#include "src/json-parser.h"
#include "src/json-stringifier.h"
@@ -31,12 +30,11 @@ BUILTIN(JsonParse) {
// ES6 section 24.3.2 JSON.stringify.
BUILTIN(JsonStringify) {
HandleScope scope(isolate);
- JsonStringifier stringifier(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
Handle<Object> replacer = args.atOrUndefined(isolate, 2);
Handle<Object> indent = args.atOrUndefined(isolate, 3);
RETURN_RESULT_OR_FAILURE(isolate,
- stringifier.Stringify(object, replacer, indent));
+ JsonStringify(isolate, object, replacer, indent));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 7e701c1546..2f21c2d4b1 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -2,12 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/conversions.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#endif
namespace v8 {
namespace internal {
@@ -114,6 +117,7 @@ BUILTIN(NumberPrototypeToLocaleString) {
if (value->IsJSValue()) {
value = handle(Handle<JSValue>::cast(value)->value(), isolate);
}
+ // 1. Let x be ? thisNumberValue(this value)
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
@@ -122,8 +126,15 @@ BUILTIN(NumberPrototypeToLocaleString) {
isolate->factory()->Number_string()));
}
+#ifdef V8_INTL_SUPPORT
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Intl::NumberToLocaleString(isolate, value, args.atOrUndefined(isolate, 1),
+ args.atOrUndefined(isolate, 2)));
+#else
// Turn the {value} into a String.
return *isolate->factory()->NumberToString(value);
+#endif // V8_INTL_SUPPORT
}
// ES6 section 20.1.3.5 Number.prototype.toPrecision ( precision )
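
With V8_INTL_SUPPORT, Number.prototype.toLocaleString in the hunk above now forwards its locales and options arguments to Intl::NumberToLocaleString instead of ignoring them; without Intl support it still falls back to the plain number-to-string conversion. Illustrative TypeScript, with grouping and decimal characters taken from ICU locale data:

  (1234567.891).toLocaleString("en-US");             // "1,234,567.891"
  (1234567.891).toLocaleString("de-DE");             // "1.234.567,891"
  (0.5).toLocaleString("en", { style: "percent" });  // "50%"
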
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index fb89694c31..a8d83e641f 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -8,6 +8,7 @@
#include "src/heap/factory-inl.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
+#include "src/objects/js-generator.h"
#include "src/objects/property-descriptor-object.h"
#include "src/objects/shared-function-info.h"
@@ -199,7 +200,7 @@ TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
TNode<Context> context, TNode<Object> maybe_object,
CollectType collect_type) {
- TNode<JSReceiver> receiver = ToObject(context, maybe_object);
+ TNode<JSReceiver> receiver = ToObject_Inline(context, maybe_object);
Label if_call_runtime_with_fast_path(this, Label::kDeferred),
if_call_runtime(this, Label::kDeferred),
@@ -292,8 +293,8 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
BIND(&if_has_enum_cache);
{
GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
- TNode<FixedArray> values_or_entries = AllocateFixedArray(
- PACKED_ELEMENTS, object_enum_length, kAllowLargeObjectAllocation);
+ TNode<FixedArray> values_or_entries = CAST(AllocateFixedArray(
+ PACKED_ELEMENTS, object_enum_length, kAllowLargeObjectAllocation));
// If in case we have enum_cache,
// we can't detect accessor of object until loop through descriptors.
@@ -360,8 +361,8 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr,
IntPtrConstant(2));
- StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(CAST(elements), 0, next_key, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(CAST(elements), 1, value, SKIP_WRITE_BARRIER);
value = TNode<JSArray>::UncheckedCast(array);
}
@@ -492,7 +493,7 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
TNode<Object> target = args.GetOptionalArgumentValue(0);
// 1. Let to be ? ToObject(target).
- TNode<JSReceiver> to = ToObject(context, target);
+ TNode<JSReceiver> to = ToObject_Inline(context, target);
Label done(this);
// 2. If only one argument was passed, return to.
@@ -1221,10 +1222,10 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
// ES6 #sec-object.prototype.valueof
TF_BUILTIN(ObjectPrototypeValueOf, CodeStubAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Return(ToObject(context, receiver));
+ Return(ToObject_Inline(context, receiver));
}
// ES #sec-object.create
@@ -1422,7 +1423,7 @@ TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) {
Node* object = Parameter(Descriptor::kObject);
Node* context = Parameter(Descriptor::kContext);
- Return(HasProperty(object, key, context, kHasProperty));
+ Return(HasProperty(context, object, key, kHasProperty));
}
TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
@@ -1518,7 +1519,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
Node* key = args.GetOptionalArgumentValue(1);
// 1. Let obj be ? ToObject(O).
- object = ToObject(context, object);
+ object = ToObject_Inline(CAST(context), CAST(object));
// 2. Let key be ? ToPropertyKey(P).
key = ToName(context, key);
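
The builtins-object-gen.cc changes are mostly TNode/CAST typing and the switch to ToObject_Inline, but the touched fast path is the one that materializes Object.values()/Object.entries() results straight from the enum cache, storing each [key, value] pair array without write barriers. For reference, the observable results (illustration only):

  Object.values({ a: 1, b: 2 });      // [1, 2]
  Object.entries({ a: 1, b: 2 });     // [["a", 1], ["b", 2]]
  Object.assign({}, null, { x: 1 });  // { x: 1 }  (null/undefined sources are skipped)
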
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 074c926587..7513f60095 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
@@ -394,10 +394,8 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
if (!did_get_descriptor.FromJust()) continue;
Handle<Object> from_descriptor = descriptor.ToObject(isolate);
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, descriptors, key, descriptors, LookupIterator::OWN);
- Maybe<bool> success =
- JSReceiver::CreateDataProperty(&it, from_descriptor, kDontThrow);
+ Maybe<bool> success = JSReceiver::CreateDataProperty(
+ isolate, descriptors, key, from_descriptor, kDontThrow);
CHECK(success.FromJust());
}
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index ccfb3b11b0..241a2041bd 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -709,9 +709,10 @@ void PromiseBuiltinsAssembler::SetForwardingHandlerIfTrue(
Node* context, Node* condition, const NodeGenerator& object) {
Label done(this);
GotoIfNot(condition, &done);
- CallRuntime(Runtime::kSetProperty, context, object(),
- HeapConstant(factory()->promise_forwarding_handler_symbol()),
- TrueConstant(), SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(
+ CAST(context), CAST(object()),
+ HeapConstant(factory()->promise_forwarding_handler_symbol()),
+ TrueConstant());
Goto(&done);
BIND(&done);
}
@@ -723,9 +724,9 @@ void PromiseBuiltinsAssembler::SetPromiseHandledByIfTrue(
GotoIfNot(condition, &done);
GotoIf(TaggedIsSmi(promise), &done);
GotoIfNot(HasInstanceType(promise, JS_PROMISE_TYPE), &done);
- CallRuntime(Runtime::kSetProperty, context, promise,
- HeapConstant(factory()->promise_handled_by_symbol()),
- handled_by(), SmiConstant(LanguageMode::kStrict));
+ SetPropertyStrict(CAST(context), CAST(promise),
+ HeapConstant(factory()->promise_handled_by_symbol()),
+ CAST(handled_by()));
Goto(&done);
BIND(&done);
}
@@ -2118,8 +2119,8 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
CSA_ASSERT(this, IntPtrLessThan(index, new_elements_length));
CSA_ASSERT(this, IntPtrLessThan(elements_length, new_elements_length));
TNode<FixedArray> new_elements =
- AllocateFixedArray(PACKED_ELEMENTS, new_elements_length,
- AllocationFlag::kAllowLargeObjectAllocation);
+ CAST(AllocateFixedArray(PACKED_ELEMENTS, new_elements_length,
+ AllocationFlag::kAllowLargeObjectAllocation));
CopyFixedArrayElements(PACKED_ELEMENTS, elements, PACKED_ELEMENTS,
new_elements, elements_length,
new_elements_length);
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
index 671bfa21fb..88c0632c15 100644
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 1d4c6d0802..34caf58688 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -460,7 +460,7 @@ TF_BUILTIN(ProxyHasProperty, ProxiesCodeStubAssembler) {
BIND(&trap_undefined);
{
// 7.a. Return ? target.[[HasProperty]](P).
- TailCallBuiltin(Builtins::kHasProperty, context, name, target);
+ TailCallBuiltin(Builtins::kHasProperty, context, target, name);
}
BIND(&return_false);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index 92b175bfde..95845fca4e 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -6,6 +6,7 @@
#define V8_BUILTINS_BUILTINS_PROXY_GEN_H_
#include "src/code-stub-assembler.h"
+#include "src/objects/js-proxy.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-reflect-gen.cc b/deps/v8/src/builtins/builtins-reflect-gen.cc
index 3ab21f975d..52e08275c7 100644
--- a/deps/v8/src/builtins/builtins-reflect-gen.cc
+++ b/deps/v8/src/builtins/builtins-reflect-gen.cc
@@ -18,7 +18,7 @@ TF_BUILTIN(ReflectHas, CodeStubAssembler) {
ThrowIfNotJSReceiver(context, target, MessageTemplate::kCalledOnNonObject,
"Reflect.has");
- Return(CallBuiltin(Builtins::kHasProperty, context, key, target));
+ Return(CallBuiltin(Builtins::kHasProperty, context, target, key));
}
} // namespace internal
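
Both the proxy hunk and this one follow the argument order of the reworked HasProperty builtin (context, object, key). The JS-observable semantics are unchanged; a short TypeScript illustration:

  Reflect.has({ a: 1 }, "a");                 // true
  Reflect.has(Object.create({ b: 2 }), "b");  // true (prototype chain is walked)
  "x" in new Proxy({ x: 1 }, {});             // true: no "has" trap, so the
                                              // target's [[HasProperty]] is used
  // Reflect.has(42, "a")                     // TypeError: called on non-object
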
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index cc97caf6a9..3dd07a796a 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/builtins/builtins-utils.h"
-
#include "src/counters.h"
#include "src/keys.h"
#include "src/lookup.h"
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 1cf5d4f61f..206602aaa7 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -24,31 +24,32 @@ using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
+TNode<Smi> RegExpBuiltinsAssembler::SmiZero() { return SmiConstant(0); }
+
+TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
+ return IntPtrConstant(0);
+}
+
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
-Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
- Node* index, Node* input) {
- CSA_ASSERT(this, IsContext(context));
- CSA_ASSERT(this, TaggedIsSmi(index));
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, IsString(input));
-
+TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
+ TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
+ TNode<String> input) {
#ifdef DEBUG
- TNode<Smi> const max_length =
- SmiConstant(JSArray::kInitialMaxFastElementArray);
- CSA_ASSERT(this, SmiLessThanOrEqual(CAST(length), max_length));
+ TNode<Smi> max_length = SmiConstant(JSArray::kInitialMaxFastElementArray);
+ CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
#endif // DEBUG
// Allocate the JSRegExpResult together with its elements fixed array.
// Initial preparations first.
- Node* const length_intptr = SmiUntag(length);
+ TNode<IntPtrT> length_intptr = SmiUntag(length);
const ElementsKind elements_kind = PACKED_ELEMENTS;
- Node* const elements_size = GetFixedArrayAllocationSize(
+ TNode<IntPtrT> elements_size = GetFixedArrayAllocationSize(
length_intptr, elements_kind, INTPTR_PARAMETERS);
- Node* const total_size =
+ TNode<IntPtrT> total_size =
IntPtrAdd(elements_size, IntPtrConstant(JSRegExpResult::kSize));
static const int kRegExpResultOffset = 0;
@@ -57,14 +58,14 @@ Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
// The folded allocation.
- Node* const result = Allocate(total_size);
- Node* const elements = InnerAllocate(result, kElementsOffset);
+ Node* result = Allocate(total_size);
+ Node* elements = InnerAllocate(result, kElementsOffset);
// Initialize the JSRegExpResult.
- Node* const native_context = LoadNativeContext(context);
- Node* const map =
- LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Map> map = CAST(
+ LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX));
StoreMapNoWriteBarrier(result, map);
StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOrHashOffset,
@@ -85,11 +86,10 @@ Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
StoreMapNoWriteBarrier(elements, map_index);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
- Node* const zero = IntPtrConstant(0);
- FillFixedArrayWithValue(elements_kind, elements, zero, length_intptr,
+ FillFixedArrayWithValue(elements_kind, elements, IntPtrZero(), length_intptr,
Heap::kUndefinedValueRootIndex);
- return result;
+ return CAST(result);
}
TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(
@@ -114,21 +114,23 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(TNode<Context> context,
pattern, flags);
}
-TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
+TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndex(
+ TNode<JSRegExp> regexp) {
// Load the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
return LoadObjectField(regexp, field_offset);
}
-Node* RegExpBuiltinsAssembler::SlowLoadLastIndex(Node* context, Node* regexp) {
- // Load through the GetProperty stub.
+TNode<Object> RegExpBuiltinsAssembler::SlowLoadLastIndex(TNode<Context> context,
+ TNode<Object> regexp) {
return GetProperty(context, regexp, isolate()->factory()->lastIndex_string());
}
-Node* RegExpBuiltinsAssembler::LoadLastIndex(Node* context, Node* regexp,
- bool is_fastpath) {
- return is_fastpath ? FastLoadLastIndex(regexp)
+TNode<Object> RegExpBuiltinsAssembler::LoadLastIndex(TNode<Context> context,
+ TNode<Object> regexp,
+ bool is_fastpath) {
+ return is_fastpath ? FastLoadLastIndex(CAST(regexp))
: SlowLoadLastIndex(context, regexp);
}
@@ -143,12 +145,8 @@ void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
Node* value) {
- // Store through runtime.
- // TODO(ishell): Use SetPropertyStub here once available.
Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
- Node* const language_mode = SmiConstant(LanguageMode::kStrict);
- CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
- language_mode);
+ SetPropertyStrict(CAST(context), CAST(regexp), CAST(name), CAST(value));
}
void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
@@ -160,21 +158,18 @@ void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
}
}
-Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
- Node* const context, Node* const regexp, Node* const match_info,
- TNode<String> const string) {
- CSA_ASSERT(this, IsFixedArrayMap(LoadMap(match_info)));
- CSA_ASSERT(this, IsJSRegExp(regexp));
-
+TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
+ TNode<Context> context, TNode<JSReceiver> maybe_regexp,
+ TNode<RegExpMatchInfo> match_info, TNode<String> string) {
Label named_captures(this), out(this);
TNode<IntPtrT> num_indices = SmiUntag(CAST(LoadFixedArrayElement(
match_info, RegExpMatchInfo::kNumberOfCapturesIndex)));
- TNode<Smi> const num_results = SmiTag(WordShr(num_indices, 1));
- Node* const start =
- LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex);
- Node* const end = LoadFixedArrayElement(
- match_info, RegExpMatchInfo::kFirstCaptureIndex + 1);
+ TNode<Smi> num_results = SmiTag(WordShr(num_indices, 1));
+ TNode<Smi> start = CAST(
+ LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex));
+ TNode<Smi> end = CAST(LoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kFirstCaptureIndex + 1));
// Calculate the substring of the first match before creating the result array
// to avoid an unnecessary write barrier storing the first result.
@@ -182,9 +177,9 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<String> first =
CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
- Node* const result =
+ TNode<JSRegExpResult> result =
AllocateRegExpResult(context, num_results, start, string);
- Node* const result_elements = LoadElements(result);
+ TNode<FixedArray> result_elements = CAST(LoadElements(result));
StoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
@@ -192,13 +187,12 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
GotoIf(SmiEqual(num_results, SmiConstant(1)), &out);
// Store all remaining captures.
- Node* const limit = IntPtrAdd(
+ TNode<IntPtrT> limit = IntPtrAdd(
IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
- VARIABLE(var_from_cursor, MachineType::PointerRepresentation(),
- IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
- VARIABLE(var_to_cursor, MachineType::PointerRepresentation(),
- IntPtrConstant(1));
+ TVARIABLE(IntPtrT, var_from_cursor,
+ IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
+ TVARIABLE(IntPtrT, var_to_cursor, IntPtrConstant(1));
Variable* vars[] = {&var_from_cursor, &var_to_cursor};
Label loop(this, 2, vars);
@@ -206,72 +200,74 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Goto(&loop);
BIND(&loop);
{
- Node* const from_cursor = var_from_cursor.value();
- Node* const to_cursor = var_to_cursor.value();
- TNode<Smi> const start =
- CAST(LoadFixedArrayElement(match_info, from_cursor));
+ TNode<IntPtrT> from_cursor = var_from_cursor.value();
+ TNode<IntPtrT> to_cursor = var_to_cursor.value();
+ TNode<Smi> start = CAST(LoadFixedArrayElement(match_info, from_cursor));
Label next_iter(this);
GotoIf(SmiEqual(start, SmiConstant(-1)), &next_iter);
- Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
- Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
+ TNode<IntPtrT> from_cursor_plus1 =
+ IntPtrAdd(from_cursor, IntPtrConstant(1));
+ TNode<Smi> end = CAST(LoadFixedArrayElement(match_info, from_cursor_plus1));
- TNode<String> const capture =
+ TNode<String> capture =
CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
StoreFixedArrayElement(result_elements, to_cursor, capture);
Goto(&next_iter);
BIND(&next_iter);
- var_from_cursor.Bind(IntPtrAdd(from_cursor, IntPtrConstant(2)));
- var_to_cursor.Bind(IntPtrAdd(to_cursor, IntPtrConstant(1)));
+ var_from_cursor = IntPtrAdd(from_cursor, IntPtrConstant(2));
+ var_to_cursor = IntPtrAdd(to_cursor, IntPtrConstant(1));
Branch(UintPtrLessThan(var_from_cursor.value(), limit), &loop,
&named_captures);
}
BIND(&named_captures);
{
+ CSA_ASSERT(this, SmiGreaterThan(num_results, SmiConstant(1)));
+
// We reach this point only if captures exist, implying that this is an
// IRREGEXP JSRegExp.
- CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, SmiGreaterThan(num_results, SmiConstant(1)));
+ TNode<JSRegExp> regexp = CAST(maybe_regexp);
// Preparations for named capture properties. Exit early if the result does
// not have any named captures to minimize performance impact.
- Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
+ TNode<FixedArray> data =
+ CAST(LoadObjectField(regexp, JSRegExp::kDataOffset));
CSA_ASSERT(this,
SmiEqual(CAST(LoadFixedArrayElement(data, JSRegExp::kTagIndex)),
SmiConstant(JSRegExp::IRREGEXP)));
// The names fixed array associates names at even indices with a capture
// index at odd indices.
- TNode<Object> const maybe_names =
+ TNode<Object> maybe_names =
LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureNameMapIndex);
- GotoIf(WordEqual(maybe_names, SmiConstant(0)), &out);
+ GotoIf(WordEqual(maybe_names, SmiZero()), &out);
// Allocate a new object to store the named capture properties.
// TODO(jgruber): Could be optimized by adding the object map to the heap
// root list.
- Node* const native_context = LoadNativeContext(context);
- Node* const map = LoadContextElement(
- native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP);
- Node* const properties =
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Map> map = CAST(LoadContextElement(
+ native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
+ TNode<NameDictionary> properties =
AllocateNameDictionary(NameDictionary::kInitialCapacity);
- Node* const group_object = AllocateJSObjectFromMap(map, properties);
+ TNode<JSObject> group_object =
+ CAST(AllocateJSObjectFromMap(map, properties));
StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object);
// One or more named captures exist, add a property for each one.
TNode<FixedArray> names = CAST(maybe_names);
- TNode<IntPtrT> const names_length = LoadAndUntagFixedArrayBaseLength(names);
- CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrConstant(0)));
+ TNode<IntPtrT> names_length = LoadAndUntagFixedArrayBaseLength(names);
+ CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrZero()));
- VARIABLE(var_i, MachineType::PointerRepresentation());
- var_i.Bind(IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_i, IntPtrZero());
Variable* vars[] = {&var_i};
const int vars_count = sizeof(vars) / sizeof(vars[0]);
@@ -280,14 +276,14 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Goto(&loop);
BIND(&loop);
{
- Node* const i = var_i.value();
- Node* const i_plus_1 = IntPtrAdd(i, IntPtrConstant(1));
- Node* const i_plus_2 = IntPtrAdd(i_plus_1, IntPtrConstant(1));
+ TNode<IntPtrT> i = var_i.value();
+ TNode<IntPtrT> i_plus_1 = IntPtrAdd(i, IntPtrConstant(1));
+ TNode<IntPtrT> i_plus_2 = IntPtrAdd(i_plus_1, IntPtrConstant(1));
- Node* const name = LoadFixedArrayElement(names, i);
- Node* const index = LoadFixedArrayElement(names, i_plus_1);
- Node* const capture =
- LoadFixedArrayElement(result_elements, SmiUntag(index));
+ TNode<String> name = CAST(LoadFixedArrayElement(names, i));
+ TNode<Smi> index = CAST(LoadFixedArrayElement(names, i_plus_1));
+ TNode<HeapObject> capture =
+ CAST(LoadFixedArrayElement(result_elements, SmiUntag(index)));
// TODO(jgruber): Calling into runtime to create each property is slow.
// Either we should create properties entirely in CSA (should be doable),
@@ -295,7 +291,7 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
CallRuntime(Runtime::kCreateDataProperty, context, group_object, name,
capture);
- var_i.Bind(i_plus_2);
+ var_i = i_plus_2;
Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length), &out,
&loop);
}
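
// Illustrative sketch, not part of the patch: the named_captures block above
// reads the capture-name map, in which each even index holds a name and the
// following odd index holds that name's capture index, and stores one
// property per name on the freshly allocated groups object. The same
// association in plain C++ (hypothetical names; std::map stands in for the
// NameDictionary-backed object):
#include <cstddef>
#include <map>
#include <string>
#include <utility>
#include <vector>

std::map<std::string, std::string> BuildGroups(
    const std::vector<std::pair<std::string, std::size_t>>& name_map,
    const std::vector<std::string>& captures) {
  std::map<std::string, std::string> groups;
  for (const auto& entry : name_map) {
    // entry.first is the group name, entry.second its capture index.
    groups[entry.first] = captures[entry.second];
  }
  return groups;
}
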
@@ -325,42 +321,30 @@ void RegExpBuiltinsAssembler::GetStringPointers(
var_string_end->Bind(IntPtrAdd(string_data, to_offset));
}
-Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
- Node* const regexp,
- Node* const string,
- Node* const last_index,
- Node* const match_info) {
+TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
+ TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string,
+ TNode<Number> last_index, TNode<RegExpMatchInfo> match_info) {
// Jump directly to the runtime if native RegExp support was not selected at
// compile time, or if entry into generated regexp code has been switched
// off, either by a runtime flag or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- return CallRuntime(Runtime::kRegExpExec, context, regexp, string, last_index,
- match_info);
+ return CAST(CallRuntime(Runtime::kRegExpExec, context, regexp, string,
+ last_index, match_info));
#else // V8_INTERPRETED_REGEXP
- CSA_ASSERT(this, TaggedIsNotSmi(regexp));
- CSA_ASSERT(this, IsJSRegExp(regexp));
-
- CSA_ASSERT(this, TaggedIsNotSmi(string));
- CSA_ASSERT(this, IsString(string));
-
- CSA_ASSERT(this, IsNumber(last_index));
- CSA_ASSERT(this, IsFixedArrayMap(LoadReceiverMap(match_info)));
-
- Node* const int_zero = IntPtrConstant(0);
-
ToDirectStringAssembler to_direct(state(), string);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(HeapObject, var_result);
Label out(this), atom(this), runtime(this, Label::kDeferred);
// External constants.
- Node* const isolate_address =
+ TNode<ExternalReference> isolate_address =
ExternalConstant(ExternalReference::isolate_address(isolate()));
- Node* const regexp_stack_memory_address_address = ExternalConstant(
- ExternalReference::address_of_regexp_stack_memory_address(isolate()));
- Node* const regexp_stack_memory_size_address = ExternalConstant(
+ TNode<ExternalReference> regexp_stack_memory_address_address =
+ ExternalConstant(
+ ExternalReference::address_of_regexp_stack_memory_address(isolate()));
+ TNode<ExternalReference> regexp_stack_memory_size_address = ExternalConstant(
ExternalReference::address_of_regexp_stack_memory_size(isolate()));
- Node* const static_offsets_vector_address = ExternalConstant(
+ TNode<ExternalReference> static_offsets_vector_address = ExternalConstant(
ExternalReference::address_of_static_offsets_vector(isolate()));
// At this point, last_index is definitely a canonicalized non-negative
@@ -374,21 +358,18 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
CSA_ASSERT(this, IsNumberPositive(last_index));
GotoIf(TaggedIsNotSmi(last_index), &if_failure);
- Node* const int_string_length = LoadStringLengthAsWord(string);
- Node* const int_last_index = SmiUntag(last_index);
+ TNode<IntPtrT> int_string_length = LoadStringLengthAsWord(string);
+ TNode<IntPtrT> int_last_index = SmiUntag(CAST(last_index));
GotoIf(UintPtrGreaterThan(int_last_index, int_string_length), &if_failure);
- Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
+ // Since the RegExp has been compiled, data contains a fixed array.
+ TNode<FixedArray> data = CAST(LoadObjectField(regexp, JSRegExp::kDataOffset));
{
- // Check that the RegExp has been compiled (data contains a fixed array).
- CSA_ASSERT(this, TaggedIsNotSmi(data));
- CSA_ASSERT(this, HasInstanceType(data, FIXED_ARRAY_TYPE));
-
// Dispatch on the type of the RegExp.
{
Label next(this), unreachable(this, Label::kDeferred);
- Node* const tag = LoadAndUntagToWord32FixedArrayElement(
+ TNode<Int32T> tag = LoadAndUntagToWord32FixedArrayElement(
data, IntPtrConstant(JSRegExp::kTagIndex));
int32_t values[] = {
@@ -407,22 +388,21 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Check (number_of_captures + 1) * 2 <= offsets vector size
// Or number_of_captures <= offsets vector size / 2 - 1
- TNode<Smi> const capture_count =
+ TNode<Smi> capture_count =
CAST(LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureCountIndex));
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- GotoIf(SmiAbove(
- capture_count,
- SmiConstant(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1)),
+ const int kOffsetsSize = Isolate::kJSRegexpStaticOffsetsVectorSize;
+ STATIC_ASSERT(kOffsetsSize >= 2);
+ GotoIf(SmiAbove(capture_count, SmiConstant(kOffsetsSize / 2 - 1)),
&runtime);
}
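
// Illustrative sketch, not part of the patch: the bound checked above is the
// rewritten form of (capture_count + 1) * 2 <= kJSRegexpStaticOffsetsVectorSize,
// i.e. capture_count <= kJSRegexpStaticOffsetsVectorSize / 2 - 1, which
// avoids a multiply-and-add on the Smi value. A standalone check of the
// equivalence (plain C++; the vector size used here is a hypothetical
// stand-in for the isolate constant):
#include <cassert>

int main() {
  const int kOffsetsSize = 32;  // hypothetical value, >= 2 as the builtin asserts
  for (int capture_count = 0; capture_count < 64; ++capture_count) {
    bool original = (capture_count + 1) * 2 <= kOffsetsSize;
    bool rewritten = capture_count <= kOffsetsSize / 2 - 1;
    assert(original == rewritten);
  }
  return 0;
}
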
// Ensure that a RegExp stack is allocated. This check is after branching off
// for ATOM regexps to avoid unnecessary trips to runtime.
{
- Node* const stack_size =
- Load(MachineType::IntPtr(), regexp_stack_memory_size_address);
- GotoIf(IntPtrEqual(stack_size, int_zero), &runtime);
+ TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), regexp_stack_memory_size_address));
+ GotoIf(IntPtrEqual(stack_size, IntPtrZero()), &runtime);
}
// Unpack the string if possible.
@@ -432,12 +412,12 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Load the irregexp code object and offsets into the subject string. Both
// depend on whether the string is one- or two-byte.
- VARIABLE(var_string_start, MachineType::PointerRepresentation());
- VARIABLE(var_string_end, MachineType::PointerRepresentation());
- VARIABLE(var_code, MachineRepresentation::kTagged);
+ TVARIABLE(RawPtrT, var_string_start);
+ TVARIABLE(RawPtrT, var_string_end);
+ TVARIABLE(Object, var_code);
{
- Node* const direct_string_data = to_direct.PointerToData(&runtime);
+ TNode<RawPtrT> direct_string_data = to_direct.PointerToData(&runtime);
Label next(this), if_isonebyte(this), if_istwobyte(this, Label::kDeferred);
Branch(IsOneByteStringInstanceType(to_direct.instance_type()),
@@ -448,8 +428,8 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
GetStringPointers(direct_string_data, to_direct.offset(), int_last_index,
int_string_length, String::ONE_BYTE_ENCODING,
&var_string_start, &var_string_end);
- var_code.Bind(
- LoadFixedArrayElement(data, JSRegExp::kIrregexpLatin1CodeIndex));
+ var_code =
+ LoadFixedArrayElement(data, JSRegExp::kIrregexpLatin1CodeIndex);
Goto(&next);
}
@@ -458,8 +438,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
GetStringPointers(direct_string_data, to_direct.offset(), int_last_index,
int_string_length, String::TWO_BYTE_ENCODING,
&var_string_start, &var_string_end);
- var_code.Bind(
- LoadFixedArrayElement(data, JSRegExp::kIrregexpUC16CodeIndex));
+ var_code = LoadFixedArrayElement(data, JSRegExp::kIrregexpUC16CodeIndex);
Goto(&next);
}
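
// Illustrative sketch, not part of the patch: the two branches above select
// the Latin1 or UC16 code object and compute start/end pointers into the
// flattened subject, scaled by the character size of the chosen encoding.
// The pointer arithmetic in plain C++ (hypothetical helper mirroring
// GetStringPointers; char_size is 1 for one-byte and 2 for two-byte strings):
#include <cstddef>
#include <cstdint>
#include <utility>

std::pair<const std::uint8_t*, const std::uint8_t*> StringPointers(
    const std::uint8_t* string_data, std::size_t offset, std::size_t last_index,
    std::size_t string_length, std::size_t char_size) {
  const std::uint8_t* start = string_data + (offset + last_index) * char_size;
  const std::uint8_t* end = string_data + (offset + string_length) * char_size;
  return {start, end};
}
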
@@ -469,15 +448,19 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; and otherwise it
// contains the uninitialized sentinel as a smi.
+#ifdef DEBUG
+ {
+ Label next(this);
+ GotoIfNot(TaggedIsSmi(var_code.value()), &next);
+ CSA_ASSERT(this, SmiEqual(CAST(var_code.value()),
+ SmiConstant(JSRegExp::kUninitializedValue)));
+ Goto(&next);
+ BIND(&next);
+ }
+#endif
- Node* const code = var_code.value();
- CSA_ASSERT_BRANCH(this, [=](Label* ok, Label* not_ok) {
- GotoIfNot(TaggedIsSmi(code), ok);
- Branch(SmiEqual(CAST(code), SmiConstant(JSRegExp::kUninitializedValue)), ok,
- not_ok);
- });
- GotoIf(TaggedIsSmi(code), &runtime);
- CSA_ASSERT(this, HasInstanceType(code, CODE_TYPE));
+ GotoIf(TaggedIsSmi(var_code.value()), &runtime);
+ TNode<Code> code = CAST(var_code.value());
Label if_success(this), if_exception(this, Label::kDeferred);
{
@@ -494,61 +477,62 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Argument 0: Original subject string.
MachineType arg0_type = type_tagged;
- Node* const arg0 = string;
+ TNode<String> arg0 = string;
// Argument 1: Previous index.
MachineType arg1_type = type_int32;
- Node* const arg1 = TruncateIntPtrToInt32(int_last_index);
+ TNode<Int32T> arg1 = TruncateIntPtrToInt32(int_last_index);
// Argument 2: Start of string data.
MachineType arg2_type = type_ptr;
- Node* const arg2 = var_string_start.value();
+ TNode<RawPtrT> arg2 = var_string_start.value();
// Argument 3: End of string data.
MachineType arg3_type = type_ptr;
- Node* const arg3 = var_string_end.value();
+ TNode<RawPtrT> arg3 = var_string_end.value();
// Argument 4: static offsets vector buffer.
MachineType arg4_type = type_ptr;
- Node* const arg4 = static_offsets_vector_address;
+ TNode<ExternalReference> arg4 = static_offsets_vector_address;
// Argument 5: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global
// regexps.
MachineType arg5_type = type_int32;
- Node* const arg5 = Int32Constant(0);
+ TNode<Int32T> arg5 = Int32Constant(0);
// Argument 6: Start (high end) of backtracking stack memory area.
- Node* const stack_start =
- Load(MachineType::Pointer(), regexp_stack_memory_address_address);
- Node* const stack_size =
- Load(MachineType::IntPtr(), regexp_stack_memory_size_address);
- Node* const stack_end = IntPtrAdd(stack_start, stack_size);
+ TNode<RawPtrT> stack_start = UncheckedCast<RawPtrT>(
+ Load(MachineType::Pointer(), regexp_stack_memory_address_address));
+ TNode<IntPtrT> stack_size = UncheckedCast<IntPtrT>(
+ Load(MachineType::IntPtr(), regexp_stack_memory_size_address));
+ TNode<RawPtrT> stack_end =
+ ReinterpretCast<RawPtrT>(IntPtrAdd(stack_start, stack_size));
MachineType arg6_type = type_ptr;
- Node* const arg6 = stack_end;
+ TNode<RawPtrT> arg6 = stack_end;
// Argument 7: Indicate that this is a direct call from JavaScript.
MachineType arg7_type = type_int32;
- Node* const arg7 = Int32Constant(1);
+ TNode<Int32T> arg7 = Int32Constant(1);
// Argument 8: Pass current isolate address.
MachineType arg8_type = type_ptr;
- Node* const arg8 = isolate_address;
+ TNode<ExternalReference> arg8 = isolate_address;
- Node* const code_entry =
+ TNode<RawPtrT> code_entry = ReinterpretCast<RawPtrT>(
IntPtrAdd(BitcastTaggedToWord(code),
- IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
- Node* const result = CallCFunction9(
+ TNode<Int32T> result = UncheckedCast<Int32T>(CallCFunction9(
retval_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
arg5_type, arg6_type, arg7_type, arg8_type, code_entry, arg0, arg1,
- arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ arg2, arg3, arg4, arg5, arg6, arg7, arg8));
// Check the result.
// We expect exactly one result since we force the called regexp to behave
// as non-global.
- Node* const int_result = ChangeInt32ToIntPtr(result);
+ TNode<IntPtrT> int_result = ChangeInt32ToIntPtr(result);
GotoIf(IntPtrEqual(int_result,
IntPtrConstant(NativeRegExpMacroAssembler::SUCCESS)),
&if_success);
@@ -570,13 +554,13 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Check that the last match info has space for the capture registers and
// the additional information. Ensure no overflow in add.
STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- TNode<Smi> const available_slots =
+ TNode<Smi> available_slots =
SmiSub(LoadFixedArrayBaseLength(match_info),
SmiConstant(RegExpMatchInfo::kLastMatchOverhead));
- TNode<Smi> const capture_count =
+ TNode<Smi> capture_count =
CAST(LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureCountIndex));
// Calculate number of register_count = (capture_count + 1) * 2.
- TNode<Smi> const register_count =
+ TNode<Smi> register_count =
SmiShl(SmiAdd(capture_count, SmiConstant(1)), 1);
GotoIf(SmiGreaterThan(register_count, available_slots), &runtime);
@@ -591,21 +575,21 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// Fill match and capture offsets in match_info.
{
- Node* const limit_offset = ElementOffsetFromIndex(
+ TNode<IntPtrT> limit_offset = ElementOffsetFromIndex(
register_count, INT32_ELEMENTS, SMI_PARAMETERS, 0);
- Node* const to_offset = ElementOffsetFromIndex(
+ TNode<IntPtrT> to_offset = ElementOffsetFromIndex(
IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), PACKED_ELEMENTS,
INTPTR_PARAMETERS, RegExpMatchInfo::kHeaderSize - kHeapObjectTag);
- VARIABLE(var_to_offset, MachineType::PointerRepresentation(), to_offset);
+ TVARIABLE(IntPtrT, var_to_offset, to_offset);
VariableList vars({&var_to_offset}, zone());
BuildFastLoop(
- vars, int_zero, limit_offset,
+ vars, IntPtrZero(), limit_offset,
[=, &var_to_offset](Node* offset) {
- Node* const value = Load(MachineType::Int32(),
- static_offsets_vector_address, offset);
- Node* const smi_value = SmiFromInt32(value);
+ TNode<Int32T> value = UncheckedCast<Int32T>(Load(
+ MachineType::Int32(), static_offsets_vector_address, offset));
+ TNode<Smi> smi_value = SmiFromInt32(value);
StoreNoWriteBarrier(MachineRepresentation::kTagged, match_info,
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kPointerSize);
@@ -613,13 +597,13 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
- var_result.Bind(match_info);
+ var_result = match_info;
Goto(&out);
}
BIND(&if_failure);
{
- var_result.Bind(NullConstant());
+ var_result = NullConstant();
Goto(&out);
}
@@ -627,7 +611,7 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
{
// A stack overflow was detected in RegExp code.
#ifdef DEBUG
- Node* const pending_exception_address =
+ TNode<ExternalReference> pending_exception_address =
ExternalConstant(ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate()));
CSA_ASSERT(this, IsTheHole(Load(MachineType::AnyTagged(),
@@ -639,9 +623,8 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
BIND(&runtime);
{
- Node* const result = CallRuntime(Runtime::kRegExpExec, context, regexp,
- string, last_index, match_info);
- var_result.Bind(result);
+ var_result = CAST(CallRuntime(Runtime::kRegExpExec, context, regexp, string,
+ last_index, match_info));
Goto(&out);
}
@@ -649,9 +632,8 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
{
// TODO(jgruber): A call with 4 args stresses register allocation, this
// should probably just be inlined.
- Node* const result = CallBuiltin(Builtins::kRegExpExecAtom, context, regexp,
- string, last_index, match_info);
- var_result.Bind(result);
+ var_result = CAST(CallBuiltin(Builtins::kRegExpExecAtom, context, regexp,
+ string, last_index, match_info));
Goto(&out);
}
@@ -663,47 +645,45 @@ Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
// Implements the core of RegExp.prototype.exec but without actually
-// constructing the JSRegExpResult. Returns either null (if the RegExp did not
-// match) or a fixed array containing match indices as returned by
-// RegExpExecStub.
-Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
- Node* const context, Node* const regexp, Node* const string,
- Label* if_didnotmatch, const bool is_fastpath) {
- Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(0);
-
- if (is_fastpath) {
- CSA_ASSERT(this, HasInstanceType(regexp, JS_REGEXP_TYPE));
- } else {
- ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
+// constructing the JSRegExpResult. Returns a fixed array containing match
+// indices as returned by RegExpExecStub on successful match, and jumps to
+// if_didnotmatch otherwise.
+TNode<RegExpMatchInfo>
+RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
+ TNode<Context> context, TNode<JSReceiver> maybe_regexp,
+ TNode<String> string, Label* if_didnotmatch, const bool is_fastpath) {
+ if (!is_fastpath) {
+ ThrowIfNotInstanceType(context, maybe_regexp, JS_REGEXP_TYPE,
"RegExp.prototype.exec");
}
- CSA_ASSERT(this, IsString(string));
- CSA_ASSERT(this, IsJSRegExp(regexp));
+ TNode<JSRegExp> regexp = CAST(maybe_regexp);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(HeapObject, var_result);
Label out(this);
// Load lastIndex.
- VARIABLE(var_lastindex, MachineRepresentation::kTagged);
+ TVARIABLE(Number, var_lastindex);
{
- Node* const regexp_lastindex = LoadLastIndex(context, regexp, is_fastpath);
- var_lastindex.Bind(regexp_lastindex);
+ TNode<Object> regexp_lastindex =
+ LoadLastIndex(context, regexp, is_fastpath);
if (is_fastpath) {
// ToLength on a positive smi is a nop and can be skipped.
CSA_ASSERT(this, TaggedIsPositiveSmi(regexp_lastindex));
+ var_lastindex = CAST(regexp_lastindex);
} else {
// Omit ToLength if lastindex is a non-negative smi.
- Label call_tolength(this, Label::kDeferred), next(this);
- Branch(TaggedIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
+ Label call_tolength(this, Label::kDeferred), is_smi(this), next(this);
+ Branch(TaggedIsPositiveSmi(regexp_lastindex), &is_smi, &call_tolength);
BIND(&call_tolength);
- {
- var_lastindex.Bind(ToLength_Inline(context, regexp_lastindex));
- Goto(&next);
- }
+ var_lastindex = ToLength_Inline(context, regexp_lastindex);
+ Goto(&next);
+
+ BIND(&is_smi);
+ var_lastindex = CAST(regexp_lastindex);
+ Goto(&next);
BIND(&next);
}
@@ -711,11 +691,11 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
// Check whether the regexp is global or sticky, which determines whether we
// update last index later on.
- Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const is_global_or_sticky = WordAnd(
+ TNode<Smi> flags = CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset));
+ TNode<IntPtrT> is_global_or_sticky = WordAnd(
SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
- Node* const should_update_last_index =
- WordNotEqual(is_global_or_sticky, int_zero);
+ TNode<BoolT> should_update_last_index =
+ WordNotEqual(is_global_or_sticky, IntPtrZero());
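
// Illustrative sketch, not part of the patch: should_update_last_index above
// tests whether the global or sticky bit is set in the regexp's flags Smi by
// masking the untagged flags word. The same test in plain C++ (the bit
// assignments below are illustrative, not V8's actual flag values):
#include <cstdio>

enum HypotheticalRegExpFlag {
  kGlobalBit = 1 << 0,
  kIgnoreCaseBit = 1 << 1,
  kStickyBit = 1 << 2,
};

bool ShouldUpdateLastIndex(int flags) {
  return (flags & (kGlobalBit | kStickyBit)) != 0;
}

int main() {
  std::printf("%d %d\n", ShouldUpdateLastIndex(kIgnoreCaseBit),     // prints 0
              ShouldUpdateLastIndex(kGlobalBit | kStickyBit));      // prints 1
  return 0;
}
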
// Grab and possibly update last index.
Label run_exec(this);
@@ -725,42 +705,40 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
BIND(&if_doupdate);
{
- Node* const lastindex = var_lastindex.value();
-
Label if_isoob(this, Label::kDeferred);
- GotoIfNot(TaggedIsSmi(lastindex), &if_isoob);
- TNode<Smi> const string_length = LoadStringLengthAsSmi(string);
- GotoIfNot(SmiLessThanOrEqual(CAST(lastindex), string_length), &if_isoob);
+ GotoIfNot(TaggedIsSmi(var_lastindex.value()), &if_isoob);
+ TNode<Smi> string_length = LoadStringLengthAsSmi(string);
+ GotoIfNot(SmiLessThanOrEqual(CAST(var_lastindex.value()), string_length),
+ &if_isoob);
Goto(&run_exec);
BIND(&if_isoob);
{
- StoreLastIndex(context, regexp, smi_zero, is_fastpath);
- var_result.Bind(NullConstant());
+ StoreLastIndex(context, regexp, SmiZero(), is_fastpath);
Goto(if_didnotmatch);
}
}
BIND(&if_dontupdate);
{
- var_lastindex.Bind(smi_zero);
+ var_lastindex = SmiZero();
Goto(&run_exec);
}
}
- Node* match_indices;
+ TNode<HeapObject> match_indices;
Label successful_match(this);
BIND(&run_exec);
{
// Get last match info from the context.
- Node* const native_context = LoadNativeContext(context);
- Node* const last_match_info = LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<RegExpMatchInfo> last_match_info = CAST(LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX));
// Call the exec stub.
match_indices = RegExpExecInternal(context, regexp, string,
var_lastindex.value(), last_match_info);
- var_result.Bind(match_indices);
+ var_result = match_indices;
// {match_indices} is either null or the RegExpMatchInfo array.
// Return early if exec failed, possibly updating last index.
@@ -768,7 +746,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
GotoIfNot(should_update_last_index, if_didnotmatch);
- StoreLastIndex(context, regexp, smi_zero, is_fastpath);
+ StoreLastIndex(context, regexp, SmiZero(), is_fastpath);
Goto(if_didnotmatch);
}
@@ -777,40 +755,38 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
GotoIfNot(should_update_last_index, &out);
// Update the new last index from {match_indices}.
- Node* const new_lastindex = LoadFixedArrayElement(
- match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+ TNode<Number> new_lastindex = CAST(LoadFixedArrayElement(
+ CAST(match_indices), RegExpMatchInfo::kFirstCaptureIndex + 1));
StoreLastIndex(context, regexp, new_lastindex, is_fastpath);
Goto(&out);
}
BIND(&out);
- return var_result.value();
+ return CAST(var_result.value());
}
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
- Node* const context, Node* const regexp, TNode<String> const string,
- const bool is_fastpath) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<HeapObject> RegExpBuiltinsAssembler::RegExpPrototypeExecBody(
+ TNode<Context> context, TNode<JSReceiver> maybe_regexp,
+ TNode<String> string, const bool is_fastpath) {
+ TVARIABLE(HeapObject, var_result);
Label if_didnotmatch(this), out(this);
- Node* const indices_or_null = RegExpPrototypeExecBodyWithoutResult(
- context, regexp, string, &if_didnotmatch, is_fastpath);
+ TNode<RegExpMatchInfo> match_indices = RegExpPrototypeExecBodyWithoutResult(
+ context, maybe_regexp, string, &if_didnotmatch, is_fastpath);
// Successful match.
{
- Node* const match_indices = indices_or_null;
- Node* const result =
- ConstructNewResultFromMatchInfo(context, regexp, match_indices, string);
- var_result.Bind(result);
+ var_result = ConstructNewResultFromMatchInfo(context, maybe_regexp,
+ match_indices, string);
Goto(&out);
}
BIND(&if_didnotmatch);
{
- var_result.Bind(NullConstant());
+ var_result = NullConstant();
Goto(&out);
}
@@ -868,7 +844,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Node* const last_index = FastLoadLastIndex(object);
+ Node* const last_index = FastLoadLastIndex(CAST(object));
var_result.Bind(TaggedIsPositiveSmi(last_index));
Goto(&out);
@@ -962,7 +938,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Node* const last_index = FastLoadLastIndex(object);
+ Node* const last_index = FastLoadLastIndex(CAST(object));
Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified);
}
@@ -1011,9 +987,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* const context,
// Slow path stub for RegExpPrototypeExec to decrease code size.
TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
- Node* const regexp = Parameter(Descriptor::kReceiver);
- TNode<String> const string = CAST(Parameter(Descriptor::kString));
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kReceiver));
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Return(RegExpPrototypeExecBody(context, regexp, string, false));
}
@@ -1022,19 +998,15 @@ TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
// and {match_info} is updated on success.
// The slow path is implemented in RegExpImpl::AtomExec.
TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
- Node* const regexp = Parameter(Descriptor::kRegExp);
- Node* const subject_string = Parameter(Descriptor::kString);
- Node* const last_index = Parameter(Descriptor::kLastIndex);
- Node* const match_info = Parameter(Descriptor::kMatchInfo);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> subject_string = CAST(Parameter(Descriptor::kString));
+ TNode<Smi> last_index = CAST(Parameter(Descriptor::kLastIndex));
+ TNode<FixedArray> match_info = CAST(Parameter(Descriptor::kMatchInfo));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- CSA_ASSERT(this, IsJSRegExp(regexp));
- CSA_ASSERT(this, IsString(subject_string));
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
- CSA_ASSERT(this, IsFixedArray(match_info));
- Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
- CSA_ASSERT(this, IsFixedArray(data));
+ TNode<FixedArray> data = CAST(LoadObjectField(regexp, JSRegExp::kDataOffset));
CSA_ASSERT(this,
SmiEqual(CAST(LoadFixedArrayElement(data, JSRegExp::kTagIndex)),
SmiConstant(JSRegExp::ATOM)));
@@ -1089,7 +1061,7 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
TNode<String> string = CAST(Parameter(Descriptor::kString));
TNode<Number> last_index = CAST(Parameter(Descriptor::kLastIndex));
- TNode<FixedArray> match_info = CAST(Parameter(Descriptor::kMatchInfo));
+ TNode<RegExpMatchInfo> match_info = CAST(Parameter(Descriptor::kMatchInfo));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsNumberNormalized(last_index));
@@ -1101,35 +1073,28 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_string = Parameter(Descriptor::kString);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Ensure {maybe_receiver} is a JSRegExp.
ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
"RegExp.prototype.exec");
- Node* const receiver = maybe_receiver;
+ TNode<JSRegExp> receiver = CAST(maybe_receiver);
// Convert {maybe_string} to a String.
- TNode<String> const string = ToString_Inline(context, maybe_string);
+ TNode<String> string = ToString_Inline(context, maybe_string);
Label if_isfastpath(this), if_isslowpath(this);
Branch(IsFastRegExpNoPrototype(context, receiver), &if_isfastpath,
&if_isslowpath);
BIND(&if_isfastpath);
- {
- Node* const result =
- RegExpPrototypeExecBody(context, receiver, string, true);
- Return(result);
- }
+ Return(RegExpPrototypeExecBody(context, receiver, string, true));
BIND(&if_isslowpath);
- {
- Node* const result = CallBuiltin(Builtins::kRegExpPrototypeExecSlow,
- context, receiver, string);
- Return(result);
- }
+ Return(CallBuiltin(Builtins::kRegExpPrototypeExecSlow, context, receiver,
+ string));
}
Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
@@ -1138,7 +1103,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
Isolate* isolate = this->isolate();
TNode<IntPtrT> const int_one = IntPtrConstant(1);
- TVARIABLE(Smi, var_length, SmiConstant(0));
+ TVARIABLE(Smi, var_length, SmiZero());
TVARIABLE(IntPtrT, var_flags);
// First, count the number of characters we will need and check which flags
@@ -1170,7 +1135,7 @@ Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
DCHECK(!is_fastpath);
// Fall back to GetProperty stub on the slow-path.
- var_flags = IntPtrConstant(0);
+ var_flags = IntPtrZero();
#define CASE_FOR_FLAG(NAME, FLAG) \
do { \
@@ -1289,13 +1254,13 @@ Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
// ES #sec-get-regexp.prototype.flags
TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* const map = ThrowIfNotJSReceiver(context, maybe_receiver,
- MessageTemplate::kRegExpNonObject,
- "RegExp.prototype.flags");
- Node* const receiver = maybe_receiver;
+ TNode<Map> map = CAST(ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kRegExpNonObject,
+ "RegExp.prototype.flags"));
+ TNode<JSReceiver> receiver = CAST(maybe_receiver);
Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
BranchIfFastRegExp(context, receiver, map, &if_isfastpath, &if_isslowpath);
@@ -1310,10 +1275,10 @@ TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
// ES#sec-regexp-pattern-flags
// RegExp ( pattern, flags )
TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
- Node* const pattern = Parameter(Descriptor::kPattern);
- Node* const flags = Parameter(Descriptor::kFlags);
- Node* const new_target = Parameter(Descriptor::kJSNewTarget);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern));
+ TNode<Object> flags = CAST(Parameter(Descriptor::kFlags));
+ TNode<Object> new_target = CAST(Parameter(Descriptor::kJSNewTarget));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Isolate* isolate = this->isolate();
@@ -1350,13 +1315,14 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
if_patternisslowregexp(this);
GotoIf(TaggedIsSmi(pattern), &next);
- GotoIf(IsJSRegExp(pattern), &if_patternisfastregexp);
+ GotoIf(IsJSRegExp(CAST(pattern)), &if_patternisfastregexp);
Branch(pattern_is_regexp, &if_patternisslowregexp, &next);
BIND(&if_patternisfastregexp);
{
- Node* const source = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+ Node* const source =
+ LoadObjectField(CAST(pattern), JSRegExp::kSourceOffset);
var_pattern.Bind(source);
{
@@ -1437,10 +1403,10 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
// ES#sec-regexp.prototype.compile
// RegExp.prototype.compile ( pattern, flags )
TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_pattern = Parameter(Descriptor::kPattern);
- Node* const maybe_flags = Parameter(Descriptor::kFlags);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_pattern = CAST(Parameter(Descriptor::kPattern));
+ TNode<Object> maybe_flags = CAST(Parameter(Descriptor::kFlags));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
"RegExp.prototype.compile");
@@ -1454,7 +1420,7 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
Label next(this);
GotoIf(TaggedIsSmi(maybe_pattern), &next);
- GotoIfNot(IsJSRegExp(maybe_pattern), &next);
+ GotoIfNot(IsJSRegExp(CAST(maybe_pattern)), &next);
Node* const pattern = maybe_pattern;
@@ -1486,20 +1452,17 @@ TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
// ES6 21.2.5.10.
// ES #sec-get-regexp.prototype.source
TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Check whether we have an unmodified regexp instance.
Label if_isjsregexp(this), if_isnotjsregexp(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &if_isnotjsregexp);
- Branch(IsJSRegExp(receiver), &if_isjsregexp, &if_isnotjsregexp);
+ Branch(IsJSRegExp(CAST(receiver)), &if_isjsregexp, &if_isnotjsregexp);
BIND(&if_isjsregexp);
- {
- Node* const source = LoadObjectField(receiver, JSRegExp::kSourceOffset);
- Return(source);
- }
+ Return(LoadObjectField(CAST(receiver), JSRegExp::kSourceOffset));
BIND(&if_isnotjsregexp);
{
@@ -1653,8 +1616,8 @@ void RegExpBuiltinsAssembler::FlagGetter(Node* context, Node* receiver,
// ES6 21.2.5.4.
// ES #sec-get-regexp.prototype.global
TF_BUILTIN(RegExpPrototypeGlobalGetter, RegExpBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
FlagGetter(context, receiver, JSRegExp::kGlobal,
v8::Isolate::kRegExpPrototypeOldFlagGetter,
"RegExp.prototype.global");
@@ -1663,8 +1626,8 @@ TF_BUILTIN(RegExpPrototypeGlobalGetter, RegExpBuiltinsAssembler) {
// ES6 21.2.5.5.
// ES #sec-get-regexp.prototype.ignorecase
TF_BUILTIN(RegExpPrototypeIgnoreCaseGetter, RegExpBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
FlagGetter(context, receiver, JSRegExp::kIgnoreCase,
v8::Isolate::kRegExpPrototypeOldFlagGetter,
"RegExp.prototype.ignoreCase");
@@ -1673,8 +1636,8 @@ TF_BUILTIN(RegExpPrototypeIgnoreCaseGetter, RegExpBuiltinsAssembler) {
// ES6 21.2.5.7.
// ES #sec-get-regexp.prototype.multiline
TF_BUILTIN(RegExpPrototypeMultilineGetter, RegExpBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
FlagGetter(context, receiver, JSRegExp::kMultiline,
v8::Isolate::kRegExpPrototypeOldFlagGetter,
"RegExp.prototype.multiline");
@@ -1682,8 +1645,8 @@ TF_BUILTIN(RegExpPrototypeMultilineGetter, RegExpBuiltinsAssembler) {
// ES #sec-get-regexp.prototype.dotAll
TF_BUILTIN(RegExpPrototypeDotAllGetter, RegExpBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
static const int kNoCounter = -1;
FlagGetter(context, receiver, JSRegExp::kDotAll, kNoCounter,
"RegExp.prototype.dotAll");
@@ -1692,8 +1655,8 @@ TF_BUILTIN(RegExpPrototypeDotAllGetter, RegExpBuiltinsAssembler) {
// ES6 21.2.5.12.
// ES #sec-get-regexp.prototype.sticky
TF_BUILTIN(RegExpPrototypeStickyGetter, RegExpBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
FlagGetter(context, receiver, JSRegExp::kSticky,
v8::Isolate::kRegExpPrototypeStickyGetter,
"RegExp.prototype.sticky");
@@ -1702,8 +1665,8 @@ TF_BUILTIN(RegExpPrototypeStickyGetter, RegExpBuiltinsAssembler) {
// ES6 21.2.5.15.
// ES #sec-get-regexp.prototype.unicode
TF_BUILTIN(RegExpPrototypeUnicodeGetter, RegExpBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
FlagGetter(context, receiver, JSRegExp::kUnicode,
v8::Isolate::kRegExpPrototypeUnicodeGetter,
"RegExp.prototype.unicode");
@@ -1762,18 +1725,18 @@ Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
// ES#sec-regexp.prototype.test
// RegExp.prototype.test ( S )
TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_string = Parameter(Descriptor::kString);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Ensure {maybe_receiver} is a JSReceiver.
ThrowIfNotJSReceiver(context, maybe_receiver,
MessageTemplate::kIncompatibleMethodReceiver,
"RegExp.prototype.test");
- Node* const receiver = maybe_receiver;
+ TNode<JSReceiver> receiver = CAST(maybe_receiver);
// Convert {maybe_string} to a String.
- TNode<String> const string = ToString_Inline(context, maybe_string);
+ TNode<String> string = ToString_Inline(context, maybe_string);
Label fast_path(this), slow_path(this);
BranchIfFastRegExp(context, receiver, &fast_path, &slow_path);
@@ -1792,18 +1755,19 @@ TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
BIND(&slow_path);
{
// Call exec.
- Node* const match_indices = RegExpExec(context, receiver, string);
+ TNode<HeapObject> match_indices =
+ CAST(RegExpExec(context, receiver, string));
// Return true iff exec matched successfully.
- Node* const result = SelectBooleanConstant(IsNotNull(match_indices));
- Return(result);
+ Return(SelectBooleanConstant(IsNotNull(match_indices)));
}
}
TF_BUILTIN(RegExpPrototypeTestFast, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> const regexp = CAST(Parameter(Descriptor::kReceiver));
- TNode<String> const string = CAST(Parameter(Descriptor::kString));
- TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kReceiver));
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
Label if_didnotmatch(this);
CSA_ASSERT(this, IsFastRegExpWithOriginalExec(context, regexp));
RegExpPrototypeExecBodyWithoutResult(context, regexp, string, &if_didnotmatch,
@@ -1881,8 +1845,6 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
const bool is_fastpath) {
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
- Node* const int_zero = IntPtrConstant(0);
- Node* const smi_zero = SmiConstant(0);
Node* const is_global =
FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
@@ -1892,8 +1854,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
BIND(&if_isnotglobal);
{
Node* const result =
- is_fastpath ? RegExpPrototypeExecBody(context, regexp, string, true)
- : RegExpExec(context, regexp, string);
+ is_fastpath
+ ? RegExpPrototypeExecBody(CAST(context), CAST(regexp), string, true)
+ : RegExpExec(context, regexp, string);
Return(result);
}
@@ -1902,7 +1865,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Node* const is_unicode =
FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
- StoreLastIndex(context, regexp, smi_zero, is_fastpath);
+ StoreLastIndex(context, regexp, SmiZero(), is_fastpath);
// Allocate an array to store the resulting match strings.
@@ -1924,8 +1887,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
if (is_fastpath) {
// On the fast path, grab the matching string from the raw match index
// array.
- Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
- context, regexp, string, &if_didnotmatch, true);
+ TNode<RegExpMatchInfo> match_indices =
+ RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp),
+ string, &if_didnotmatch, true);
Node* const match_from = LoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex);
@@ -1944,14 +1908,14 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
BIND(&load_match);
var_match.Bind(
- ToString_Inline(context, GetProperty(context, result, smi_zero)));
+ ToString_Inline(context, GetProperty(context, result, SmiZero())));
Goto(&if_didmatch);
}
BIND(&if_didnotmatch);
{
// Return null if there were no matches, otherwise just exit the loop.
- GotoIfNot(IntPtrEqual(array.length(), int_zero), &out);
+ GotoIfNot(IntPtrEqual(array.length(), IntPtrZero()), &out);
Return(NullConstant());
}
@@ -1966,9 +1930,10 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// Advance last index if the match is the empty string.
TNode<Smi> const match_length = LoadStringLengthAsSmi(match);
- GotoIfNot(SmiEqual(match_length, SmiConstant(0)), &loop);
+ GotoIfNot(SmiEqual(match_length, SmiZero()), &loop);
- Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
+ Node* last_index =
+ LoadLastIndex(CAST(context), CAST(regexp), is_fastpath);
if (is_fastpath) {
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
} else {
@@ -2005,9 +1970,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// ES#sec-regexp.prototype-@@match
// RegExp.prototype [ @@match ] ( string )
TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_string = Parameter(Descriptor::kString);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Ensure {maybe_receiver} is a JSReceiver.
ThrowIfNotJSReceiver(context, maybe_receiver,
@@ -2124,9 +2089,8 @@ TNode<Object> RegExpBuiltinsAssembler::MatchAllIterator(
#ifdef DEBUG
// Assert: ! Get(matcher, "lastIndex") is 0.
- TNode<Object> last_index =
- CAST(LoadLastIndex(context, var_matcher.value(), false));
- CSA_ASSERT(this, WordEqual(SmiConstant(0), last_index));
+ TNode<Object> last_index = SlowLoadLastIndex(context, var_matcher.value());
+ CSA_ASSERT(this, WordEqual(SmiZero(), last_index));
#endif // DEBUG
Goto(&create_iterator);
@@ -2209,9 +2173,9 @@ TF_BUILTIN(RegExpPrototypeMatchAll, RegExpBuiltinsAssembler) {
// 1) receiver is a "fast" RegExp
// 2) pattern is a string
TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- TNode<String> const string = CAST(Parameter(Descriptor::kPattern));
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<String> string = CAST(Parameter(Descriptor::kPattern));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
RegExpPrototypeMatchBody(context, receiver, string, true);
}
@@ -2222,15 +2186,15 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
CSA_ASSERT(this, IsString(string));
// Grab the initial value of last index.
- Node* const previous_last_index = FastLoadLastIndex(regexp);
+ Node* const previous_last_index = FastLoadLastIndex(CAST(regexp));
// Ensure last index is 0.
- FastStoreLastIndex(regexp, SmiConstant(0));
+ FastStoreLastIndex(regexp, SmiZero());
// Call exec.
Label if_didnotmatch(this);
- Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
- context, regexp, string, &if_didnotmatch, true);
+ TNode<RegExpMatchInfo> match_indices = RegExpPrototypeExecBodyWithoutResult(
+ CAST(context), CAST(regexp), CAST(string), &if_didnotmatch, true);
// Successful match.
{
@@ -2258,10 +2222,11 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
Isolate* const isolate = this->isolate();
- Node* const smi_zero = SmiConstant(0);
+ Node* const smi_zero = SmiZero();
// Grab the initial value of last index.
- Node* const previous_last_index = SlowLoadLastIndex(context, regexp);
+ Node* const previous_last_index =
+ SlowLoadLastIndex(CAST(context), CAST(regexp));
// Ensure last index is 0.
{
@@ -2280,7 +2245,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
// Reset last index if necessary.
{
Label next(this), slow(this, Label::kDeferred);
- Node* const current_last_index = SlowLoadLastIndex(context, regexp);
+ Node* const current_last_index =
+ SlowLoadLastIndex(CAST(context), CAST(regexp));
BranchIfSameValue(current_last_index, previous_last_index, &next, &slow);
@@ -2321,9 +2287,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
// ES#sec-regexp.prototype-@@search
// RegExp.prototype [ @@search ] ( string )
TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_string = Parameter(Descriptor::kString);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Ensure {maybe_receiver} is a JSReceiver.
ThrowIfNotJSReceiver(context, maybe_receiver,
@@ -2350,9 +2316,9 @@ TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
// 1) receiver is a "fast" RegExp
// 2) pattern is a string
TF_BUILTIN(RegExpSearchFast, RegExpBuiltinsAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const string = Parameter(Descriptor::kPattern);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<JSRegExp> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<String> string = CAST(Parameter(Descriptor::kPattern));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
RegExpPrototypeSearchBodyFast(context, receiver, string);
}
@@ -2366,8 +2332,6 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
CSA_ASSERT(this, IsFastRegExp(context, regexp));
CSA_ASSERT(this, Word32BinaryNot(FastFlagGetter(regexp, JSRegExp::kSticky)));
- TNode<Smi> const smi_zero = SmiConstant(0);
- TNode<IntPtrT> const int_zero = IntPtrConstant(0);
TNode<IntPtrT> const int_limit = SmiUntag(limit);
const ElementsKind kind = PACKED_ELEMENTS;
@@ -2382,7 +2346,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// If limit is zero, return an empty array.
{
Label next(this), if_limitiszero(this, Label::kDeferred);
- Branch(SmiEqual(limit, smi_zero), &return_empty_array, &next);
+ Branch(SmiEqual(limit, SmiZero()), &return_empty_array, &next);
BIND(&next);
}
@@ -2392,7 +2356,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// array depending on whether the {regexp} matches.
{
Label next(this), if_stringisempty(this, Label::kDeferred);
- Branch(SmiEqual(string_length, smi_zero), &if_stringisempty, &next);
+ Branch(SmiEqual(string_length, SmiZero()), &if_stringisempty, &next);
BIND(&if_stringisempty);
{
@@ -2401,7 +2365,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const match_indices =
CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
- smi_zero, last_match_info);
+ SmiZero(), last_match_info);
Label return_singleton_array(this);
Branch(IsNull(match_indices), &return_singleton_array,
@@ -2414,7 +2378,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const result = AllocateJSArray(kind, array_map, capacity, length,
allocation_site, mode);
- Node* const fixed_array = LoadElements(result);
+ TNode<FixedArray> const fixed_array = CAST(LoadElements(result));
StoreFixedArrayElement(fixed_array, 0, string);
Return(result);
@@ -2428,8 +2392,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
GrowableFixedArray array(state());
- TVARIABLE(Smi, var_last_matched_until, smi_zero);
- TVARIABLE(Smi, var_next_search_from, smi_zero);
+ TVARIABLE(Smi, var_last_matched_until, SmiZero());
+ TVARIABLE(Smi, var_next_search_from, SmiZero());
Variable* vars[] = {array.var_array(), array.var_length(),
array.var_capacity(), &var_last_matched_until,
@@ -2456,17 +2420,18 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const match_indices =
- CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
- next_search_from, last_match_info);
+ TNode<HeapObject> const match_indices_ho =
+ CAST(CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
+ next_search_from, last_match_info));
// We're done if no match was found.
{
Label next(this);
- Branch(IsNull(match_indices), &push_suffix_and_out, &next);
+ Branch(IsNull(match_indices_ho), &push_suffix_and_out, &next);
BIND(&next);
}
+ TNode<FixedArray> match_indices = CAST(match_indices_ho);
TNode<Smi> const match_from = CAST(LoadFixedArrayElement(
match_indices, RegExpMatchInfo::kFirstCaptureIndex));
@@ -2585,8 +2550,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
BIND(&return_empty_array);
{
- Node* const length = smi_zero;
- Node* const capacity = int_zero;
+ Node* const length = SmiZero();
+ Node* const capacity = IntPtrZero();
Node* const result = AllocateJSArray(kind, array_map, capacity, length,
allocation_site, mode);
Return(result);
@@ -2595,10 +2560,10 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
- Node* const regexp = Parameter(Descriptor::kRegExp);
- TNode<String> const string = CAST(Parameter(Descriptor::kString));
- Node* const maybe_limit = Parameter(Descriptor::kLimit);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<Object> maybe_limit = CAST(Parameter(Descriptor::kLimit));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsFastRegExp(context, regexp));
@@ -2654,14 +2619,14 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
const int kStringArg = 0;
const int kLimitArg = 1;
- Node* argc =
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const maybe_receiver = args.GetReceiver();
- Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
- Node* const maybe_limit = args.GetOptionalArgumentValue(kLimitArg);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = args.GetReceiver();
+ TNode<Object> maybe_string = args.GetOptionalArgumentValue(kStringArg);
+ TNode<Object> maybe_limit = args.GetOptionalArgumentValue(kLimitArg);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// Ensure {maybe_receiver} is a JSReceiver.
ThrowIfNotJSReceiver(context, maybe_receiver,
@@ -2696,9 +2661,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
Isolate* const isolate = this->isolate();
Node* const undefined = UndefinedConstant();
- TNode<IntPtrT> const int_zero = IntPtrConstant(0);
- TNode<IntPtrT> const int_one = IntPtrConstant(1);
- TNode<Smi> const smi_zero = SmiConstant(0);
+ TNode<IntPtrT> int_one = IntPtrConstant(1);
Node* const native_context = LoadNativeContext(context);
@@ -2706,15 +2669,15 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
VARIABLE(var_result, MachineRepresentation::kTagged);
// Set last index to 0.
- FastStoreLastIndex(regexp, smi_zero);
+ FastStoreLastIndex(regexp, SmiZero());
// Allocate {result_array}.
Node* result_array;
{
ElementsKind kind = PACKED_ELEMENTS;
Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
- TNode<IntPtrT> const capacity = IntPtrConstant(16);
- TNode<Smi> const length = smi_zero;
+ TNode<IntPtrT> capacity = IntPtrConstant(16);
+ TNode<Smi> length = SmiZero();
Node* const allocation_site = nullptr;
ParameterMode capacity_mode = CodeStubAssembler::INTPTR_PARAMETERS;
@@ -2723,25 +2686,24 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
}
// Call into runtime for RegExpExecMultiple.
- Node* last_match_info =
- LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ TNode<FixedArray> last_match_info = CAST(LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX));
Node* const res = CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
string, last_match_info, result_array);
// Reset last index to 0.
- FastStoreLastIndex(regexp, smi_zero);
+ FastStoreLastIndex(regexp, SmiZero());
// If no matches, return the subject string.
var_result.Bind(string);
GotoIf(IsNull(res), &out);
// Reload last match info since it might have changed.
- last_match_info =
- LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ last_match_info = CAST(LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX));
Node* const res_length = LoadJSArrayLength(res);
- Node* const res_elems = LoadElements(res);
- CSA_ASSERT(this, HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
+ TNode<FixedArray> const res_elems = CAST(LoadElements(res));
TNode<Smi> const num_capture_registers = CAST(LoadFixedArrayElement(
last_match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
@@ -2760,10 +2722,10 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
// input string and some replacements that were returned from the replace
// function.
- TVARIABLE(Smi, var_match_start, smi_zero);
+ TVARIABLE(Smi, var_match_start, SmiZero());
TNode<IntPtrT> const end = SmiUntag(res_length);
- TVARIABLE(IntPtrT, var_i, int_zero);
+ TVARIABLE(IntPtrT, var_i, IntPtrZero());
Variable* vars[] = {&var_i, &var_match_start};
Label loop(this, 2, vars);
@@ -2782,7 +2744,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
TNode<Smi> smi_elem = CAST(elem);
// Integers represent slices of the original string.
Label if_isnegativeorzero(this), if_ispositive(this);
- BranchIfSmiLessThanOrEqual(smi_elem, smi_zero, &if_isnegativeorzero,
+ BranchIfSmiLessThanOrEqual(smi_elem, SmiZero(), &if_isnegativeorzero,
&if_ispositive);
BIND(&if_ispositive);
@@ -2837,7 +2799,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
BIND(&if_hasexplicitcaptures);
{
- Node* const from = int_zero;
+ Node* const from = IntPtrZero();
Node* const to = SmiUntag(res_length);
const int increment = 1;
@@ -2903,12 +2865,10 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- TNode<Smi> const smi_zero = SmiConstant(0);
const bool kIsFastPath = true;
TVARIABLE(String, var_result, EmptyStringConstant());
- VARIABLE(var_match_indices, MachineRepresentation::kTagged);
- VARIABLE(var_last_match_end, MachineRepresentation::kTagged, smi_zero);
+ VARIABLE(var_last_match_end, MachineRepresentation::kTagged, SmiZero());
VARIABLE(var_is_unicode, MachineRepresentation::kWord32, Int32Constant(0));
Variable* vars[] = {&var_result, &var_last_match_end};
Label out(this), loop(this, 2, vars), loop_end(this),
@@ -2919,20 +2879,22 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
GotoIfNot(is_global, &loop);
var_is_unicode.Bind(FastFlagGetter(regexp, JSRegExp::kUnicode));
- FastStoreLastIndex(regexp, smi_zero);
+ FastStoreLastIndex(regexp, SmiZero());
Goto(&loop);
BIND(&loop);
{
- var_match_indices.Bind(RegExpPrototypeExecBodyWithoutResult(
- context, regexp, string, &if_nofurthermatches, kIsFastPath));
+ TNode<RegExpMatchInfo> var_match_indices =
+ RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp),
+ string, &if_nofurthermatches,
+ kIsFastPath);
// Successful match.
{
TNode<Smi> const match_start = CAST(LoadFixedArrayElement(
- var_match_indices.value(), RegExpMatchInfo::kFirstCaptureIndex));
+ var_match_indices, RegExpMatchInfo::kFirstCaptureIndex));
TNode<Smi> const match_end = CAST(LoadFixedArrayElement(
- var_match_indices.value(), RegExpMatchInfo::kFirstCaptureIndex + 1));
+ var_match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1));
TNode<Smi> const replace_length = LoadStringLengthAsSmi(replace_string);
@@ -2944,7 +2906,7 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
var_result = CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured,
context, var_result.value(), first_part));
- GotoIf(SmiEqual(replace_length, smi_zero), &loop_end);
+ GotoIf(SmiEqual(replace_length, SmiZero()), &loop_end);
var_result =
CAST(CallBuiltin(Builtins::kStringAdd_CheckNone_NotTenured, context,
@@ -2959,7 +2921,7 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
GotoIf(SmiNotEqual(match_end, match_start), &loop);
// If match is the empty string, we have to increment lastIndex.
- Node* const this_index = FastLoadLastIndex(regexp);
+ Node* const this_index = FastLoadLastIndex(CAST(regexp));
Node* const next_index = AdvanceStringIndex(
string, this_index, var_is_unicode.value(), kIsFastPath);
FastStoreLastIndex(regexp, next_index);
@@ -2985,10 +2947,10 @@ Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
// Helper that skips a few initial checks.
TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
- Node* const regexp = Parameter(Descriptor::kRegExp);
- TNode<String> const string = CAST(Parameter(Descriptor::kString));
- Node* const replace_value = Parameter(Descriptor::kReplaceValue);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<Object> replace_value = CAST(Parameter(Descriptor::kReplaceValue));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsFastRegExp(context, regexp));
@@ -2997,7 +2959,7 @@ TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
// 2. Is {replace_value} callable?
GotoIf(TaggedIsSmi(replace_value), &checkreplacestring);
- Branch(IsCallableMap(LoadMap(replace_value)), &if_iscallable,
+ Branch(IsCallableMap(LoadMap(CAST(replace_value))), &if_iscallable,
&checkreplacestring);
// 3. Does ToString({replace_value}) contain '$'?
@@ -3019,7 +2981,7 @@ TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
isolate()->factory()->LookupSingleCharacterStringFromCode('$'));
TNode<Smi> const dollar_ix =
CAST(CallBuiltin(Builtins::kStringIndexOf, context, replace_string,
- dollar_string, SmiConstant(0)));
+ dollar_string, SmiZero()));
GotoIfNot(SmiEqual(dollar_ix, SmiConstant(-1)), &runtime);
Return(
@@ -3056,14 +3018,14 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
const int kStringArg = 0;
const int kReplaceValueArg = 1;
- Node* argc =
+ TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const maybe_receiver = args.GetReceiver();
- Node* const maybe_string = args.GetOptionalArgumentValue(kStringArg);
- Node* const replace_value = args.GetOptionalArgumentValue(kReplaceValueArg);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> maybe_receiver = args.GetReceiver();
+ TNode<Object> maybe_string = args.GetOptionalArgumentValue(kStringArg);
+ TNode<Object> replace_value = args.GetOptionalArgumentValue(kReplaceValueArg);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
//
@@ -3108,28 +3070,25 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
// Simple string matching functionality for internal use which does not modify
// the last match info.
TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
- TNode<JSRegExp> const regexp = CAST(Parameter(Descriptor::kRegExp));
- TNode<String> const string = CAST(Parameter(Descriptor::kString));
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
+ TNode<String> string = CAST(Parameter(Descriptor::kString));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- Node* const smi_zero = SmiConstant(0);
- Node* const native_context = LoadNativeContext(context);
- Node* const internal_match_info = LoadContextElement(
- native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
- Node* const match_indices =
- CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
- smi_zero, internal_match_info);
- Node* const null = NullConstant();
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<RegExpMatchInfo> internal_match_info = CAST(LoadContextElement(
+ native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX));
+ TNode<HeapObject> maybe_match_indices =
+ CAST(CallBuiltin(Builtins::kRegExpExecInternal, context, regexp, string,
+ SmiZero(), internal_match_info));
+ TNode<Oddball> null = NullConstant();
Label if_matched(this);
- GotoIfNot(WordEqual(match_indices, null), &if_matched);
+ GotoIfNot(WordEqual(maybe_match_indices, null), &if_matched);
Return(null);
BIND(&if_matched);
- {
- Node* result =
- ConstructNewResultFromMatchInfo(context, regexp, match_indices, string);
- Return(result);
- }
+ TNode<RegExpMatchInfo> match_indices = CAST(maybe_match_indices);
+ Return(
+ ConstructNewResultFromMatchInfo(context, regexp, match_indices, string));
}
class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler {
@@ -3168,24 +3127,20 @@ class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler {
// https://tc39.github.io/proposal-string-matchall/
// %RegExpStringIteratorPrototype%.next ( )
TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
+ const char* method_name = "%RegExpStringIterator%.prototype.next";
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
Label if_match(this), if_no_match(this, Label::kDeferred),
- return_empty_done_result(this, Label::kDeferred),
- throw_bad_receiver(this, Label::kDeferred);
+ return_empty_done_result(this, Label::kDeferred);
// 1. Let O be the this value.
// 2. If Type(O) is not Object, throw a TypeError exception.
- GotoIf(TaggedIsSmi(maybe_receiver), &throw_bad_receiver);
- TNode<HeapObject> receiver = CAST(maybe_receiver);
- GotoIfNot(IsJSReceiver(receiver), &throw_bad_receiver);
-
// 3. If O does not have all of the internal slots of a RegExp String Iterator
// Object Instance (see 5.3), throw a TypeError exception.
- GotoIfNot(InstanceTypeEqual(LoadInstanceType(receiver),
- JS_REGEXP_STRING_ITERATOR_TYPE),
- &throw_bad_receiver);
+ ThrowIfNotInstanceType(context, maybe_receiver,
+ JS_REGEXP_STRING_ITERATOR_TYPE, method_name);
+ TNode<HeapObject> receiver = CAST(maybe_receiver);
// 4. If O.[[Done]] is true, then
// a. Return ! CreateIterResultObject(undefined, true).
@@ -3196,6 +3151,10 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
TNode<Object> iterating_regexp =
LoadObjectField(receiver, JSRegExpStringIterator::kIteratingRegExpOffset);
+ // TODO(jgruber): Verify that this is guaranteed.
+ CSA_CHECK(this, TaggedIsNotSmi(iterating_regexp));
+ CSA_CHECK(this, IsJSReceiver(CAST(iterating_regexp)));
+
// 6. Let S be O.[[IteratedString]].
TNode<String> iterating_string = CAST(
LoadObjectField(receiver, JSRegExpStringIterator::kIteratedStringOffset));
@@ -3212,15 +3171,19 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
{
Label if_fast(this), if_slow(this, Label::kDeferred);
BranchIfFastRegExp(context, iterating_regexp, &if_fast, &if_slow);
+
BIND(&if_fast);
{
- TNode<Object> indices_or_null = CAST(RegExpPrototypeExecBodyWithoutResult(
- context, iterating_regexp, iterating_string, &if_no_match, true));
- var_match = CAST(ConstructNewResultFromMatchInfo(
- context, iterating_regexp, indices_or_null, iterating_string));
+ TNode<RegExpMatchInfo> match_indices =
+ RegExpPrototypeExecBodyWithoutResult(context, CAST(iterating_regexp),
+ iterating_string, &if_no_match,
+ true);
+ var_match = ConstructNewResultFromMatchInfo(
+ context, CAST(iterating_regexp), match_indices, iterating_string);
var_is_fast_regexp = Int32TrueConstant();
Goto(&if_match);
}
+
BIND(&if_slow);
{
var_match = CAST(RegExpExec(context, iterating_regexp, iterating_string));
@@ -3260,9 +3223,9 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
});
CSA_ASSERT(this,
SmiNotEqual(LoadFastJSArrayLength(CAST(var_match.value())),
- SmiConstant(0)));
- TNode<FixedArrayBase> result_fixed_array =
- LoadElements(CAST(var_match.value()));
+ SmiZero()));
+ TNode<FixedArray> result_fixed_array =
+ CAST(LoadElements(CAST(var_match.value())));
TNode<String> match_str =
CAST(LoadFixedArrayElement(result_fixed_array, 0));
@@ -3272,7 +3235,7 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
GotoIfNot(IsEmptyString(match_str), &return_result);
// 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Smi> this_index = CAST(FastLoadLastIndex(iterating_regexp));
+ TNode<Smi> this_index = CAST(FastLoadLastIndex(CAST(iterating_regexp)));
CSA_ASSERT(this, TaggedIsSmi(this_index));
// 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
@@ -3290,13 +3253,12 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
{
// i. Let matchStr be ? ToString(? Get(match, "0")).
TNode<String> match_str = ToString_Inline(
- context, GetProperty(context, var_match.value(), SmiConstant(0)));
+ context, GetProperty(context, var_match.value(), SmiZero()));
GotoIfNot(IsEmptyString(match_str), &return_result);
// 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Object> last_index =
- CAST(SlowLoadLastIndex(context, iterating_regexp));
+ TNode<Object> last_index = SlowLoadLastIndex(context, iterating_regexp);
TNode<Number> this_index = ToLength_Inline(context, last_index);
// 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
@@ -3328,13 +3290,6 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
BIND(&return_empty_done_result);
Return(
AllocateJSIteratorResult(context, UndefinedConstant(), TrueConstant()));
-
- BIND(&throw_bad_receiver);
- {
- ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
- StringConstant("%RegExpStringIterator%.prototype.next"),
- receiver);
- }
}
} // namespace internal
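
The empty-match handling in the hunks above (for replace and for the string iterator) leans on the spec's AdvanceStringIndex operation: after a global or sticky regexp produces an empty match, lastIndex must be bumped past one whole code point, i.e. by two code units when the unicode flag is set and the index sits on a surrogate pair, so the loop makes progress. A minimal standalone sketch of that rule over UTF-16 code units, independent of the CSA code in this patch:

#include <cstddef>
#include <string>

// Sketch of the spec's AdvanceStringIndex(S, index, unicode). Not V8 code;
// names are illustrative only.
size_t AdvanceStringIndex(const std::u16string& s, size_t index, bool unicode) {
  if (!unicode || index + 1 >= s.size()) return index + 1;
  char16_t lead = s[index];
  char16_t trail = s[index + 1];
  bool is_surrogate_pair =
      lead >= 0xD800 && lead <= 0xDBFF && trail >= 0xDC00 && trail <= 0xDFFF;
  return index + (is_surrogate_pair ? 2 : 1);
}

For example, replacing the empty pattern globally in a string that starts with an emoji must advance by 2 at that position; advancing by 1 would split the surrogate pair and revisit the same code point forever.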
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 251e8c035e..fd0e4b6755 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -34,16 +34,21 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
char const* method_name);
protected:
+ TNode<Smi> SmiZero();
+ TNode<IntPtrT> IntPtrZero();
+
// Allocate a RegExpResult with the given length (the number of captures,
// including the match itself), index (the index where the match starts),
- // and input string. |length| and |index| are expected to be tagged, and
- // |input| must be a string.
- Node* AllocateRegExpResult(Node* context, Node* length, Node* index,
- Node* input);
+ // and input string.
+ TNode<JSRegExpResult> AllocateRegExpResult(TNode<Context> context,
+ TNode<Smi> length,
+ TNode<Smi> index,
+ TNode<String> input);
- TNode<Object> FastLoadLastIndex(Node* regexp);
- Node* SlowLoadLastIndex(Node* context, Node* regexp);
- Node* LoadLastIndex(Node* context, Node* regexp, bool is_fastpath);
+ TNode<Object> FastLoadLastIndex(TNode<JSRegExp> regexp);
+ TNode<Object> SlowLoadLastIndex(TNode<Context> context, TNode<Object> regexp);
+ TNode<Object> LoadLastIndex(TNode<Context> context, TNode<Object> regexp,
+ bool is_fastpath);
void FastStoreLastIndex(Node* regexp, Node* value);
void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
@@ -58,21 +63,23 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Variable* var_string_end);
// Low level logic around the actual call into pattern matching code.
- Node* RegExpExecInternal(Node* const context, Node* const regexp,
- Node* const string, Node* const last_index,
- Node* const match_info);
-
- Node* ConstructNewResultFromMatchInfo(Node* const context, Node* const regexp,
- Node* const match_info,
- TNode<String> const string);
-
- Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
- Node* const regexp,
- Node* const string,
- Label* if_didnotmatch,
- const bool is_fastpath);
- Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
- TNode<String> string, const bool is_fastpath);
+ TNode<HeapObject> RegExpExecInternal(TNode<Context> context,
+ TNode<JSRegExp> regexp,
+ TNode<String> string,
+ TNode<Number> last_index,
+ TNode<RegExpMatchInfo> match_info);
+
+ TNode<JSRegExpResult> ConstructNewResultFromMatchInfo(
+ TNode<Context> context, TNode<JSReceiver> maybe_regexp,
+ TNode<RegExpMatchInfo> match_info, TNode<String> string);
+
+ TNode<RegExpMatchInfo> RegExpPrototypeExecBodyWithoutResult(
+ TNode<Context> context, TNode<JSReceiver> maybe_regexp,
+ TNode<String> string, Label* if_didnotmatch, const bool is_fastpath);
+ TNode<HeapObject> RegExpPrototypeExecBody(TNode<Context> context,
+ TNode<JSReceiver> maybe_regexp,
+ TNode<String> string,
+ const bool is_fastpath);
Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
MessageTemplate::Template msg_template,
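
Most of the churn in this header is the move from untyped Node* parameters to TNode<T>, where the template parameter documents (and statically checks) what a value is, and CAST() is the explicit, checked downcast from a less specific node. A rough standalone sketch of the idea, with invented stand-in types (V8's real TNode/CAST live in the CodeStubAssembler machinery and do considerably more):

#include <cassert>
#include <cstdio>

// Illustrative stand-ins, not V8's types.
struct Node { int instance_type; };
struct Smi { static constexpr int kType = 0; };
struct String { static constexpr int kType = 1; };

template <typename T>
class TNode {
 public:
  explicit TNode(Node* node) : node_(node) {}
  Node* raw() const { return node_; }
 private:
  Node* node_;
};

// In this sketch Cast just asserts the dynamic type before rewrapping; the
// real CSA CAST emits a runtime check in debug configurations.
template <typename T>
TNode<T> Cast(Node* node) {
  assert(node->instance_type == T::kType);
  return TNode<T>(node);
}

int main() {
  Node n{String::kType};
  TNode<String> s = Cast<String>(&n);  // ok: |s| is statically known as String
  (void)s;
  // TNode<Smi> bad = Cast<Smi>(&n);   // would assert: wrong dynamic type
  std::printf("typed wrapper ok\n");
}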
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 86bf06b658..e763ab10ea 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
-#include "src/string-builder.h"
+#include "src/string-builder-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index cb9ecfbc61..92c1c65d1f 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -5,7 +5,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/conversions-inl.h"
@@ -14,6 +14,7 @@
#include "src/globals.h"
#include "src/heap/factory.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
@@ -114,7 +115,7 @@ BUILTIN(AtomicsWake) {
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
size_t addr = (i << 2) + NumberToSize(sta->byte_offset());
- return FutexEmulation::Wake(isolate, array_buffer, addr, c);
+ return FutexEmulation::Wake(array_buffer, addr, c);
}
// ES #sec-atomics.wait
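
AtomicsWake above computes the futex key as the byte address of element i in the Int32Array view ((i << 2) plus the view's byte offset) and hands it to FutexEmulation::Wake, which now takes the buffer directly rather than an isolate. As a purely conceptual sketch of the per-address wait/wake bookkeeping such an emulation needs (not V8's FutexEmulation, and omitting the value re-check and timeout that Atomics.wait requires):

#include <algorithm>
#include <condition_variable>
#include <cstddef>
#include <map>
#include <mutex>

class FutexTableSketch {
 public:
  void Wait(size_t addr) {
    std::unique_lock<std::mutex> lock(mutex_);
    Entry& e = entries_[addr];
    ++e.waiters;
    e.cv.wait(lock);  // spurious wakeups ignored for brevity
    --e.waiters;
  }

  // Wakes up to |count| waiters parked on |addr|; returns how many were woken.
  int Wake(size_t addr, int count) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = entries_.find(addr);
    if (it == entries_.end()) return 0;
    int woken = std::min(count, it->second.waiters);
    for (int i = 0; i < woken; ++i) it->second.cv.notify_one();
    return woken;
  }

 private:
  struct Entry {
    std::condition_variable cv;
    int waiters = 0;
  };
  std::mutex mutex_;
  std::map<size_t, Entry> entries_;
};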
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 5524db56da..c46a3fd35d 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -1640,7 +1640,8 @@ class StringPadAssembler : public StringBuiltinsAssembler {
TVARIABLE(String, var_fill_string, StringConstant(" "));
TVARIABLE(IntPtrT, var_fill_length, IntPtrConstant(1));
- Label argc_2(this), dont_pad(this), invalid_string_length(this), pad(this);
+ Label check_fill(this), dont_pad(this), invalid_string_length(this),
+ pad(this);
// If no max_length was provided, return the string.
GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &dont_pad);
@@ -1649,41 +1650,41 @@ class StringPadAssembler : public StringBuiltinsAssembler {
ToLength_Inline(context, arguments.AtIndex(0));
CSA_ASSERT(this, IsNumberNormalized(max_length));
- // Throw if max_length is not a smi or greater than the max string length.
- GotoIfNot(TaggedIsSmi(max_length), &invalid_string_length);
- TNode<Smi> smi_max_length = CAST(max_length);
- GotoIfNot(
- SmiLessThanOrEqual(smi_max_length, SmiConstant(String::kMaxLength)),
- &invalid_string_length);
+ // If max_length <= string_length, return the string.
+ GotoIfNot(TaggedIsSmi(max_length), &check_fill);
+ Branch(SmiLessThanOrEqual(CAST(max_length), string_length), &dont_pad,
+ &check_fill);
- // If the max_length is less than length of the string, return the string.
- CSA_ASSERT(this, TaggedIsPositiveSmi(smi_max_length));
- GotoIf(SmiLessThanOrEqual(smi_max_length, string_length), &dont_pad);
-
- Branch(IntPtrEqual(argc, IntPtrConstant(1)), &pad, &argc_2);
- BIND(&argc_2);
+ BIND(&check_fill);
{
+ GotoIf(IntPtrEqual(argc, IntPtrConstant(1)), &pad);
Node* const fill = arguments.AtIndex(1);
GotoIf(IsUndefined(fill), &pad);
var_fill_string = ToString_Inline(context, fill);
var_fill_length = LoadStringLengthAsWord(var_fill_string.value());
-
- Branch(IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)),
- &pad, &dont_pad);
+ Branch(WordEqual(var_fill_length.value(), IntPtrConstant(0)), &dont_pad,
+ &pad);
}
+
BIND(&pad);
{
CSA_ASSERT(this,
IntPtrGreaterThan(var_fill_length.value(), IntPtrConstant(0)));
- CSA_ASSERT(this, SmiGreaterThan(smi_max_length, string_length));
+
+ // Throw if max_length is greater than String::kMaxLength.
+ GotoIfNot(TaggedIsSmi(max_length), &invalid_string_length);
+ TNode<Smi> smi_max_length = CAST(max_length);
+ GotoIfNot(
+ SmiLessThanOrEqual(smi_max_length, SmiConstant(String::kMaxLength)),
+ &invalid_string_length);
Callable stringadd_callable =
CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ CSA_ASSERT(this, SmiGreaterThan(smi_max_length, string_length));
TNode<Smi> const pad_length = SmiSub(smi_max_length, string_length);
VARIABLE(var_pad, MachineRepresentation::kTagged);
-
Label single_char_fill(this), multi_char_fill(this), return_result(this);
Branch(IntPtrEqual(var_fill_length.value(), IntPtrConstant(1)),
&single_char_fill, &multi_char_fill);
@@ -1837,8 +1838,8 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
ToDirectStringAssembler to_direct(state(), subject_string);
to_direct.TryToDirect(&call_runtime);
- TNode<FixedArray> elements = AllocateFixedArray(
- PACKED_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation);
+ TNode<FixedArray> elements = CAST(AllocateFixedArray(
+ PACKED_ELEMENTS, length, AllocationFlag::kAllowLargeObjectAllocation));
// Don't allocate anything while {string_data} is live!
TNode<RawPtrT> string_data = UncheckedCast<RawPtrT>(
to_direct.PointerToData(&fill_thehole_and_call_runtime));
@@ -1951,7 +1952,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Node* const capacity = IntPtrConstant(1);
Node* const result = AllocateJSArray(kind, array_map, capacity, length);
- Node* const fixed_array = LoadElements(result);
+ TNode<FixedArray> const fixed_array = CAST(LoadElements(result));
StoreFixedArrayElement(fixed_array, 0, subject_string);
args.PopAndReturn(result);
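
The StringPad changes above reorder the checks: return early when no max_length is given or when max_length is at most the string length, resolve the fill string only when a second argument is present, and defer the String::kMaxLength throw until padding is actually needed. The observable behaviour is roughly the following plain C++ sketch of padStart (not the CSA code; kMaxLength here is an illustrative bound):

#include <stdexcept>
#include <string>

std::string PadStart(const std::string& s, size_t max_length,
                     const std::string& fill = " ") {
  static constexpr size_t kMaxLength = (1u << 30) - 25;  // illustrative bound
  if (max_length <= s.size()) return s;   // nothing to pad
  if (fill.empty()) return s;             // empty filler pads nothing
  if (max_length > kMaxLength) throw std::length_error("Invalid string length");

  std::string pad;
  size_t pad_length = max_length - s.size();
  while (pad.size() < pad_length) pad += fill;  // repeat the (multi-char) fill
  pad.resize(pad_length);                       // truncate the last repetition
  return pad + s;
}

Note the ordering: an over-long max_length only throws once we know padding will happen, which matches moving the kMaxLength check into the pad block above.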
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index e52fbd577d..0dafa230b5 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -2,13 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#endif
#include "src/regexp/regexp-utils.h"
-#include "src/string-builder.h"
+#include "src/string-builder-inl.h"
#include "src/string-case.h"
#include "src/unicode-inl.h"
#include "src/unicode.h"
@@ -190,10 +193,18 @@ BUILTIN(StringPrototypeLastIndexOf) {
//
// This function is implementation specific. For now, we do not
// do anything locale specific.
-// If internationalization is enabled, then intl.js will override this function
-// and provide the proper functionality, so this is just a fallback.
BUILTIN(StringPrototypeLocaleCompare) {
HandleScope handle_scope(isolate);
+#ifdef V8_INTL_SUPPORT
+ TO_THIS_STRING(str1, "String.prototype.localeCompare");
+ Handle<String> str2;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, str2, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::StringLocaleCompare(isolate, str1, str2,
+ args.atOrUndefined(isolate, 2),
+ args.atOrUndefined(isolate, 3)));
+#else
DCHECK_EQ(2, args.length());
TO_THIS_STRING(str1, "String.prototype.localeCompare");
@@ -235,6 +246,7 @@ BUILTIN(StringPrototypeLocaleCompare) {
}
return Smi::FromInt(str1_length - str2_length);
+#endif // !V8_INTL_SUPPORT
}
#ifndef V8_INTL_SUPPORT
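
With V8_INTL_SUPPORT the builtin now delegates to Intl::StringLocaleCompare (forwarding the locales and options arguments); without it, the existing fallback remains: compare code units left to right and, on a common prefix, order by length. Roughly, as a standalone sketch rather than the builtin itself:

#include <algorithm>
#include <string>

// Sketch of the non-Intl localeCompare fallback: a plain code-unit comparison.
int LocaleCompareFallback(const std::u16string& a, const std::u16string& b) {
  size_t prefix = std::min(a.size(), b.size());
  for (size_t i = 0; i < prefix; ++i) {
    if (a[i] != b[i]) return a[i] < b[i] ? -1 : 1;
  }
  // Equal up to the shorter length: the shorter string sorts first.
  return static_cast<int>(a.size()) - static_cast<int>(b.size());
}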
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 9ebb8c499d..55c0307484 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/objects-inl.h"
@@ -16,20 +16,20 @@ namespace internal {
// ES #sec-symbol-constructor
BUILTIN(SymbolConstructor) {
HandleScope scope(isolate);
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- Handle<Symbol> result = isolate->factory()->NewSymbol();
- Handle<Object> description = args.atOrUndefined(isolate, 1);
- if (!description->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, description, Object::ToString(isolate, description));
- result->set_name(*description);
- }
- return *result;
- } else { // [[Construct]]
+ if (!args.new_target()->IsUndefined(isolate)) { // [[Construct]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotConstructor,
isolate->factory()->Symbol_string()));
}
+ // [[Call]]
+ Handle<Symbol> result = isolate->factory()->NewSymbol();
+ Handle<Object> description = args.atOrUndefined(isolate, 1);
+ if (!description->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
+ Object::ToString(isolate, description));
+ result->set_name(*description);
+ }
+ return *result;
}
// ES6 section 19.4.2.1 Symbol.for.
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
index cd0f5a77d0..c2b799412f 100644
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ b/deps/v8/src/builtins/builtins-trace.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
-#include "src/builtins/builtins-utils.h"
+#include "src/api-inl.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/json-stringifier.h"
@@ -38,10 +38,11 @@ class MaybeUtf8 {
}
} else {
Local<v8::String> local = Utils::ToLocal(string);
- len = local->Utf8Length();
+ auto* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ len = local->Utf8Length(v8_isolate);
AllocateSufficientSpace(len);
if (len > 0) {
- local->WriteUtf8(reinterpret_cast<char*>(buf_));
+ local->WriteUtf8(v8_isolate, reinterpret_cast<char*>(buf_));
}
}
buf_[len] = 0;
@@ -166,12 +167,11 @@ BUILTIN(Trace) {
// could have perf costs. It is also subject to all the same
// limitations as JSON.stringify() as it relates to circular
// references and value limitations (e.g. BigInt is not supported).
- JsonStringifier stringifier(isolate);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- stringifier.Stringify(data_arg, isolate->factory()->undefined_value(),
- isolate->factory()->undefined_value()));
+ JsonStringify(isolate, data_arg, isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value()));
std::unique_ptr<JsonTraceValue> traced_value;
traced_value.reset(
new JsonTraceValue(isolate, Handle<String>::cast(result)));
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 595ec1f97b..c7c416d924 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -10,6 +10,7 @@
#include "src/builtins/builtins.h"
#include "src/builtins/growable-fixed-array-gen.h"
#include "src/handles-inl.h"
+#include "src/heap/factory-inl.h"
namespace v8 {
namespace internal {
@@ -312,6 +313,7 @@ void TypedArrayBuiltinsAssembler::ConstructByLength(TNode<Context> context,
TNode<JSTypedArray> holder,
TNode<Object> length,
TNode<Smi> element_size) {
+ // TODO(7881): support larger-than-smi typed array lengths
CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
Label invalid_length(this, Label::kDeferred), done(this);
@@ -322,6 +324,7 @@ void TypedArrayBuiltinsAssembler::ConstructByLength(TNode<Context> context,
// The maximum length of a TypedArray is MaxSmi().
// Note: this is not per spec, but rather a constraint of our current
// representation (which uses Smis).
+ // TODO(7881): support larger-than-smi typed array lengths
GotoIf(TaggedIsNotSmi(converted_length), &invalid_length);
// The goto above ensures that byte_length is a Smi.
TNode<Smi> smi_converted_length = CAST(converted_length);
@@ -358,8 +361,7 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
invalid_offset_error(this, Label::kDeferred);
Label offset_is_smi(this), offset_not_smi(this, Label::kDeferred),
check_length(this), call_init(this), invalid_length(this),
- length_undefined(this), length_defined(this), detached_error(this),
- done(this);
+ length_undefined(this), length_defined(this), done(this);
GotoIf(IsUndefined(byte_offset), &check_length);
@@ -396,7 +398,7 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
BIND(&length_undefined);
{
- GotoIf(IsDetachedBuffer(buffer), &detached_error);
+ ThrowIfArrayBufferIsDetached(context, buffer, "Construct");
Node* buffer_byte_length =
LoadObjectField(buffer, JSArrayBuffer::kByteLengthOffset);
@@ -418,7 +420,7 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
BIND(&length_defined);
{
TNode<Smi> new_length = ToSmiIndex(length, context, &invalid_length);
- GotoIf(IsDetachedBuffer(buffer), &detached_error);
+ ThrowIfArrayBufferIsDetached(context, buffer, "Construct");
new_byte_length.Bind(SmiMul(new_length, element_size));
// Reading the byte length must come after the ToIndex operation, which
// could cause the buffer to become detached.
@@ -473,9 +475,6 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayBuffer(
ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength, length);
}
- BIND(&detached_error);
- { ThrowTypeError(context, MessageTemplate::kDetachedOperation, "Construct"); }
-
BIND(&done);
}
@@ -570,7 +569,7 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
TNode<HeapObject> array_like, TNode<Object> initial_length,
TNode<Smi> element_size, TNode<JSReceiver> buffer_constructor) {
Label invalid_length(this, Label::kDeferred), fill(this), fast_copy(this),
- detached_check(this), done(this), detached_error(this, Label::kDeferred);
+ detached_check(this), done(this);
// The caller has looked up length on array_like, which is observable.
TNode<Smi> length = ToSmiLength(initial_length, context, &invalid_length);
@@ -583,9 +582,8 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
Goto(&fill);
BIND(&detached_check);
- GotoIf(IsDetachedBuffer(
- LoadObjectField(array_like, JSTypedArray::kBufferOffset)),
- &detached_error);
+ ThrowIfArrayBufferViewBufferIsDetached(context, CAST(array_like),
+ "Construct");
Goto(&fill);
BIND(&fill);
@@ -626,9 +624,6 @@ void TypedArrayBuiltinsAssembler::ConstructByArrayLike(
Goto(&done);
}
- BIND(&detached_error);
- { ThrowTypeError(context, MessageTemplate::kDetachedOperation, "Construct"); }
-
BIND(&invalid_length);
{
ThrowRangeError(context, MessageTemplate::kInvalidTypedArrayLength,
@@ -750,12 +745,6 @@ TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) {
Return(result);
}
-TF_BUILTIN(TypedArrayConstructorLazyDeoptContinuation,
- TypedArrayBuiltinsAssembler) {
- Node* result = Parameter(Descriptor::kResult);
- Return(result);
-}
-
// ES #sec-typedarray-constructors
TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -789,10 +778,7 @@ TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
Node* context, Node* receiver, const char* method_name, int object_offset) {
// Check if the {receiver} is actually a JSTypedArray.
- Label receiver_is_incompatible(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(receiver), &receiver_is_incompatible);
- GotoIfNot(HasInstanceType(receiver, JS_TYPED_ARRAY_TYPE),
- &receiver_is_incompatible);
+ ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, method_name);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
@@ -806,13 +792,6 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
// The {receiver}'s buffer was neutered, default to zero.
Return(SmiConstant(0));
}
-
- BIND(&receiver_is_incompatible);
- {
- // The {receiver} is not a valid JSTypedArray.
- ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
- StringConstant(method_name), receiver);
- }
}
// ES6 #sec-get-%typedarray%.prototype.bytelength
@@ -977,18 +956,12 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
TNode<JSTypedArray> TypedArrayBuiltinsAssembler::ValidateTypedArray(
TNode<Context> context, TNode<Object> obj, const char* method_name) {
- Label validation_done(this);
-
// If it is not a typed array, throw
ThrowIfNotInstanceType(context, obj, JS_TYPED_ARRAY_TYPE, method_name);
// If the typed array's buffer is detached, throw
- TNode<Object> buffer =
- LoadObjectField(CAST(obj), JSTypedArray::kBufferOffset);
- GotoIfNot(IsDetachedBuffer(buffer), &validation_done);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+ ThrowIfArrayBufferViewBufferIsDetached(context, CAST(obj), method_name);
- BIND(&validation_done);
return CAST(obj);
}
@@ -1129,7 +1102,8 @@ void TypedArrayBuiltinsAssembler::
TNode<JSTypedArray> dest,
TNode<IntPtrT> source_length,
TNode<IntPtrT> offset) {
- CSA_ASSERT(this, Word32Not(IsBigInt64ElementsKind(LoadElementsKind(dest))));
+ CSA_ASSERT(this,
+ Word32BinaryNot(IsBigInt64ElementsKind(LoadElementsKind(dest))));
TNode<ExternalReference> f = ExternalConstant(
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array());
CallCFunction5(MachineType::AnyTagged(), MachineType::AnyTagged(),
@@ -1164,18 +1138,17 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
Label next(this), if_unknown_type(this, Label::kDeferred);
int32_t elements_kinds[] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- Label if_##type##array(this);
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
Label* elements_kind_labels[] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
@@ -1184,11 +1157,12 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
arraysize(elements_kinds));
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- BIND(&if_##type##array); \
- { \
- case_function(TYPE##_ELEMENTS, size, Context::TYPE##_ARRAY_FUN_INDEX); \
- Goto(&next); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, sizeof(ctype), \
+ Context::TYPE##_ARRAY_FUN_INDEX); \
+ Goto(&next); \
}
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -1201,6 +1175,7 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
// ES #sec-get-%typedarray%.prototype.set
TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
+ const char* method_name = "%TypedArray%.prototype.set";
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CodeStubArguments args(
this,
@@ -1209,7 +1184,6 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
Label if_source_is_typed_array(this), if_source_is_fast_jsarray(this),
if_offset_is_out_of_bounds(this, Label::kDeferred),
if_source_too_large(this, Label::kDeferred),
- if_typed_array_is_neutered(this, Label::kDeferred),
if_receiver_is_not_typedarray(this, Label::kDeferred);
// Check the receiver is a typed array.
@@ -1233,9 +1207,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
TNode<Smi> offset_smi = CAST(offset_num);
// Check the receiver is not neutered.
- TNode<Object> receiver_buffer =
- LoadObjectField(CAST(receiver), JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(receiver_buffer), &if_typed_array_is_neutered);
+ ThrowIfArrayBufferViewBufferIsDetached(context, CAST(receiver), method_name);
// Check the source argument is valid and whether a fast path can be taken.
Label call_runtime(this);
@@ -1249,9 +1221,7 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
BIND(&if_source_is_typed_array);
{
// Check the source argument is not neutered.
- TNode<Object> source_buffer =
- LoadObjectField(CAST(source), JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(source_buffer), &if_typed_array_is_neutered);
+ ThrowIfArrayBufferViewBufferIsDetached(context, CAST(source), method_name);
SetTypedArraySource(context, CAST(source), CAST(receiver),
SmiUntag(offset_smi), &call_runtime,
@@ -1277,10 +1247,6 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
BIND(&if_source_too_large);
ThrowRangeError(context, MessageTemplate::kTypedArraySetSourceTooLarge);
- BIND(&if_typed_array_is_neutered);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation,
- "%TypedArray%.prototype.set");
-
BIND(&if_receiver_is_not_typedarray);
ThrowTypeError(context, MessageTemplate::kNotTypedArray);
}
@@ -1289,7 +1255,6 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
const char* method_name = "%TypedArray%.prototype.slice";
Label call_c(this), call_memmove(this), if_count_is_not_zero(this),
- if_typed_array_is_neutered(this, Label::kDeferred),
if_bigint_mixed_types(this, Label::kDeferred);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -1333,9 +1298,9 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
// result array is neutered or not since TypedArraySpeciesCreate checked it.
CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(LoadObjectField(
result_array, JSTypedArray::kBufferOffset))));
- TNode<Object> receiver_buffer =
- LoadObjectField(CAST(receiver), JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(receiver_buffer), &if_typed_array_is_neutered);
+ TNode<JSArrayBuffer> receiver_buffer =
+ LoadArrayBufferViewBuffer(CAST(receiver));
+ ThrowIfArrayBufferIsDetached(context, receiver_buffer, method_name);
// result_array could be a different type from source or share the same
// buffer with the source because of custom species constructor.
@@ -1402,9 +1367,6 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
args.PopAndReturn(result_array);
}
- BIND(&if_typed_array_is_neutered);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
-
BIND(&if_bigint_mixed_types);
ThrowTypeError(context, MessageTemplate::kBigIntMixedTypes);
}
@@ -1491,19 +1453,19 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
size_t const kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
1;
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- Label return_##type##array(this); \
- BIND(&return_##type##array); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ Label return_##type##array(this); \
+ BIND(&return_##type##array); \
Return(StringConstant(#Type "Array"));
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
Label* elements_kind_labels[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &return_##type##array,
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &return_##type##array,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
int32_t elements_kinds[kTypedElementsKindCount] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
TYPE##_ELEMENTS - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -1532,18 +1494,12 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
GotoIfNot(IsJSTypedArray(CAST(receiver)), &throw_bad_receiver);
// Check if the {receiver}'s JSArrayBuffer was neutered.
- TNode<JSArrayBuffer> receiver_buffer = LoadObjectField<JSArrayBuffer>(
- CAST(receiver), JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(this, Label::kDeferred);
- GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
+ ThrowIfArrayBufferViewBufferIsDetached(context, CAST(receiver), method_name);
Return(CreateArrayIterator(context, receiver, kind));
BIND(&throw_bad_receiver);
ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name);
-
- BIND(&if_receiverisneutered);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
// ES #sec-%typedarray%.prototype.values
@@ -1650,44 +1606,6 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
"%TypedArray%.of");
}
-void TypedArrayBuiltinsAssembler::IterableToListSlowPath(
- TNode<Context> context, TNode<Object> iterable, TNode<Object> iterator_fn,
- Variable* created_list) {
- IteratorBuiltinsAssembler iterator_assembler(state());
-
- // 1. Let iteratorRecord be ? GetIterator(items, method).
- IteratorRecord iterator_record =
- iterator_assembler.GetIterator(context, iterable, iterator_fn);
-
- // 2. Let values be a new empty List.
- GrowableFixedArray values(state());
-
- Variable* vars[] = {values.var_array(), values.var_length(),
- values.var_capacity()};
- Label loop_start(this, 3, vars), loop_end(this);
- Goto(&loop_start);
- // 3. Let next be true.
- // 4. Repeat, while next is not false
- BIND(&loop_start);
- {
- // a. Set next to ? IteratorStep(iteratorRecord).
- TNode<Object> next = CAST(
- iterator_assembler.IteratorStep(context, iterator_record, &loop_end));
- // b. If next is not false, then
- // i. Let nextValue be ? IteratorValue(next).
- TNode<Object> next_value =
- CAST(iterator_assembler.IteratorValue(context, next));
- // ii. Append nextValue to the end of the List values.
- values.Push(next_value);
- Goto(&loop_start);
- }
- BIND(&loop_end);
-
- // 5. Return values.
- TNode<JSArray> js_array_values = values.ToJSArray(context);
- created_list->Bind(js_array_values);
-}
-
// This builtin always returns a new JSArray and is thus safe to use even in the
// presence of code that may call back into user-JS.
TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) {
@@ -1695,33 +1613,8 @@ TF_BUILTIN(IterableToList, TypedArrayBuiltinsAssembler) {
TNode<Object> iterable = CAST(Parameter(Descriptor::kIterable));
TNode<Object> iterator_fn = CAST(Parameter(Descriptor::kIteratorFn));
- Label fast_path(this), slow_path(this), done(this);
-
- TVARIABLE(JSArray, created_list);
-
- // This is a fast-path for ignoring the iterator.
- // TODO(petermarshall): Port IterableToListCanBeElided to CSA.
- Node* elided =
- CallRuntime(Runtime::kIterableToListCanBeElided, context, iterable);
- CSA_ASSERT(this, IsBoolean(elided));
- Branch(IsTrue(elided), &fast_path, &slow_path);
-
- BIND(&fast_path);
- {
- TNode<JSArray> input_array = CAST(iterable);
- TNode<JSArray> new_array = CAST(CloneFastJSArray(context, input_array));
- created_list = new_array;
- Goto(&done);
- }
-
- BIND(&slow_path);
- {
- IterableToListSlowPath(context, iterable, iterator_fn, &created_list);
- Goto(&done);
- }
-
- BIND(&done);
- Return(created_list.value());
+ IteratorBuiltinsAssembler iterator_assembler(state());
+ Return(iterator_assembler.IterableToList(context, iterable, iterator_fn));
}
// ES6 #sec-%typedarray%.from
@@ -1807,6 +1700,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
BIND(&from_array_like);
{
+ // TODO(7881): support larger-than-smi typed array lengths
Label if_length_not_smi(this, Label::kDeferred);
final_source = source;
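
A recurring mechanical change in this file is the TYPED_ARRAY_CASE macro losing its explicit size parameter: the element width is now derived as sizeof(ctype) inside each expansion instead of being threaded through the TYPED_ARRAYS list. A stripped-down sketch of that X-macro pattern, with an abbreviated, illustrative list rather than V8's full TYPED_ARRAYS list:

#include <cstdint>
#include <cstdio>

// Abbreviated stand-in for V8's TYPED_ARRAYS X-macro list.
#define TYPED_ARRAYS_SKETCH(V)      \
  V(Uint8, uint8, UINT8, uint8_t)   \
  V(Int32, int32, INT32, int32_t)   \
  V(Float64, float64, FLOAT64, double)

void PrintElementSizes() {
  // The size parameter is gone; each case computes sizeof(ctype) itself.
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
  std::printf(#Type "Array: %zu bytes per element\n", sizeof(ctype));
  TYPED_ARRAYS_SKETCH(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}

int main() { PrintElementSizes(); }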
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index e74469187f..11768d660a 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -55,10 +55,6 @@ class TypedArrayBuiltinsAssembler : public BaseBuiltinsFromDSLAssembler {
TNode<Map> map, TNode<Smi> length,
TNode<Number> byte_offset);
- void IterableToListSlowPath(TNode<Context> context, TNode<Object> iterable,
- TNode<Object> iterator_fn,
- Variable* created_list);
-
TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
TNode<Number> byte_offset);
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index 6750f2863b..34993faf01 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/elements.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
@@ -157,7 +158,7 @@ BUILTIN(TypedArrayPrototypeFill) {
DCHECK_LE(end, len);
DCHECK_LE(count, len);
- return ElementsAccessor::ForKind(kind)->Fill(isolate, array, obj_value,
+ return ElementsAccessor::ForKind(kind)->Fill(array, obj_value,
static_cast<uint32_t>(start),
static_cast<uint32_t>(end));
}
@@ -256,7 +257,7 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
Handle<Object> search_element = args.atOrUndefined(isolate, 1);
ElementsAccessor* elements = array->GetElementsAccessor();
Maybe<int64_t> result = elements->LastIndexOfValue(
- isolate, array, search_element, static_cast<uint32_t>(index));
+ array, search_element, static_cast<uint32_t>(index));
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return *isolate->factory()->NewNumberFromInt64(result.FromJust());
}
diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h
new file mode 100644
index 0000000000..6696324dbd
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-utils-inl.h
@@ -0,0 +1,35 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_UTILS_INL_H_
+#define V8_BUILTINS_BUILTINS_UTILS_INL_H_
+
+#include "src/builtins/builtins-utils.h"
+
+#include "src/arguments-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> BuiltinArguments::atOrUndefined(Isolate* isolate, int index) {
+ if (index >= length()) {
+ return isolate->factory()->undefined_value();
+ }
+ return at<Object>(index);
+}
+
+Handle<Object> BuiltinArguments::receiver() { return at<Object>(0); }
+
+Handle<JSFunction> BuiltinArguments::target() {
+ return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
+}
+
+Handle<HeapObject> BuiltinArguments::new_target() {
+ return Arguments::at<HeapObject>(Arguments::length() - 1 - kNewTargetOffset);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_UTILS_INL_H_
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 35a77c7518..1ea2093702 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -34,30 +34,18 @@ class BuiltinArguments : public Arguments {
return Arguments::at<S>(index);
}
- Handle<Object> atOrUndefined(Isolate* isolate, int index) {
- if (index >= length()) {
- return isolate->factory()->undefined_value();
- }
- return at<Object>(index);
- }
-
- Handle<Object> receiver() { return Arguments::at<Object>(0); }
-
- static const int kNewTargetOffset = 0;
- static const int kTargetOffset = 1;
- static const int kArgcOffset = 2;
- static const int kPaddingOffset = 3;
-
- static const int kNumExtraArgs = 4;
- static const int kNumExtraArgsWithReceiver = 5;
-
- Handle<JSFunction> target() {
- return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
- }
- Handle<HeapObject> new_target() {
- return Arguments::at<HeapObject>(Arguments::length() - 1 -
- kNewTargetOffset);
- }
+ static constexpr int kNewTargetOffset = 0;
+ static constexpr int kTargetOffset = 1;
+ static constexpr int kArgcOffset = 2;
+ static constexpr int kPaddingOffset = 3;
+
+ static constexpr int kNumExtraArgs = 4;
+ static constexpr int kNumExtraArgsWithReceiver = 5;
+
+ inline Handle<Object> atOrUndefined(Isolate* isolate, int index);
+ inline Handle<Object> receiver();
+ inline Handle<JSFunction> target();
+ inline Handle<HeapObject> new_target();
// Gets the total number of arguments including the receiver (but
// excluding extra arguments).
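
The pattern applied to BuiltinArguments above (and to the many files that now include builtins-utils-inl.h) is the usual header split: the class header only declares the accessors as inline, and their definitions move to a separate -inl.h so that translation units which merely need the type do not pull in heavier dependencies such as the factory. In miniature, with invented file names and members:

// --- widget.h (declaration only; keeps includes light) ---------------------
class Widget {
 public:
  inline int DoubledSize() const;  // defined in widget-inl.h
 private:
  int size_ = 21;
};

// --- widget-inl.h (definitions; included only where the body is needed) ----
// #include "widget.h" plus whatever heavy headers the bodies require.
inline int Widget::DoubledSize() const { return 2 * size_; }

// --- some-user.cc -----------------------------------------------------------
// #include "widget-inl.h"   // users of the body include the -inl header
int UseWidget() {
  Widget w;
  return w.DoubledSize();
}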
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 5826ec546e..facfaf93f8 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -5,6 +5,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index ee4031d71a..b2c7433e8d 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -3,10 +3,12 @@
// found in the LICENSE file.
#include "src/builtins/builtins.h"
-#include "src/api.h"
+
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/builtins/builtins-descriptors.h"
#include "src/callable.h"
+#include "src/instruction-stream.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -49,10 +51,13 @@ struct BuiltinMetadata {
#define DECL_TFC(Name, ...) { #Name, Builtins::TFC, {} },
#define DECL_TFS(Name, ...) { #Name, Builtins::TFS, {} },
#define DECL_TFH(Name, ...) { #Name, Builtins::TFH, {} },
+#define DECL_BCH(Name, ...) { #Name "Handler", Builtins::BCH, {} }, \
+ { #Name "WideHandler", Builtins::BCH, {} }, \
+ { #Name "ExtraWideHandler", Builtins::BCH, {} },
#define DECL_ASM(Name, ...) { #Name, Builtins::ASM, {} },
const BuiltinMetadata builtin_metadata[] = {
BUILTIN_LIST(DECL_CPP, DECL_API, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH,
- DECL_ASM)
+ DECL_BCH, DECL_ASM)
};
#undef DECL_CPP
#undef DECL_API
@@ -60,6 +65,7 @@ const BuiltinMetadata builtin_metadata[] = {
#undef DECL_TFC
#undef DECL_TFS
#undef DECL_TFH
+#undef DECL_BCH
#undef DECL_ASM
// clang-format on
@@ -80,7 +86,13 @@ Builtins::Name Builtins::GetBuiltinFromBailoutId(BailoutId id) {
void Builtins::TearDown() { initialized_ = false; }
const char* Builtins::Lookup(Address pc) {
- // may be called during initialization (disassembler!)
+ // Off-heap pc's can be looked up through binary search.
+ if (FLAG_embedded_builtins) {
+ Code* maybe_builtin = InstructionStream::TryLookupCode(isolate_, pc);
+ if (maybe_builtin != nullptr) return name(maybe_builtin->builtin_index());
+ }
+
+ // May be called during initialization (disassembler).
if (initialized_) {
for (int i = 0; i < builtin_count; i++) {
if (isolate_->heap()->builtin(i)->contains(pc)) return name(i);
@@ -154,10 +166,11 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
break; \
}
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
- CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN)
+ CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, IGNORE_BUILTIN)
#undef CASE_OTHER
default:
Builtins::Kind kind = Builtins::KindOf(name);
+ DCHECK_NE(kind, BCH);
if (kind == TFJ || kind == CPP) {
return Callable(code, JSTrampolineDescriptor{});
}
@@ -287,7 +300,7 @@ bool Builtins::IsLazy(int index) {
case kThrowWasmTrapUnreachable: // Required by wasm.
case kToBooleanLazyDeoptContinuation:
case kToNumber: // Required by wasm.
- case kTypedArrayConstructorLazyDeoptContinuation:
+ case kGenericConstructorLazyDeoptContinuation:
case kWasmCompileLazy: // Required by wasm.
case kWasmStackGuard: // Required by wasm.
return false;
@@ -301,7 +314,19 @@ bool Builtins::IsLazy(int index) {
// static
bool Builtins::IsIsolateIndependent(int index) {
DCHECK(IsBuiltinId(index));
+#ifndef V8_TARGET_ARCH_IA32
switch (index) {
+// Bytecode handlers do not yet support being embedded.
+#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
+#define BYTECODE_BUILTIN(Name, ...) \
+ case k##Name##Handler: \
+ case k##Name##WideHandler: \
+ case k##Name##ExtraWideHandler: \
+ return false;
+ BUILTIN_LIST_BYTECODE_HANDLERS(BYTECODE_BUILTIN)
+#undef BYTECODE_BUILTIN
+#endif // V8_EMBEDDED_BYTECODE_HANDLERS
+
// TODO(jgruber): There are currently two blockers for moving
// InterpreterEntryTrampoline into the binary:
// 1. InterpreterEnterBytecode calculates a pointer into the middle of
@@ -316,26 +341,43 @@ bool Builtins::IsIsolateIndependent(int index) {
// of the builtin itself (and not just the trampoline).
case kInterpreterEntryTrampoline:
return false;
-#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
- // TODO(7882): The size of these builtins on MIPS64 and MIPS32 is greater
- // than 128KB, and this triggers generation of MIPS specific trampolines.
- // Trampoline code is not PIC and therefore the builtin is not isolate
- // independent.
- case kArraySpliceTorque:
- case kKeyedLoadIC_Megamorphic:
- case kKeyedStoreIC_Megamorphic:
- case kObjectAssign:
- case kObjectGetOwnPropertyDescriptor:
- case kRegExpMatchFast:
- case kRegExpReplace:
- case kRegExpSplit:
- case kRegExpStringIteratorPrototypeNext:
- case kStoreIC_Uninitialized:
- return false;
-#endif
default:
return true;
}
+#else // V8_TARGET_ARCH_IA32
+ // TODO(jgruber, v8:6666): Implement support.
+ // ia32 is a work-in-progress. This will let us make builtins
+ // isolate-independent one-by-one.
+ switch (index) {
+ case kContinueToCodeStubBuiltin:
+ case kContinueToCodeStubBuiltinWithResult:
+ case kContinueToJavaScriptBuiltin:
+ case kContinueToJavaScriptBuiltinWithResult:
+ case kWasmAllocateHeapNumber:
+ case kWasmCallJavaScript:
+ case kWasmToNumber:
+ case kDoubleToI:
+ return true;
+ default:
+ return false;
+ }
+#endif // V8_TARGET_ARCH_IA32
+ UNREACHABLE();
+}
+
+// static
+bool Builtins::IsWasmRuntimeStub(int index) {
+ DCHECK(IsBuiltinId(index));
+ switch (index) {
+#define CASE_TRAP(Name) case kThrowWasm##Name:
+#define CASE(Name) case k##Name:
+ WASM_RUNTIME_STUB_LIST(CASE, CASE_TRAP)
+#undef CASE_TRAP
+#undef CASE
+ return true;
+ default:
+ return false;
+ }
UNREACHABLE();
}
@@ -380,6 +422,7 @@ const char* Builtins::KindNameOf(int index) {
case TFC: return "TFC";
case TFS: return "TFS";
case TFH: return "TFH";
+ case BCH: return "BCH";
case ASM: return "ASM";
}
// clang-format on
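
Builtins::Lookup now first consults the embedded (off-heap) instruction stream: because the embedded builtins are laid out contiguously with known start offsets, a pc can be mapped back to a builtin index by binary-searching a sorted offset table. A self-contained sketch of that lookup idea (the table and names are invented for illustration; in V8 the actual work happens in InstructionStream::TryLookupCode against the embedded blob):

#include <algorithm>
#include <cstdint>
#include <vector>

// Sorted start offsets of each builtin inside one contiguous code blob; the
// end of builtin i is the start of builtin i + 1, with the blob size as a
// sentinel at the back.
struct EmbeddedBlobSketch {
  uintptr_t blob_start;
  std::vector<uintptr_t> builtin_start_offsets;  // sorted, ends with blob size
};

// Returns the builtin index containing |pc|, or -1 if pc lies outside the blob.
int TryLookupBuiltinIndex(const EmbeddedBlobSketch& blob, uintptr_t pc) {
  if (pc < blob.blob_start) return -1;
  uintptr_t offset = pc - blob.blob_start;
  const auto& starts = blob.builtin_start_offsets;
  if (starts.empty() || offset >= starts.back()) return -1;
  // upper_bound finds the first start greater than |offset|; the builtin
  // containing |offset| begins one entry earlier.
  auto it = std::upper_bound(starts.begin(), starts.end(), offset);
  return static_cast<int>(it - starts.begin()) - 1;
}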
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index ccb9619eeb..0bd3c317bf 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -40,19 +40,23 @@ class Builtins {
enum Name : int32_t {
#define DEF_ENUM(Name, ...) k##Name,
- BUILTIN_LIST_ALL(DEF_ENUM)
+#define DEF_ENUM_BYTECODE_HANDLER(Name, ...) \
+ k##Name##Handler, k##Name##WideHandler, k##Name##ExtraWideHandler,
+ BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM,
+ DEF_ENUM_BYTECODE_HANDLER, DEF_ENUM)
#undef DEF_ENUM
+#undef DEF_ENUM_BYTECODE_HANDLER
builtin_count
};
static const int32_t kNoBuiltinId = -1;
- static bool IsBuiltinId(int maybe_id) {
+ static constexpr bool IsBuiltinId(int maybe_id) {
return 0 <= maybe_id && maybe_id < builtin_count;
}
// The different builtin kinds are documented in builtins-definitions.h.
- enum Kind { CPP, API, TFJ, TFC, TFS, TFH, ASM };
+ enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM };
static BailoutId GetContinuationBailoutId(Name name);
static Name GetBuiltinFromBailoutId(BailoutId);
@@ -111,6 +115,11 @@ class Builtins {
// TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
static bool IsIsolateIndependent(int index);
+ // Wasm runtime stubs are treated specially by wasm. To guarantee reachability
+ // through near jumps, their code is completely copied into a fresh off-heap
+ // area.
+ static bool IsWasmRuntimeStub(int index);
+
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
@@ -170,7 +179,7 @@ class Builtins {
static void Generate_##Name(compiler::CodeAssemblerState* state);
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
- DECLARE_TF, DECLARE_TF, DECLARE_ASM)
+ DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, DECLARE_ASM)
#undef DECLARE_ASM
#undef DECLARE_TF
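
The new BCH kind triples the enum entries per bytecode: the DEF_ENUM_BYTECODE_HANDLER expansion above produces kNameHandler, kNameWideHandler, and kNameExtraWideHandler for every bytecode in the list. Expanded by hand for a hypothetical two-entry list (the real names come from BUILTIN_LIST_BYTECODE_HANDLERS), the generated identifiers look like this:

#include <cstdint>

// Hypothetical two-bytecode list standing in for BUILTIN_LIST_BYTECODE_HANDLERS.
#define BYTECODE_LIST_SKETCH(V) \
  V(LdaZero)                    \
  V(Star)

enum BuiltinNameSketch : int32_t {
#define DEF_ENUM_BYTECODE_HANDLER(Name) \
  k##Name##Handler, k##Name##WideHandler, k##Name##ExtraWideHandler,
  BYTECODE_LIST_SKETCH(DEF_ENUM_BYTECODE_HANDLER)
#undef DEF_ENUM_BYTECODE_HANDLER
  builtin_count_sketch
};

// Expands to:
//   kLdaZeroHandler, kLdaZeroWideHandler, kLdaZeroExtraWideHandler,
//   kStarHandler, kStarWideHandler, kStarExtraWideHandler,
// so builtin_count_sketch == 6 here.
static_assert(builtin_count_sketch == 6, "three handlers per bytecode");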
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 6dd390c795..04c0655bf7 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -89,7 +89,9 @@ void BuiltinsConstantsTableBuilder::Finalize() {
isolate_->heap()->builtins_constants_table());
DCHECK(isolate_->serializer_enabled());
- DCHECK_LT(0, map_.size());
+ // An empty map means there's nothing to do.
+ if (map_.size() == 0) return;
+
Handle<FixedArray> table =
isolate_->factory()->NewFixedArray(map_.size(), TENURED);
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 7cdc74a944..874c122995 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -13,6 +13,58 @@ module data_view {
extern operator '.backing_store'
macro LoadArrayBufferBackingStore(JSArrayBuffer): RawPtr;
+ macro MakeDataViewGetterNameString(kind: constexpr ElementsKind): String {
+ if constexpr (kind == UINT8_ELEMENTS) {
+ return 'DataView.prototype.getUint8';
+ } else if constexpr (kind == INT8_ELEMENTS) {
+ return 'DataView.prototype.getInt8';
+ } else if constexpr (kind == UINT16_ELEMENTS) {
+ return 'DataView.prototype.getUint16';
+ } else if constexpr (kind == INT16_ELEMENTS) {
+ return 'DataView.prototype.getInt16';
+ } else if constexpr (kind == UINT32_ELEMENTS) {
+ return 'DataView.prototype.getUint32';
+ } else if constexpr (kind == INT32_ELEMENTS) {
+ return 'DataView.prototype.getInt32';
+ } else if constexpr (kind == FLOAT32_ELEMENTS) {
+ return 'DataView.prototype.getFloat32';
+ } else if constexpr (kind == FLOAT64_ELEMENTS) {
+ return 'DataView.prototype.getFloat64';
+ } else if constexpr (kind == BIGINT64_ELEMENTS) {
+ return 'DataView.prototype.getBigInt64';
+ } else if constexpr (kind == BIGUINT64_ELEMENTS) {
+ return 'DataView.prototype.getBigUint64';
+ } else {
+ unreachable;
+ }
+ }
+
+ macro MakeDataViewSetterNameString(kind: constexpr ElementsKind): String {
+ if constexpr (kind == UINT8_ELEMENTS) {
+ return 'DataView.prototype.setUint8';
+ } else if constexpr (kind == INT8_ELEMENTS) {
+ return 'DataView.prototype.setInt8';
+ } else if constexpr (kind == UINT16_ELEMENTS) {
+ return 'DataView.prototype.setUint16';
+ } else if constexpr (kind == INT16_ELEMENTS) {
+ return 'DataView.prototype.setInt16';
+ } else if constexpr (kind == UINT32_ELEMENTS) {
+ return 'DataView.prototype.setUint32';
+ } else if constexpr (kind == INT32_ELEMENTS) {
+ return 'DataView.prototype.setInt32';
+ } else if constexpr (kind == FLOAT32_ELEMENTS) {
+ return 'DataView.prototype.setFloat32';
+ } else if constexpr (kind == FLOAT64_ELEMENTS) {
+ return 'DataView.prototype.setFloat64';
+ } else if constexpr (kind == BIGINT64_ELEMENTS) {
+ return 'DataView.prototype.setBigInt64';
+ } else if constexpr (kind == BIGUINT64_ELEMENTS) {
+ return 'DataView.prototype.setBigUint64';
+ } else {
+ unreachable;
+ }
+ }
+
macro WasNeutered(view: JSArrayBufferView): bool {
return IsDetachedBuffer(view.buffer);
}
@@ -71,12 +123,13 @@ module data_view {
extern macro LoadUint8(RawPtr, intptr): uint32;
extern macro LoadInt8(RawPtr, intptr): int32;
- macro LoadDataViewUint8(buffer: JSArrayBuffer, offset: intptr): Smi {
- return convert<Smi>(LoadUint8(buffer.backing_store, offset));
- }
-
- macro LoadDataViewInt8(buffer: JSArrayBuffer, offset: intptr): Smi {
- return convert<Smi>(LoadInt8(buffer.backing_store, offset));
+ macro LoadDataView8(buffer: JSArrayBuffer, offset: intptr,
+ signed: constexpr bool): Smi {
+ if constexpr (signed) {
+ return convert<Smi>(LoadInt8(buffer.backing_store, offset));
+ } else {
+ return convert<Smi>(LoadUint8(buffer.backing_store, offset));
+ }
}
macro LoadDataView16(buffer: JSArrayBuffer, offset: intptr,
@@ -108,7 +161,7 @@ module data_view {
macro LoadDataView32(buffer: JSArrayBuffer, offset: intptr,
requested_little_endian: bool,
- signed: constexpr bool): Number {
+ kind: constexpr ElementsKind): Number {
let data_pointer: RawPtr = buffer.backing_store;
let b0: uint32 = LoadUint8(data_pointer, offset);
@@ -123,31 +176,16 @@ module data_view {
result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
}
- if constexpr (signed) {
+ if constexpr (kind == INT32_ELEMENTS) {
return convert<Number>(Signed(result));
- } else {
+ } else if constexpr (kind == UINT32_ELEMENTS) {
return convert<Number>(result);
- }
- }
-
- macro LoadDataViewFloat32(buffer: JSArrayBuffer, offset: intptr,
- requested_little_endian: bool): Number {
- let data_pointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(data_pointer, offset);
- let b1: uint32 = LoadUint8(data_pointer, offset + 1);
- let b2: uint32 = LoadUint8(data_pointer, offset + 2);
- let b3: uint32 = LoadUint8(data_pointer, offset + 3);
- let result: uint32;
-
- if (requested_little_endian) {
- result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ } else if constexpr (kind == FLOAT32_ELEMENTS) {
+ let float_res: float64 = convert<float64>(BitcastInt32ToFloat32(result));
+ return convert<Number>(float_res);
} else {
- result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
+ unreachable;
}
-
- let float_res: float64 = convert<float64>(BitcastInt32ToFloat32(result));
- return convert<Number>(float_res);
}
macro LoadDataViewFloat64(buffer: JSArrayBuffer, offset: intptr,
@@ -186,11 +224,11 @@ module data_view {
extern macro DataViewEncodeBigIntBits(constexpr bool,
constexpr int31): intptr;
- const kPositiveBigInt: constexpr bool generates 'false';
- const kNegativeBigInt: constexpr bool generates 'true';
- const kZeroDigitBigInt: constexpr int31 generates '0';
- const kOneDigitBigInt: constexpr int31 generates '1';
- const kTwoDigitBigInt: constexpr int31 generates '2';
+ const kPositiveBigInt: constexpr bool = false;
+ const kNegativeBigInt: constexpr bool = true;
+ const kZeroDigitBigInt: constexpr int31 = 0;
+ const kOneDigitBigInt: constexpr int31 = 1;
+ const kTwoDigitBigInt: constexpr int31 = 2;
macro CreateEmptyBigInt(is_positive: bool, length: constexpr int31): BigInt {
// Allocate a BigInt with the desired length (number of digits).
@@ -357,10 +395,8 @@ module data_view {
requested_little_endian: Object,
kind: constexpr ElementsKind): Numeric {
- // TODO(theotime): add more specific method name to match
- // the former implementation.
let data_view: JSDataView = ValidateDataView(
- context, receiver, 'get DataView.prototype.get');
+ context, receiver, MakeDataViewGetterNameString(kind));
let getIndex: Number;
try {
@@ -374,7 +410,8 @@ module data_view {
let buffer: JSArrayBuffer = data_view.buffer;
if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(context, kDetachedOperation, 'DataView.prototype.get');
+ ThrowTypeError(context, kDetachedOperation,
+ MakeDataViewGetterNameString(kind));
}
let viewOffset: Number = data_view.byte_offset;
@@ -393,38 +430,30 @@ module data_view {
let bufferIndex: intptr = getIndexIntptr + viewOffsetIntptr;
if constexpr (kind == UINT8_ELEMENTS) {
- return LoadDataViewUint8(buffer, bufferIndex);
+ return LoadDataView8(buffer, bufferIndex, false);
} else if constexpr (kind == INT8_ELEMENTS) {
- return LoadDataViewInt8(buffer, bufferIndex);
+ return LoadDataView8(buffer, bufferIndex, true);
} else if constexpr (kind == UINT16_ELEMENTS) {
return LoadDataView16(buffer, bufferIndex, littleEndian, false);
} else if constexpr (kind == INT16_ELEMENTS) {
return LoadDataView16(buffer, bufferIndex, littleEndian, true);
} else if constexpr (kind == UINT32_ELEMENTS) {
- return LoadDataView32(buffer, bufferIndex, littleEndian, false);
+ return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
} else if constexpr (kind == INT32_ELEMENTS) {
- return LoadDataView32(buffer, bufferIndex, littleEndian, true);
+ return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
} else if constexpr (kind == FLOAT32_ELEMENTS) {
- return LoadDataViewFloat32(buffer, bufferIndex, littleEndian);
+ return LoadDataView32(buffer, bufferIndex, littleEndian, kind);
} else if constexpr (kind == FLOAT64_ELEMENTS) {
return LoadDataViewFloat64(buffer, bufferIndex, littleEndian);
- } else if constexpr (kind == BIGINT64_ELEMENTS) {
- return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, true);
} else if constexpr (kind == BIGUINT64_ELEMENTS) {
return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, false);
+ } else if constexpr (kind == BIGINT64_ELEMENTS) {
+ return LoadDataViewBigInt(buffer, bufferIndex, littleEndian, true);
} else {
unreachable;
}
}
- javascript builtin DataViewPrototypeGetInt8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ?
- arguments[0] :
- Undefined;
- return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS);
- }
-
javascript builtin DataViewPrototypeGetUint8(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
@@ -433,7 +462,15 @@ module data_view {
return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS);
}
- javascript builtin DataViewPrototypeGetInt16(
+ javascript builtin DataViewPrototypeGetInt8(
+ context: Context, receiver: Object, ...arguments): Object {
+ let offset: Object = arguments.length > 0 ?
+ arguments[0] :
+ Undefined;
+ return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS);
+ }
+
+ javascript builtin DataViewPrototypeGetUint16(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -442,10 +479,10 @@ module data_view {
arguments[1] :
Undefined;
return DataViewGet(context, receiver, offset, is_little_endian,
- INT16_ELEMENTS);
+ UINT16_ELEMENTS);
}
- javascript builtin DataViewPrototypeGetUint16(
+ javascript builtin DataViewPrototypeGetInt16(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -454,10 +491,10 @@ module data_view {
arguments[1] :
Undefined;
return DataViewGet(context, receiver, offset, is_little_endian,
- UINT16_ELEMENTS);
+ INT16_ELEMENTS);
}
- javascript builtin DataViewPrototypeGetInt32(
+ javascript builtin DataViewPrototypeGetUint32(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -466,10 +503,10 @@ module data_view {
arguments[1] :
Undefined;
return DataViewGet(context, receiver, offset, is_little_endian,
- INT32_ELEMENTS);
+ UINT32_ELEMENTS);
}
- javascript builtin DataViewPrototypeGetUint32(
+ javascript builtin DataViewPrototypeGetInt32(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -478,7 +515,7 @@ module data_view {
arguments[1] :
Undefined;
return DataViewGet(context, receiver, offset, is_little_endian,
- UINT32_ELEMENTS);
+ INT32_ELEMENTS);
}
javascript builtin DataViewPrototypeGetFloat32(
@@ -505,7 +542,7 @@ module data_view {
FLOAT64_ELEMENTS);
}
- javascript builtin DataViewPrototypeGetBigInt64(
+ javascript builtin DataViewPrototypeGetBigUint64(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -514,10 +551,10 @@ module data_view {
arguments[1] :
Undefined;
return DataViewGet(context, receiver, offset, is_little_endian,
- BIGINT64_ELEMENTS);
+ BIGUINT64_ELEMENTS);
}
- javascript builtin DataViewPrototypeGetBigUint64(
+ javascript builtin DataViewPrototypeGetBigInt64(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -526,7 +563,7 @@ module data_view {
arguments[1] :
Undefined;
return DataViewGet(context, receiver, offset, is_little_endian,
- BIGUINT64_ELEMENTS);
+ BIGINT64_ELEMENTS);
}
extern macro ToNumber(Context, Object): Number;
@@ -670,10 +707,8 @@ module data_view {
requested_little_endian: Object,
kind: constexpr ElementsKind): Object {
- // TODO(theotime): add more specific method name to match
- // the former implementation.
let data_view: JSDataView = ValidateDataView(
- context, receiver, 'get DataView.prototype.get');
+ context, receiver, MakeDataViewSetterNameString(kind));
let getIndex: Number;
try {
@@ -697,7 +732,8 @@ module data_view {
}
if (IsDetachedBuffer(buffer)) {
- ThrowTypeError(context, kDetachedOperation, 'DataView.prototype.get');
+ ThrowTypeError(context, kDetachedOperation,
+ MakeDataViewSetterNameString(kind));
}
let viewOffset: Number = data_view.byte_offset;
@@ -749,7 +785,7 @@ module data_view {
return Undefined;
}
- javascript builtin DataViewPrototypeSetInt8(
+ javascript builtin DataViewPrototypeSetUint8(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -758,10 +794,10 @@ module data_view {
arguments[1] :
Undefined;
return DataViewSet(context, receiver, offset, value, Undefined,
- INT8_ELEMENTS);
+ UINT8_ELEMENTS);
}
- javascript builtin DataViewPrototypeSetUint8(
+ javascript builtin DataViewPrototypeSetInt8(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -770,10 +806,10 @@ module data_view {
arguments[1] :
Undefined;
return DataViewSet(context, receiver, offset, value, Undefined,
- UINT8_ELEMENTS);
+ INT8_ELEMENTS);
}
- javascript builtin DataViewPrototypeSetInt16(
+ javascript builtin DataViewPrototypeSetUint16(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -785,10 +821,10 @@ module data_view {
arguments[2] :
Undefined;
return DataViewSet(context, receiver, offset, value,
- is_little_endian, INT16_ELEMENTS);
+ is_little_endian, UINT16_ELEMENTS);
}
- javascript builtin DataViewPrototypeSetUint16(
+ javascript builtin DataViewPrototypeSetInt16(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -800,10 +836,10 @@ module data_view {
arguments[2] :
Undefined;
return DataViewSet(context, receiver, offset, value,
- is_little_endian, UINT16_ELEMENTS);
+ is_little_endian, INT16_ELEMENTS);
}
- javascript builtin DataViewPrototypeSetInt32(
+ javascript builtin DataViewPrototypeSetUint32(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -815,10 +851,10 @@ module data_view {
arguments[2] :
Undefined;
return DataViewSet(context, receiver, offset, value,
- is_little_endian, INT32_ELEMENTS);
+ is_little_endian, UINT32_ELEMENTS);
}
- javascript builtin DataViewPrototypeSetUint32(
+ javascript builtin DataViewPrototypeSetInt32(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -830,7 +866,7 @@ module data_view {
arguments[2] :
Undefined;
return DataViewSet(context, receiver, offset, value,
- is_little_endian, UINT32_ELEMENTS);
+ is_little_endian, INT32_ELEMENTS);
}
javascript builtin DataViewPrototypeSetFloat32(
@@ -863,7 +899,7 @@ module data_view {
is_little_endian, FLOAT64_ELEMENTS);
}
- javascript builtin DataViewPrototypeSetBigInt64(
+ javascript builtin DataViewPrototypeSetBigUint64(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -875,10 +911,10 @@ module data_view {
arguments[2] :
Undefined;
return DataViewSet(context, receiver, offset, value,
- is_little_endian, BIGINT64_ELEMENTS);
+ is_little_endian, BIGUINT64_ELEMENTS);
}
- javascript builtin DataViewPrototypeSetBigUint64(
+ javascript builtin DataViewPrototypeSetBigInt64(
context: Context, receiver: Object, ...arguments): Object {
let offset: Object = arguments.length > 0 ?
arguments[0] :
@@ -890,7 +926,7 @@ module data_view {
arguments[2] :
Undefined;
return DataViewSet(context, receiver, offset, value,
- is_little_endian, BIGUINT64_ELEMENTS);
+ is_little_endian, BIGINT64_ELEMENTS);
}
}
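The Torque macros in this file pick the error-message string and the load/store variant at compile time via `if constexpr` on a constexpr ElementsKind, so each generated builtin contains only its own branch. A rough C++17 analogue of that dispatch; the enum values and names below are assumptions for illustration:

```cpp
#include <string_view>

enum class ElementsKind { kUint8, kInt8, kFloat32 };

// Mirrors MakeDataViewGetterNameString: the branch is resolved during
// template instantiation, so each specialization carries exactly one string.
template <ElementsKind kKind>
constexpr std::string_view GetterName() {
  if constexpr (kKind == ElementsKind::kUint8) {
    return "DataView.prototype.getUint8";
  } else if constexpr (kKind == ElementsKind::kInt8) {
    return "DataView.prototype.getInt8";
  } else {
    return "DataView.prototype.getFloat32";
  }
}

static_assert(GetterName<ElementsKind::kInt8>() ==
              std::string_view("DataView.prototype.getInt8"));
```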
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index eae9ff5594..3a155e26f9 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -90,8 +90,8 @@ TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
- TNode<FixedArray> to_array = ExtractFixedArray(
- from_array, nullptr, element_count, new_capacity, flags);
+ TNode<FixedArray> to_array = CAST(ExtractFixedArray(
+ from_array, nullptr, element_count, new_capacity, flags));
return to_array;
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index d1c0a5d5fb..4707667bbf 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -11,7 +11,9 @@
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -327,7 +329,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// limit" is checked.
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(scratch1, Operand::StaticVariable(real_stack_limit));
+ __ mov(scratch1, __ StaticVariable(real_stack_limit));
// Make scratch2 the space we have left. The stack might already be overflowed
// here which will cause scratch2 to become negative.
__ mov(scratch2, esp);
@@ -354,7 +356,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
- __ mov(esi, Operand::StaticVariable(context_address));
+ __ mov(esi, __ StaticVariable(context_address));
// Load the previous frame pointer (ebx) to access C arguments
__ mov(ebx, Operand(ebp, 0));
@@ -456,13 +458,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
Label stepping_prepared;
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
+ __ cmpb(__ StaticVariable(debug_hook), Immediate(0));
__ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ cmp(edx, Operand::StaticVariable(debug_suspended_generator));
+ __ cmp(edx, __ StaticVariable(debug_suspended_generator));
__ j(equal, &prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -755,7 +757,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
#define JUMP_IF_EQUAL(NAME) \
__ cmpb(bytecode, \
Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
- __ j(equal, if_return, Label::kNear);
+ __ j(equal, if_return);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
@@ -843,7 +845,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sub(ecx, ebx);
ExternalReference stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ cmp(ecx, Operand::StaticVariable(stack_limit));
+ __ cmp(ecx, __ StaticVariable(stack_limit));
__ j(above_equal, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
@@ -1113,6 +1115,9 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
__ AssertFunction(edi);
+ // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the
+ // kRootRegister ebx, this useless move can be removed.
+ __ Move(kJavaScriptCallExtraArg1Register, ebx);
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
__ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1688,7 +1693,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label done;
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ __ mov(edx, __ StaticVariable(real_stack_limit));
// Make edx the space we have left. The stack might already be overflowed
// here which will cause edx to become negative.
__ neg(edx);
@@ -2383,7 +2388,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Convert to Smi for the runtime call.
__ SmiTag(edi);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
@@ -2438,7 +2443,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
// eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
+ // edx: pointer to C function
// ebp: frame pointer (restored after C call)
// esp: stack pointer (restored after C call)
// esi: current context (C callee-saved)
@@ -2447,6 +2452,16 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// ecx: pointer to the first argument
+ STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
+ STATIC_ASSERT(ecx == kRuntimeCallArgvRegister);
+ STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
+ STATIC_ASSERT(esi == kContextRegister);
+ STATIC_ASSERT(edi == kJSFunctionRegister);
+
+ DCHECK(!AreAliased(kRuntimeCallArgCountRegister, kRuntimeCallArgvRegister,
+ kRuntimeCallFunctionRegister, kContextRegister,
+ kJSFunctionRegister, kRootRegister));
+
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Reserve space on the stack for the three arguments passed to the call. If
@@ -2470,7 +2485,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
- // ebx: pointer to C function (C callee-saved)
+ // edx: pointer to C function
// ebp: frame pointer (restored after C call)
// esp: stack pointer (restored after C call)
// edi: number of arguments including receiver (C callee-saved)
@@ -2487,7 +2502,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address(masm->isolate())));
- __ call(ebx);
+ __ call(kRuntimeCallFunctionRegister);
// Result is in eax or edx:eax - do not destroy these registers!
@@ -2504,7 +2519,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
Label okay;
ExternalReference pending_exception_address = ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
- __ cmp(edx, Operand::StaticVariable(pending_exception_address));
+ __ cmp(edx, __ StaticVariable(pending_exception_address));
// Cannot use check here as it attempts to generate call into runtime.
__ j(equal, &okay, Label::kNear);
__ int3();
@@ -2544,9 +2559,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
// Retrieve the handler context, SP and FP.
- __ mov(esi, Operand::StaticVariable(pending_handler_context_address));
- __ mov(esp, Operand::StaticVariable(pending_handler_sp_address));
- __ mov(ebp, Operand::StaticVariable(pending_handler_fp_address));
+ __ mov(esi, __ StaticVariable(pending_handler_context_address));
+ __ mov(esp, __ StaticVariable(pending_handler_sp_address));
+ __ mov(ebp, __ StaticVariable(pending_handler_fp_address));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (esi == 0) for non-JS frames.
@@ -2563,7 +2578,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
- __ mov(edi, Operand::StaticVariable(pending_handler_entrypoint_address));
+ __ mov(edi, __ StaticVariable(pending_handler_entrypoint_address));
__ jmp(edi);
}
@@ -2828,6 +2843,9 @@ void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
RelocInfo::CODE_TARGET);
__ bind(&not_one_case);
+ // TODO(v8:6666): When rewriting ia32 ASM builtins to not clobber the
+ // kRootRegister ebx, this useless move can be removed.
+ __ Move(kJavaScriptCallExtraArg1Register, ebx);
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
__ Jump(code, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
index cf2df277c9..c653ce404d 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
+ibogosavljevic@wavecomp.com

+skovacevic@wavecomp.com
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 12c1d60757..0c892c960f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -13,7 +13,9 @@
#include "src/frames.h"
#include "src/mips/constants-mips.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -2271,7 +2273,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Convert to Smi for the runtime call.
__ SmiTag(t0);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
@@ -2364,10 +2366,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (kArchVariant >= kMips32r6) {
__ addiupc(ra, kNumInstructionsToJump + 1);
} else {
- // This branch-and-link sequence is needed to find the current PC on mips
- // before r6, saved to the ra register.
- __ bal(&find_ra); // bal exposes branch delay slot.
- __ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+ // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS
+ __ nal(); // nal has branch delay slot.
+ __ Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
}
__ bind(&find_ra);
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index a93c75b2fc..d59f7c0ce5 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -13,7 +13,9 @@
#include "src/frames.h"
#include "src/mips64/constants-mips64.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -2288,7 +2290,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Convert to Smi for the runtime call
__ SmiTag(t0);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
@@ -2382,10 +2384,9 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
if (kArchVariant >= kMips64r6) {
__ addiupc(ra, kNumInstructionsToJump + 1);
} else {
- // This branch-and-link sequence is needed to find the current PC on mips
- // before r6, saved to the ra register.
- __ bal(&find_ra); // bal exposes branch delay slot.
- __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+ // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS
+ __ nal(); // nal has branch delay slot.
+ __ Daddu(ra, ra, kNumInstructionsToJump * kInstrSize);
}
__ bind(&find_ra);
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 3ba3e99789..01a0e4e371 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -11,7 +11,9 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/objects/js-generator.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -2299,7 +2301,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Convert to Smi for the runtime call.
__ SmiTag(r15, r15);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
@@ -2415,14 +2417,14 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
Label start_call;
- constexpr int after_call_offset = 5 * Assembler::kInstrSize;
+ constexpr int after_call_offset = 5 * kInstrSize;
DCHECK_NE(r7, target);
__ LoadPC(r7);
__ bind(&start_call);
__ addi(r7, r7, Operand(after_call_offset));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ Call(target);
- DCHECK_EQ(after_call_offset - Assembler::kInstrSize,
+ DCHECK_EQ(after_call_offset - kInstrSize,
__ SizeOfCodeGeneratedSince(&start_call));
// If return value is on the stack, pop it to registers.
@@ -2530,7 +2532,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
Register result_reg = r3;
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
// Immediate values for this stub fit in instructions, so it's safe to use ip.
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index c7e955f6e4..b92011c38b 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -11,7 +11,9 @@
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
+#include "src/objects/js-generator.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -2304,7 +2306,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Convert to Smi for the runtime call.
__ SmiTag(r7, r7);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
@@ -2524,7 +2526,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done, fastpath_done;
Register result_reg = r2;
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
// Immediate values for this stub fit in instructions, so it's safe to use ip.
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index ab0180e825..93a2b8b5f3 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -10,6 +10,8 @@
#include "src/compiler/code-assembler.h"
#include "src/handles-inl.h"
#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-generator.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/shared-function-info.h"
@@ -38,13 +40,22 @@ void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
int32_t builtin_index) {
AssemblerOptions options = AssemblerOptions::Default(isolate);
- if (isolate->ShouldLoadConstantsFromRootList() &&
- Builtins::IsIsolateIndependent(builtin_index) &&
- isolate->heap()->memory_allocator()->code_range()->valid() &&
- isolate->heap()->memory_allocator()->code_range()->size() <=
- kMaxPCRelativeCodeRangeInMB * MB) {
- options.use_pc_relative_calls_and_jumps = true;
+ CHECK(!options.isolate_independent_code);
+ CHECK(!options.use_pc_relative_calls_and_jumps);
+
+ if (!isolate->ShouldLoadConstantsFromRootList() ||
+ !Builtins::IsIsolateIndependent(builtin_index)) {
+ return options;
}
+
+ CodeRange* code_range = isolate->heap()->memory_allocator()->code_range();
+ bool pc_relative_calls_fit_in_code_range =
+ code_range->valid() &&
+ code_range->size() <= kMaxPCRelativeCodeRangeInMB * MB;
+
+ options.isolate_independent_code = true;
+ options.use_pc_relative_calls_and_jumps = pc_relative_calls_fit_in_code_range;
+
return options;
}
@@ -235,6 +246,27 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
}
}
+#ifdef V8_EMBEDDED_BYTECODE_HANDLERS
+namespace {
+Code* GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
+ const char* name, interpreter::Bytecode bytecode,
+ interpreter::OperandScale operand_scale) {
+ if (!interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
+ // TODO(v8:8068): Consider returning something else to avoid placeholders
+ // being serialized with the snapshot.
+ return nullptr;
+ }
+
+ Handle<Code> code = interpreter::GenerateBytecodeHandler(
+ isolate, bytecode, operand_scale, builtin_index);
+
+ PostBuildProfileAndTracing(isolate, *code, name);
+
+ return *code;
+}
+} // namespace
+#endif
+
// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
@@ -276,13 +308,28 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
isolate, index, &Builtins::Generate_##Name, \
CallDescriptors::InterfaceDescriptor, #Name, 1); \
AddBuiltin(builtins, index++, code);
+
+#define BUILD_BCH_WITH_SCALE(Code, Scale) \
+ code = GenerateBytecodeHandler(isolate, index, Builtins::name(index), \
+ interpreter::Bytecode::k##Code, \
+ interpreter::OperandScale::k##Scale); \
+ if (code) { \
+ AddBuiltin(builtins, index, code); \
+ } \
+ ++index;
+
+#define BUILD_BCH(Code, ...) \
+ BUILD_BCH_WITH_SCALE(Code, Single) \
+ BUILD_BCH_WITH_SCALE(Code, Double) \
+ BUILD_BCH_WITH_SCALE(Code, Quadruple)
+
#define BUILD_ASM(Name) \
code = BuildWithMacroAssembler(isolate, index, Builtins::Generate_##Name, \
#Name); \
AddBuiltin(builtins, index++, code);
BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH,
- BUILD_ASM);
+ BUILD_BCH, BUILD_ASM);
#undef BUILD_CPP
#undef BUILD_API
@@ -290,6 +337,8 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
#undef BUILD_TFC
#undef BUILD_TFS
#undef BUILD_TFH
+#undef BUILD_BCH
+#undef BUILD_BCH_WITH_SCALE
#undef BUILD_ASM
CHECK_EQ(Builtins::builtin_count, index);
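One subtlety in BUILD_BCH_WITH_SCALE above: GenerateBytecodeHandler may return nullptr when a (bytecode, operand scale) pair has no handler, yet the builtin index still advances, which is what keeps the final CHECK_EQ against builtin_count valid. A small stand-alone model of that bookkeeping, with made-up counts and a made-up HasHandler rule:

```cpp
#include <cassert>
#include <vector>

// Made-up rule for the demo: every bytecode has a Single-scale handler,
// but only bytecode 0 has Wide/ExtraWide handlers.
bool HasHandler(int bytecode, int scale) {
  return scale == 0 || bytecode == 0;
}

int main() {
  const int kBytecodes = 2, kScales = 3;
  std::vector<bool> has_code(kBytecodes * kScales, false);
  int index = 0;
  for (int bytecode = 0; bytecode < kBytecodes; ++bytecode) {
    for (int scale = 0; scale < kScales; ++scale) {
      if (HasHandler(bytecode, scale)) has_code[index] = true;
      ++index;  // The slot is consumed whether or not code was generated.
    }
  }
  assert(index == kBytecodes * kScales);  // Analogue of the CHECK_EQ above.
  assert(!has_code[4]);  // Bytecode 1 at Double scale was skipped.
}
```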
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 2d2a086de5..7552b094e7 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -10,7 +10,7 @@ module typed_array {
extern macro LoadFixedTypedArrayElementAsTagged(
RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object;
extern macro StoreFixedTypedArrayElementFromTagged(
- Context, FixedArrayBase, Smi, Object, constexpr ElementsKind,
+ Context, FixedTypedArrayBase, Smi, Object, constexpr ElementsKind,
constexpr ParameterMode);
type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
@@ -60,7 +60,7 @@ module typed_array {
builtin StoreFixedElement<T : type>(
context: Context, array: JSTypedArray, index: Smi,
value: Object): Object {
- let elements: FixedTypedArrayBase =
+ const elements: FixedTypedArrayBase =
unsafe_cast<FixedTypedArrayBase>(array.elements);
StoreFixedTypedArrayElementFromTagged(
context, elements, index, value, KindForArrayType<T>(), SMI_PARAMETERS);
@@ -71,7 +71,7 @@ module typed_array {
context: Context, array: JSTypedArray, comparefn: Callable, a: Object,
b: Object): Number labels Detached {
// a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
- let v: Number =
+ const v: Number =
ToNumber_Inline(context, Call(context, comparefn, Undefined, a, b));
// b. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
@@ -95,11 +95,11 @@ module typed_array {
if (IsDetachedBuffer(array.buffer)) goto Detached;
for (let i: Smi = from + 1; i < to; ++i) {
- let element: Object = Load(context, array, i);
+ const element: Object = Load(context, array, i);
let j: Smi = i - 1;
for (; j >= from; --j) {
- let tmp: Object = Load(context, array, j);
- let order: Number = CallCompareWithDetachedCheck(
+ const tmp: Object = Load(context, array, j);
+ const order: Number = CallCompareWithDetachedCheck(
context, array, comparefn, tmp, element) otherwise Detached;
if (order > 0) {
Store(context, array, j + 1, tmp);
@@ -131,7 +131,7 @@ module typed_array {
// TODO(szuend): Check if a more involved third_index calculation is
// worth it for very large arrays.
- let third_index: Smi = from + ((to - from) >>> 1);
+ const third_index: Smi = from + ((to - from) >>> 1);
if (IsDetachedBuffer(array.buffer)) goto Detached;
@@ -140,7 +140,7 @@ module typed_array {
let v1: Object = Load(context, array, to - 1);
let v2: Object = Load(context, array, third_index);
- let c01: Number = CallCompareWithDetachedCheck(
+ const c01: Number = CallCompareWithDetachedCheck(
context, array, comparefn, v0, v1) otherwise Detached;
if (c01 > 0) {
// v1 < v0, so swap them.
@@ -149,21 +149,21 @@ module typed_array {
v1 = tmp;
}
// v0 <= v1.
- let c02: Number = CallCompareWithDetachedCheck(
+ const c02: Number = CallCompareWithDetachedCheck(
context, array, comparefn, v0, v2) otherwise Detached;
if (c02 >= 0) {
// v2 <= v0 <= v1.
- let tmp: Object = v0;
+ const tmp: Object = v0;
v0 = v2;
v2 = v1;
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2.
- let c12: Number = CallCompareWithDetachedCheck(
+ const c12: Number = CallCompareWithDetachedCheck(
context, array, comparefn, v1, v2) otherwise Detached;
if (c12 > 0) {
// v0 <= v2 < v1.
- let tmp: Object = v1;
+ const tmp: Object = v1;
v1 = v2;
v2 = tmp;
}
@@ -173,7 +173,7 @@ module typed_array {
Store(context, array, from, v0);
Store(context, array, to - 1, v2);
- let pivot: Object = v1;
+ const pivot: Object = v1;
let low_end: Smi = from + 1; // Upper bound of elems lower than pivot.
let high_start: Smi = to - 1; // Lower bound of elems greater than pivot.
@@ -203,7 +203,7 @@ module typed_array {
break;
}
- let top_elem: Object = Load(context, array, high_start);
+ const top_elem: Object = Load(context, array, high_start);
order = CallCompareWithDetachedCheck(
context, array, comparefn, top_elem, pivot) otherwise Detached;
}
@@ -212,7 +212,7 @@ module typed_array {
break;
}
- let high_start_value: Object = Load(context, array, high_start);
+ const high_start_value: Object = Load(context, array, high_start);
Store(context, array, idx, high_start_value);
Store(context, array, high_start, element);
@@ -258,17 +258,18 @@ module typed_array {
context: Context, receiver: Object, ...arguments): JSTypedArray {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
- let comparefn_obj: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const comparefn_obj: Object =
+ arguments.length > 0 ? arguments[0] : Undefined;
if (comparefn_obj != Undefined && !TaggedIsCallable(comparefn_obj)) {
ThrowTypeError(context, kBadSortComparisonFunction, comparefn_obj);
}
// 2. Let obj be the this value.
- let obj: Object = receiver;
+ const obj: Object = receiver;
// 3. Let buffer be ? ValidateTypedArray(obj).
// ValidateTypedArray currently returns the array, not the ViewBuffer.
- let array: JSTypedArray =
+ const array: JSTypedArray =
ValidateTypedArray(context, obj, '%TypedArray%.prototype.sort');
// Default sorting is done in C++ using std::sort
@@ -277,10 +278,10 @@ module typed_array {
}
// 4. Let len be obj.[[ArrayLength]].
- let len: Smi = array.length;
+ const len: Smi = array.length;
try {
- let comparefn: Callable =
+ const comparefn: Callable =
cast<Callable>(comparefn_obj) otherwise CastError;
let loadfn: LoadFn;
let storefn: StoreFn;
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 438b577af6..5c2094105c 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -12,7 +12,9 @@
#include "src/frames.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects.h"
+#include "src/objects/js-generator.h"
#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -2338,7 +2340,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Convert to Smi for the runtime call.
__ SmiTag(r11, r11);
{
- TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 3ff37695e2..cba6136a38 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -6,7 +6,6 @@
#define V8_CODE_FACTORY_H_
#include "src/allocation.h"
-#include "src/assembler.h"
#include "src/callable.h"
#include "src/code-stubs.h"
#include "src/globals.h"
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index d39d841fbd..2527e89a25 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -10,6 +10,7 @@
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -159,6 +160,14 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
extra_node4_name, extra_node5, extra_node5_name);
}
+void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
+ Label ok(this);
+ GotoIf(condition, &ok);
+ DebugBreak();
+ Goto(&ok);
+ BIND(&ok);
+}
+
Node* CodeStubAssembler::SelectImpl(TNode<BoolT> condition,
const NodeGenerator& true_body,
const NodeGenerator& false_body,
@@ -1056,12 +1065,10 @@ TNode<BoolT> CodeStubAssembler::IsFastJSArray(SloppyTNode<Object> object,
}
TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
- TNode<Object> object, TNode<Context> context,
- TNode<Context> native_context) {
+ TNode<Object> object, TNode<Context> context) {
Label if_false(this, Label::kDeferred), if_fast(this), exit(this);
TVARIABLE(BoolT, var_result);
- GotoIfForceSlowPath(&if_false);
- BranchIfFastJSArray(object, context, &if_fast, &if_false);
+ BranchIfFastJSArray(object, context, &if_fast, &if_false, true);
BIND(&if_fast);
{
// Check that the Array.prototype hasn't been modified in a way that would
@@ -1083,7 +1090,8 @@ TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
}
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
- Label* if_true, Label* if_false) {
+ Label* if_true, Label* if_false,
+ bool iteration_only) {
GotoIfForceSlowPath(if_false);
// Bailout if receiver is a Smi.
@@ -1099,6 +1107,11 @@ void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
// Verify that our prototype is the initial array prototype.
GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), if_false);
+ if (iteration_only) {
+ // If we are only iterating over the array, there is no need to check
+ // the NoElements protector if the array is not holey.
+ GotoIfNot(IsHoleyFastElementsKind(elements_kind), if_true);
+ }
Branch(IsNoElementsProtectorCellInvalid(), if_false, if_true);
}
@@ -1514,7 +1527,7 @@ TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
SloppyTNode<JSObject> object) {
- CSA_SLOW_ASSERT(this, Word32Not(IsDictionaryMap(LoadMap(object))));
+ CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties =
LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
return Select<HeapObject>(TaggedIsSmi(properties),
@@ -1750,8 +1763,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
BIND(&if_property_dictionary);
{
- var_hash = SmiUntag(CAST(
- LoadFixedArrayElement(properties, NameDictionary::kObjectHashIndex)));
+ var_hash = SmiUntag(CAST(LoadFixedArrayElement(
+ CAST(properties), NameDictionary::kObjectHashIndex)));
Goto(&done);
}
@@ -1806,23 +1819,6 @@ Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
return LoadObjectField(object, JSValue::kValueOffset);
}
-TNode<Object> CodeStubAssembler::LoadWeakCellValueUnchecked(
- SloppyTNode<HeapObject> weak_cell) {
- CSA_ASSERT(this, IsStrongHeapObject(weak_cell));
- // TODO(ishell): fix callers.
- return LoadObjectField(weak_cell, WeakCell::kValueOffset);
-}
-
-TNode<Object> CodeStubAssembler::LoadWeakCellValue(
- SloppyTNode<WeakCell> weak_cell, Label* if_cleared) {
- CSA_ASSERT(this, IsWeakCell(weak_cell));
- TNode<Object> value = LoadWeakCellValueUnchecked(weak_cell);
- if (if_cleared != nullptr) {
- GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
- }
- return value;
-}
-
void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Label* if_smi, Label* if_cleared,
Label* if_weak, Label* if_strong,
@@ -1962,15 +1958,42 @@ TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
Load(MachineType::AnyTagged(), array, offset, needs_poisoning));
}
+void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
+ Node* index,
+ int additional_offset,
+ ParameterMode parameter_mode) {
+ if (!FLAG_fixed_array_bounds_checks) return;
+ DCHECK_EQ(0, additional_offset % kPointerSize);
+ if (parameter_mode == ParameterMode::SMI_PARAMETERS) {
+ TNode<Smi> effective_index;
+ Smi* constant_index;
+ bool index_is_constant = ToSmiConstant(index, constant_index);
+ if (index_is_constant) {
+ effective_index = SmiConstant(Smi::ToInt(constant_index) +
+ additional_offset / kPointerSize);
+ } else if (additional_offset != 0) {
+ effective_index =
+ SmiAdd(CAST(index), SmiConstant(additional_offset / kPointerSize));
+ } else {
+ effective_index = CAST(index);
+ }
+ CSA_CHECK(this, SmiBelow(effective_index, LoadFixedArrayBaseLength(array)));
+ } else {
+ // IntPtrAdd does constant-folding automatically.
+ TNode<IntPtrT> effective_index =
+ IntPtrAdd(UncheckedCast<IntPtrT>(index),
+ IntPtrConstant(additional_offset / kPointerSize));
+ CSA_CHECK(this, UintPtrLessThan(effective_index,
+ LoadAndUntagFixedArrayBaseLength(array)));
+ }
+}
+
TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
- SloppyTNode<HeapObject> object, Node* index_node, int additional_offset,
+ TNode<FixedArray> object, Node* index_node, int additional_offset,
ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
- // This function is currently used for non-FixedArrays (e.g., PropertyArrays)
- // and thus the reasonable assert IsFixedArraySubclass(object) is
- // not always true. TODO(marja): Fix.
- CSA_SLOW_ASSERT(
- this, Word32Or(IsFixedArraySubclass(object), IsPropertyArray(object)));
+ CSA_ASSERT(this, IsFixedArraySubclass(object));
CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
+ FixedArrayBoundsCheck(object, index_node, additional_offset, parameter_mode);
TNode<MaybeObject> element =
LoadArrayElement(object, FixedArray::kHeaderSize, index_node,
additional_offset, parameter_mode, needs_poisoning);
@@ -1989,6 +2012,13 @@ TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
needs_poisoning));
}
+TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
+ TNode<PropertyArray> object) {
+ TNode<IntPtrT> value =
+ LoadAndUntagObjectField(object, PropertyArray::kLengthAndHashOffset);
+ return Signed(DecodeWord<PropertyArray::LengthField>(value));
+}
+
TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
TNode<FixedTypedArrayBase> typed_array) {
// Backing store = external_pointer + base_pointer.
@@ -2198,6 +2228,47 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
}
}
+TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
+ TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind) {
+ TVARIABLE(Numeric, var_result);
+ Label done(this), if_unknown_type(this, Label::kDeferred);
+ int32_t elements_kinds[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ Label* elements_kind_labels[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+ STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
+
+ Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
+ arraysize(elements_kinds));
+
+ BIND(&if_unknown_type);
+ Unreachable();
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ BIND(&if_##type##array); \
+ { \
+ var_result = CAST(LoadFixedTypedArrayElementAsTagged( \
+ data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \
+ Goto(&done); \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ BIND(&done);
+ return var_result.value();
+}
+
void CodeStubAssembler::StoreFixedTypedArrayElementFromTagged(
TNode<Context> context, TNode<FixedTypedArrayBase> elements,
TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
@@ -2316,6 +2387,70 @@ TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
}
+TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
+ TNode<FixedArrayBase> elements, TNode<IntPtrT> index,
+ TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole) {
+ TVARIABLE(Object, var_result);
+ Label done(this), if_packed(this), if_holey(this), if_packed_double(this),
+ if_holey_double(this), if_dictionary(this, Label::kDeferred);
+
+ int32_t kinds[] = {// Handled by if_packed.
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
+ // Handled by if_holey.
+ HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
+ // Handled by if_packed_double.
+ PACKED_DOUBLE_ELEMENTS,
+ // Handled by if_holey_double.
+ HOLEY_DOUBLE_ELEMENTS};
+ Label* labels[] = {// PACKED_{SMI,}_ELEMENTS
+ &if_packed, &if_packed,
+ // HOLEY_{SMI,}_ELEMENTS
+ &if_holey, &if_holey,
+ // PACKED_DOUBLE_ELEMENTS
+ &if_packed_double,
+ // HOLEY_DOUBLE_ELEMENTS
+ &if_holey_double};
+ Switch(elements_kind, &if_dictionary, kinds, labels, arraysize(kinds));
+
+ BIND(&if_packed);
+ {
+ var_result = LoadFixedArrayElement(CAST(elements), index, 0);
+ Goto(&done);
+ }
+
+ BIND(&if_holey);
+ {
+ var_result = LoadFixedArrayElement(CAST(elements), index);
+ Branch(WordEqual(var_result.value(), TheHoleConstant()), if_hole, &done);
+ }
+
+ BIND(&if_packed_double);
+ {
+ var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
+ CAST(elements), index, MachineType::Float64()));
+ Goto(&done);
+ }
+
+ BIND(&if_holey_double);
+ {
+ var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement(
+ CAST(elements), index, MachineType::Float64(), 0, INTPTR_PARAMETERS,
+ if_hole));
+ Goto(&done);
+ }
+
+ BIND(&if_dictionary);
+ {
+ CSA_ASSERT(this, IsDictionaryElementsKind(elements_kind));
+ var_result = BasicLoadNumberDictionaryElement(CAST(elements), index,
+ if_accessor, if_hole);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
+
TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type) {
@@ -2428,6 +2563,49 @@ TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
LoadContextElement(native_context, Context::ArrayMapIndex(kind)));
}
+TNode<BoolT> CodeStubAssembler::IsGeneratorFunction(
+ TNode<JSFunction> function) {
+ TNode<SharedFunctionInfo> const shared_function_info =
+ CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
+
+ TNode<Uint32T> const function_kind =
+ DecodeWord32<SharedFunctionInfo::FunctionKindBits>(LoadObjectField(
+ shared_function_info, SharedFunctionInfo::kFlagsOffset,
+ MachineType::Uint32()));
+
+ return TNode<BoolT>::UncheckedCast(Word32Or(
+ Word32Or(
+ Word32Or(
+ Word32Equal(function_kind,
+ Int32Constant(FunctionKind::kAsyncGeneratorFunction)),
+ Word32Equal(
+ function_kind,
+ Int32Constant(FunctionKind::kAsyncConciseGeneratorMethod))),
+ Word32Equal(function_kind,
+ Int32Constant(FunctionKind::kGeneratorFunction))),
+ Word32Equal(function_kind,
+ Int32Constant(FunctionKind::kConciseGeneratorMethod))));
+}
+
+TNode<BoolT> CodeStubAssembler::HasPrototypeProperty(TNode<JSFunction> function,
+ TNode<Map> map) {
+ // (has_prototype_slot() && IsConstructor()) ||
+ // IsGeneratorFunction(shared()->kind())
+ uint32_t mask =
+ Map::HasPrototypeSlotBit::kMask | Map::IsConstructorBit::kMask;
+ return TNode<BoolT>::UncheckedCast(
+ Word32Or(IsAllSetWord32(LoadMapBitField(map), mask),
+ IsGeneratorFunction(function)));
+}
+
+void CodeStubAssembler::GotoIfPrototypeRequiresRuntimeLookup(
+ TNode<JSFunction> function, TNode<Map> map, Label* runtime) {
+ // !has_prototype_property() || has_non_instance_prototype()
+ GotoIfNot(HasPrototypeProperty(function, map), runtime);
+ GotoIf(IsSetWord32<Map::HasNonInstancePrototypeBit>(LoadMapBitField(map)),
+ runtime);
+}
+
Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
Label* if_bailout) {
CSA_ASSERT(this, TaggedIsNotSmi(function));
@@ -2558,11 +2736,9 @@ Node* CodeStubAssembler::StoreElements(TNode<Object> object,
return StoreObjectField(object, JSObject::kElementsOffset, elements);
}
-Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
- Node* value,
- WriteBarrierMode barrier_mode,
- int additional_offset,
- ParameterMode parameter_mode) {
+void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
+ Node* object, Node* index_node, Node* value, WriteBarrierMode barrier_mode,
+ int additional_offset, ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(
this, Word32Or(IsFixedArraySubclass(object), IsPropertyArray(object)));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
@@ -2597,25 +2773,23 @@ Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
}),
FixedArray::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
- return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
- value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
} else {
- return Store(object, offset, value);
+ Store(object, offset, value);
}
}
-Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
- Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
+void CodeStubAssembler::StoreFixedDoubleArrayElement(
+ TNode<FixedDoubleArray> object, Node* index_node, TNode<Float64T> value,
+ ParameterMode parameter_mode) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
+ FixedArrayBoundsCheck(object, index_node, 0, parameter_mode);
Node* offset =
ElementOffsetFromIndex(index_node, PACKED_DOUBLE_ELEMENTS, parameter_mode,
FixedArray::kHeaderSize - kHeapObjectTag);
- CSA_ASSERT(this, IsOffsetInBounds(
- offset, LoadAndUntagFixedArrayBaseLength(object),
- FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS));
MachineRepresentation rep = MachineRepresentation::kFloat64;
- return StoreNoWriteBarrier(rep, object, offset, value);
+ StoreNoWriteBarrier(rep, object, offset, value);
}
Node* CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
@@ -3189,7 +3363,8 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
TNode<WordT> store_size = IntPtrAdd(
TimesPointerSize(length), IntPtrConstant(NameDictionary::kHeaderSize));
- Node* result = AllocateInNewSpace(store_size);
+ TNode<NameDictionary> result =
+ UncheckedCast<NameDictionary>(AllocateInNewSpace(store_size));
Comment("Initialize NameDictionary");
// Initialize FixedArray fields.
DCHECK(Heap::RootIsImmortalImmovable(Heap::kNameDictionaryMapRootIndex));
@@ -3222,7 +3397,7 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
TNode<WordT> end_address = IntPtrAdd(
result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
- return CAST(result);
+ return result;
}
TNode<NameDictionary> CodeStubAssembler::CopyNameDictionary(
@@ -3260,8 +3435,8 @@ Node* CodeStubAssembler::AllocateOrderedHashTable() {
TNode<Map> fixed_array_map = CAST(LoadRoot(
static_cast<Heap::RootListIndex>(CollectionType::GetMapRootIndex())));
TNode<FixedArray> table =
- AllocateFixedArray(elements_kind, length_intptr,
- kAllowLargeObjectAllocation, fixed_array_map);
+ CAST(AllocateFixedArray(elements_kind, length_intptr,
+ kAllowLargeObjectAllocation, fixed_array_map));
// Initialize the OrderedHashTable fields.
const WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER;
@@ -3383,12 +3558,13 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
std::function<void(Node*, Label*, Label*)> key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found) {
// Get the index of the bucket.
- Node* const number_of_buckets = SmiUntag(CAST(
- LoadFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex)));
+ Node* const number_of_buckets = SmiUntag(CAST(LoadFixedArrayElement(
+ CAST(table), CollectionType::kNumberOfBucketsIndex)));
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
Node* const first_entry = SmiUntag(CAST(LoadFixedArrayElement(
- table, bucket, CollectionType::kHashTableStartIndex * kPointerSize)));
+ CAST(table), bucket,
+ CollectionType::kHashTableStartIndex * kPointerSize)));
// Walk the bucket chain.
Node* entry_start;
@@ -3407,14 +3583,14 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
// Make sure the entry index is within range.
CSA_ASSERT(
- this,
- UintPtrLessThan(
- var_entry.value(),
- SmiUntag(SmiAdd(
- CAST(LoadFixedArrayElement(
- table, CollectionType::kNumberOfElementsIndex)),
- CAST(LoadFixedArrayElement(
- table, CollectionType::kNumberOfDeletedElementsIndex))))));
+ this, UintPtrLessThan(
+ var_entry.value(),
+ SmiUntag(SmiAdd(
+ CAST(LoadFixedArrayElement(
+ CAST(table), CollectionType::kNumberOfElementsIndex)),
+ CAST(LoadFixedArrayElement(
+ CAST(table),
+ CollectionType::kNumberOfDeletedElementsIndex))))));
// Compute the index of the entry relative to kHashTableStartIndex.
entry_start =
@@ -3424,7 +3600,7 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
// Load the key from the entry.
Node* const candidate_key = LoadFixedArrayElement(
- table, entry_start,
+ CAST(table), entry_start,
CollectionType::kHashTableStartIndex * kPointerSize);
key_compare(candidate_key, &if_key_found, &continue_next_entry);
@@ -3432,7 +3608,7 @@ void CodeStubAssembler::FindOrderedHashTableEntry(
BIND(&continue_next_entry);
// Load the index of the next entry in the bucket chain.
var_entry.Bind(SmiUntag(CAST(LoadFixedArrayElement(
- table, entry_start,
+ CAST(table), entry_start,
(CollectionType::kHashTableStartIndex + CollectionType::kChainOffset) *
kPointerSize))));
@@ -3777,24 +3953,26 @@ Node* CodeStubAssembler::ExtractFastJSArray(Node* context, Node* array,
Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
ParameterMode mode,
Node* allocation_site) {
- Node* length = LoadJSArrayLength(array);
- Node* elements = LoadElements(array);
-
Node* original_array_map = LoadMap(array);
Node* elements_kind = LoadMapElementsKind(original_array_map);
- Node* new_elements = CloneFixedArray(elements);
+ Node* length = LoadJSArrayLength(array);
+ Node* new_elements = ExtractFixedArray(
+ LoadElements(array), IntPtrOrSmiConstant(0, mode),
+ TaggedToParameter(length, mode), nullptr,
+ ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode);
  // Use the canonical map for the Array's ElementsKind.
Node* native_context = LoadNativeContext(context);
Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+
Node* result = AllocateUninitializedJSArrayWithoutElements(array_map, length,
allocation_site);
StoreObjectField(result, JSObject::kElementsOffset, new_elements);
return result;
}
-TNode<FixedArray> CodeStubAssembler::AllocateFixedArray(
+TNode<FixedArrayBase> CodeStubAssembler::AllocateFixedArray(
ElementsKind kind, Node* capacity, ParameterMode mode,
AllocationFlags flags, SloppyTNode<Map> fixed_array_map) {
Comment("AllocateFixedArray");
@@ -3830,7 +4008,7 @@ TNode<FixedArray> CodeStubAssembler::AllocateFixedArray(
return UncheckedCast<FixedArray>(array);
}
-TNode<FixedArray> CodeStubAssembler::ExtractFixedArray(
+TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
Node* fixed_array, Node* first, Node* count, Node* capacity,
ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode) {
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -3935,11 +4113,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractFixedArray(
BIND(&cow);
{
if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) {
- GotoIf(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first),
- &new_space_check);
-
- var_result.Bind(fixed_array);
- Goto(&done);
+ Branch(WordNotEqual(IntPtrOrSmiConstant(0, parameter_mode), first),
+ &new_space_check, [&] {
+ var_result.Bind(fixed_array);
+ Goto(&done);
+ });
} else {
var_fixed_array_map.Bind(LoadRoot(Heap::kFixedArrayMapRootIndex));
Goto(&new_space_check);
@@ -4056,6 +4234,48 @@ void CodeStubAssembler::FillFixedArrayWithValue(
mode);
}
+void CodeStubAssembler::FillFixedArrayWithSmiZero(TNode<FixedArray> array,
+ TNode<IntPtrT> length) {
+ CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
+
+ TNode<IntPtrT> byte_length = TimesPointerSize(length);
+ CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+
+ static const int32_t fa_base_data_offset =
+ FixedArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> backing_store = IntPtrAdd(BitcastTaggedToWord(array),
+ IntPtrConstant(fa_base_data_offset));
+
+ // Call out to memset to perform initialization.
+ TNode<ExternalReference> memset =
+ ExternalConstant(ExternalReference::libc_memset_function());
+ STATIC_ASSERT(kSizetSize == kIntptrSize);
+ CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memset,
+ backing_store, IntPtrConstant(0), byte_length);
+}
+
+void CodeStubAssembler::FillFixedDoubleArrayWithZero(
+ TNode<FixedDoubleArray> array, TNode<IntPtrT> length) {
+ CSA_ASSERT(this, WordEqual(length, LoadAndUntagFixedArrayBaseLength(array)));
+
+ TNode<IntPtrT> byte_length = TimesDoubleSize(length);
+ CSA_ASSERT(this, UintPtrLessThan(length, byte_length));
+
+ static const int32_t fa_base_data_offset =
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag;
+ TNode<IntPtrT> backing_store = IntPtrAdd(BitcastTaggedToWord(array),
+ IntPtrConstant(fa_base_data_offset));
+
+ // Call out to memset to perform initialization.
+ TNode<ExternalReference> memset =
+ ExternalConstant(ExternalReference::libc_memset_function());
+ STATIC_ASSERT(kSizetSize == kIntptrSize);
+ CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memset,
+ backing_store, IntPtrConstant(0), byte_length);
+}
+
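The two fillers above avoid an element-by-element store loop: both compute the byte length of the backing store and hand it to libc memset, relying on the fact that Smi zero and the double 0.0 are all-zero bit patterns. A minimal sketch of the effect, with a hypothetical untagged payload pointer in place of the tagged array plus header offset used above (helper name and parameters are illustrative):

    #include <string.h>
    #include <stddef.h>

    // Zero a FixedArray / FixedDoubleArray payload in one call, as the stubs do.
    // element_size is kPointerSize for tagged Smi zeros, kDoubleSize for 0.0.
    static void FillBackingStoreWithZero(void* payload, size_t element_count,
                                         size_t element_size) {
      memset(payload, 0, element_count * element_size);
    }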
void CodeStubAssembler::CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* first_element, Node* element_count, Node* capacity,
@@ -4203,8 +4423,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
-TNode<FixedArray> CodeStubAssembler::ConvertFixedArrayBaseToFixedArray(
- TNode<FixedArrayBase> base, Label* cast_fail) {
+TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
+ TNode<HeapObject> base, Label* cast_fail) {
Label fixed_array(this);
TNode<Map> map = LoadMap(base);
GotoIf(WordEqual(map, LoadRoot(Heap::kFixedArrayMapRootIndex)), &fixed_array);
@@ -4610,55 +4830,61 @@ Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
return TruncateFloat64ToWord32(value);
}
-TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
- SloppyTNode<Float64T> value) {
+void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number,
+ TVariable<Smi>& var_result_smi,
+ Label* if_smi) {
+ TNode<Float64T> value = LoadHeapNumberValue(number);
+ TryFloat64ToSmi(value, var_result_smi, if_smi);
+}
+
+void CodeStubAssembler::TryFloat64ToSmi(TNode<Float64T> value,
+ TVariable<Smi>& var_result_smi,
+ Label* if_smi) {
TNode<Int32T> value32 = RoundFloat64ToInt32(value);
TNode<Float64T> value64 = ChangeInt32ToFloat64(value32);
- Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
+ Label if_int32(this), if_heap_number(this, Label::kDeferred);
- Label if_valueisequal(this), if_valueisnotequal(this);
- Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
- BIND(&if_valueisequal);
- {
- GotoIfNot(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
- Branch(Int32LessThan(UncheckedCast<Int32T>(Float64ExtractHighWord32(value)),
- Int32Constant(0)),
- &if_valueisheapnumber, &if_valueisint32);
- }
- BIND(&if_valueisnotequal);
- Goto(&if_valueisheapnumber);
+ GotoIfNot(Float64Equal(value, value64), &if_heap_number);
+ GotoIfNot(Word32Equal(value32, Int32Constant(0)), &if_int32);
+ Branch(Int32LessThan(UncheckedCast<Int32T>(Float64ExtractHighWord32(value)),
+ Int32Constant(0)),
+ &if_heap_number, &if_int32);
TVARIABLE(Number, var_result);
- BIND(&if_valueisint32);
+ BIND(&if_int32);
{
if (SmiValuesAre32Bits()) {
- TNode<Smi> result = SmiTag(ChangeInt32ToIntPtr(value32));
- var_result = result;
- Goto(&if_join);
+ var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32));
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(value32, value32);
TNode<BoolT> overflow = Projection<1>(pair);
- Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
- BIND(&if_overflow);
- Goto(&if_valueisheapnumber);
- BIND(&if_notoverflow);
- {
- TNode<Smi> result =
- BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
- var_result = result;
- Goto(&if_join);
- }
+ GotoIf(overflow, &if_heap_number);
+ var_result_smi =
+ BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair)));
}
+ Goto(if_smi);
}
- BIND(&if_valueisheapnumber);
+ BIND(&if_heap_number);
+}
+
+TNode<Number> CodeStubAssembler::ChangeFloat64ToTagged(
+ SloppyTNode<Float64T> value) {
+ Label if_smi(this), done(this);
+ TVARIABLE(Smi, var_smi_result);
+ TVARIABLE(Number, var_result);
+ TryFloat64ToSmi(value, var_smi_result, &if_smi);
+
+ var_result = AllocateHeapNumberWithValue(value);
+ Goto(&done);
+
+ BIND(&if_smi);
{
- var_result = AllocateHeapNumberWithValue(value);
- Goto(&if_join);
+ var_result = var_smi_result.value();
+ Goto(&done);
}
- BIND(&if_join);
+ BIND(&done);
return var_result.value();
}
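TryFloat64ToSmi factors the old inline logic of ChangeFloat64ToTagged into a reusable helper: a double is Smi-convertible only if it survives an int32 round-trip and is not negative zero (detected via the sign bit of the high word when the integer part is 0). A rough C++ sketch of that test, restricted to in-range values and ignoring the extra 31-bit-Smi overflow check performed above (function name is illustrative):

    #include <cmath>
    #include <cstdint>

    // Returns true and sets *out when |value| is representable as a Smi payload.
    static bool TryFloat64ToSmiSketch(double value, int32_t* out) {
      if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;  // keep the cast defined
      int32_t value32 = static_cast<int32_t>(value);
      if (static_cast<double>(value32) != value) return false;  // fractional part present
      if (value32 == 0 && std::signbit(value)) return false;    // -0.0 needs a HeapNumber
      *out = value32;
      return true;
    }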
@@ -4679,17 +4905,16 @@ TNode<Number> CodeStubAssembler::ChangeInt32ToTagged(
TNode<Float64T> value64 = ChangeInt32ToFloat64(value);
TNode<HeapNumber> result = AllocateHeapNumberWithValue(value64);
var_result = result;
+ Goto(&if_join);
}
- Goto(&if_join);
BIND(&if_notoverflow);
{
TNode<IntPtrT> almost_tagged_value =
ChangeInt32ToIntPtr(Projection<0>(pair));
- TNode<Smi> result;
- result = BitcastWordToTaggedSigned(almost_tagged_value);
+ TNode<Smi> result = BitcastWordToTaggedSigned(almost_tagged_value);
var_result = result;
+ Goto(&if_join);
}
- Goto(&if_join);
BIND(&if_join);
return var_result.value();
}
@@ -4780,6 +5005,24 @@ TNode<String> CodeStubAssembler::ToThisString(Node* context, Node* value,
return CAST(var_value.value());
}
+TNode<Uint32T> CodeStubAssembler::ChangeNumberToUint32(TNode<Number> value) {
+ TVARIABLE(Uint32T, var_result);
+ Label if_smi(this), if_heapnumber(this, Label::kDeferred), done(this);
+ Branch(TaggedIsSmi(value), &if_smi, &if_heapnumber);
+ BIND(&if_smi);
+ {
+ var_result = Unsigned(SmiToInt32(CAST(value)));
+ Goto(&done);
+ }
+ BIND(&if_heapnumber);
+ {
+ var_result = ChangeFloat64ToUint32(LoadHeapNumberValue(CAST(value)));
+ Goto(&done);
+ }
+ BIND(&done);
+ return var_result.value();
+}
+
TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
SloppyTNode<Number> value) {
// TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode.
@@ -4804,25 +5047,30 @@ TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
TNode<UintPtrT> CodeStubAssembler::ChangeNonnegativeNumberToUintPtr(
TNode<Number> value) {
TVARIABLE(UintPtrT, result);
- Label smi(this), done(this, &result);
- GotoIf(TaggedIsSmi(value), &smi);
-
- TNode<HeapNumber> value_hn = CAST(value);
- result = ChangeFloat64ToUintPtr(LoadHeapNumberValue(value_hn));
- Goto(&done);
-
- BIND(&smi);
- TNode<Smi> value_smi = CAST(value);
- CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
- result = UncheckedCast<UintPtrT>(SmiToIntPtr(value_smi));
- Goto(&done);
+ Label done(this, &result);
+ Branch(TaggedIsSmi(value),
+ [&] {
+ TNode<Smi> value_smi = CAST(value);
+ CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
+ result = UncheckedCast<UintPtrT>(SmiToIntPtr(value_smi));
+ Goto(&done);
+ },
+ [&] {
+ TNode<HeapNumber> value_hn = CAST(value);
+ result = ChangeFloat64ToUintPtr(LoadHeapNumberValue(value_hn));
+ Goto(&done);
+ });
BIND(&done);
return result.value();
}
-SloppyTNode<WordT> CodeStubAssembler::TimesPointerSize(Node* value) {
- return WordShl(value, IntPtrConstant(kPointerSizeLog2));
+TNode<WordT> CodeStubAssembler::TimesPointerSize(SloppyTNode<WordT> value) {
+ return WordShl(value, kPointerSizeLog2);
+}
+
+TNode<WordT> CodeStubAssembler::TimesDoubleSize(SloppyTNode<WordT> value) {
+ return WordShl(value, kDoubleSizeLog2);
}
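Both helpers reduce a size multiplication to a shift, since kPointerSize and kDoubleSize are powers of two; the typed overloads in the header below only preserve signedness. The equivalent in plain C++, with the log2 constant passed in rather than taken from V8's globals (name and signature are illustrative):

    #include <cstdint>

    // Multiply by a power-of-two element size via a shift.
    static uintptr_t TimesSize(uintptr_t value, unsigned size_log2) {
      return value << size_log2;  // size_log2 = kPointerSizeLog2 or kDoubleSizeLog2
    }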
Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
@@ -5374,11 +5622,6 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
}
}
-TNode<BoolT> CodeStubAssembler::IsWeakCell(SloppyTNode<HeapObject> object) {
- CSA_ASSERT(this, IsStrongHeapObject(object));
- return IsWeakCellMap(LoadMap(object));
-}
-
TNode<BoolT> CodeStubAssembler::IsBoolean(SloppyTNode<HeapObject> object) {
return IsBooleanMap(LoadMap(object));
}
@@ -5676,7 +5919,7 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumberUint32(TNode<HeapNumber> number) {
IsHeapNumberPositive(number),
[=] {
TNode<Float64T> value = LoadHeapNumberValue(number);
- TNode<Uint32T> int_value = ChangeFloat64ToUint32(value);
+ TNode<Uint32T> int_value = Unsigned(TruncateFloat64ToWord32(value));
return Float64Equal(value, ChangeUint32ToFloat64(int_value));
},
[=] { return Int32FalseConstant(); });
@@ -5758,8 +6001,9 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
BIND(&if_codeisonebyte);
{
// Load the isolate wide single character string cache.
- Node* cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);
- Node* code_index = ChangeUint32ToWord(code);
+ TNode<FixedArray> cache =
+ CAST(LoadRoot(Heap::kSingleCharacterStringCacheRootIndex));
+ TNode<IntPtrT> code_index = Signed(ChangeUint32ToWord(code));
// Check if we have an entry for the {code} in the single character string
// cache already.
@@ -5771,7 +6015,7 @@ TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
BIND(&if_entryisundefined);
{
// Allocate a new SeqOneByteString for {code} and store it in the {cache}.
- Node* result = AllocateSeqOneByteString(1);
+ TNode<String> result = AllocateSeqOneByteString(1);
StoreNoWriteBarrier(
MachineRepresentation::kWord8, result,
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag), code);
@@ -6081,11 +6325,11 @@ TNode<String> ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
return CAST(var_string_.value());
}
-Node* ToDirectStringAssembler::TryToSequential(StringPointerKind ptr_kind,
- Label* if_bailout) {
+TNode<RawPtrT> ToDirectStringAssembler::TryToSequential(
+ StringPointerKind ptr_kind, Label* if_bailout) {
CHECK(ptr_kind == PTR_TO_DATA || ptr_kind == PTR_TO_STRING);
- VARIABLE(var_result, MachineType::PointerRepresentation());
+ TVARIABLE(RawPtrT, var_result);
Label out(this), if_issequential(this), if_isexternal(this, Label::kDeferred);
Branch(is_external(), &if_isexternal, &if_issequential);
@@ -6093,12 +6337,12 @@ Node* ToDirectStringAssembler::TryToSequential(StringPointerKind ptr_kind,
{
STATIC_ASSERT(SeqOneByteString::kHeaderSize ==
SeqTwoByteString::kHeaderSize);
- Node* result = BitcastTaggedToWord(var_string_.value());
+ TNode<IntPtrT> result = BitcastTaggedToWord(var_string_.value());
if (ptr_kind == PTR_TO_DATA) {
result = IntPtrAdd(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
kHeapObjectTag));
}
- var_result.Bind(result);
+ var_result = ReinterpretCast<RawPtrT>(result);
Goto(&out);
}
@@ -6107,14 +6351,14 @@ Node* ToDirectStringAssembler::TryToSequential(StringPointerKind ptr_kind,
GotoIf(IsShortExternalStringInstanceType(var_instance_type_.value()),
if_bailout);
- Node* const string = var_string_.value();
- Node* result = LoadObjectField(string, ExternalString::kResourceDataOffset,
- MachineType::Pointer());
+ TNode<String> string = CAST(var_string_.value());
+ TNode<IntPtrT> result =
+ LoadObjectField<IntPtrT>(string, ExternalString::kResourceDataOffset);
if (ptr_kind == PTR_TO_STRING) {
result = IntPtrSub(result, IntPtrConstant(SeqOneByteString::kHeaderSize -
kHeapObjectTag));
}
- var_result.Bind(result);
+ var_result = ReinterpretCast<RawPtrT>(result);
Goto(&out);
}
@@ -6398,7 +6642,9 @@ TNode<Number> CodeStubAssembler::StringToNumber(TNode<String> input) {
TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
TVARIABLE(String, result);
- Label runtime(this, Label::kDeferred), smi(this), done(this, &result);
+ TVARIABLE(Smi, smi_input);
+ Label runtime(this, Label::kDeferred), if_smi(this), if_heap_number(this),
+ done(this, &result);
// Load the number string cache.
Node* number_string_cache = LoadRoot(Heap::kNumberStringCacheRootIndex);
@@ -6411,63 +6657,67 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
TNode<IntPtrT> one = IntPtrConstant(1);
mask = IntPtrSub(mask, one);
- GotoIf(TaggedIsSmi(input), &smi);
-
- TNode<HeapNumber> heap_number_input = CAST(input);
-
- // Make a hash from the two 32-bit values of the double.
- TNode<Int32T> low =
- LoadObjectField<Int32T>(heap_number_input, HeapNumber::kValueOffset);
- TNode<Int32T> high = LoadObjectField<Int32T>(
- heap_number_input, HeapNumber::kValueOffset + kIntSize);
- TNode<Word32T> hash = Word32Xor(low, high);
- TNode<WordT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
- TNode<WordT> index =
- WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
-
- // Cache entry's key must be a heap number
- Node* number_key = LoadFixedArrayElement(number_string_cache, index);
- GotoIf(TaggedIsSmi(number_key), &runtime);
- GotoIfNot(IsHeapNumber(number_key), &runtime);
-
- // Cache entry's key must match the heap number value we're looking for.
- Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
- MachineType::Int32());
- Node* high_compare = LoadObjectField(
- number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
- GotoIfNot(Word32Equal(low, low_compare), &runtime);
- GotoIfNot(Word32Equal(high, high_compare), &runtime);
-
- // Heap number match, return value from cache entry.
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
- result =
- CAST(LoadFixedArrayElement(number_string_cache, index, kPointerSize));
- Goto(&done);
-
- BIND(&runtime);
- {
- // No cache entry, go to the runtime.
- result = CAST(CallRuntime(Runtime::kNumberToStringSkipCache,
- NoContextConstant(), input));
+ GotoIfNot(TaggedIsSmi(input), &if_heap_number);
+ smi_input = CAST(input);
+ Goto(&if_smi);
+
+ BIND(&if_heap_number);
+ {
+ TNode<HeapNumber> heap_number_input = CAST(input);
+ // Try normalizing the HeapNumber.
+ TryHeapNumberToSmi(heap_number_input, smi_input, &if_smi);
+
+ // Make a hash from the two 32-bit values of the double.
+ TNode<Int32T> low =
+ LoadObjectField<Int32T>(heap_number_input, HeapNumber::kValueOffset);
+ TNode<Int32T> high = LoadObjectField<Int32T>(
+ heap_number_input, HeapNumber::kValueOffset + kIntSize);
+ TNode<Word32T> hash = Word32Xor(low, high);
+ TNode<WordT> word_hash = WordShl(ChangeInt32ToIntPtr(hash), one);
+ TNode<WordT> index =
+ WordAnd(word_hash, WordSar(mask, SmiShiftBitsConstant()));
+
+ // Cache entry's key must be a heap number
+ Node* number_key = LoadFixedArrayElement(CAST(number_string_cache), index);
+ GotoIf(TaggedIsSmi(number_key), &runtime);
+ GotoIfNot(IsHeapNumber(number_key), &runtime);
+
+ // Cache entry's key must match the heap number value we're looking for.
+ Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
+ MachineType::Int32());
+ Node* high_compare = LoadObjectField(
+ number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
+ GotoIfNot(Word32Equal(low, low_compare), &runtime);
+ GotoIfNot(Word32Equal(high, high_compare), &runtime);
+
+ // Heap number match, return value from cache entry.
+ result = CAST(
+ LoadFixedArrayElement(CAST(number_string_cache), index, kPointerSize));
+ Goto(&done);
}
- Goto(&done);
- BIND(&smi);
+ BIND(&if_smi);
{
// Load the smi key, make sure it matches the smi we're looking for.
Node* smi_index = BitcastWordToTagged(
- WordAnd(WordShl(BitcastTaggedToWord(input), one), mask));
- Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
- SMI_PARAMETERS);
- GotoIf(WordNotEqual(smi_key, input), &runtime);
+ WordAnd(WordShl(BitcastTaggedToWord(smi_input.value()), one), mask));
+ Node* smi_key = LoadFixedArrayElement(CAST(number_string_cache), smi_index,
+ 0, SMI_PARAMETERS);
+ GotoIf(WordNotEqual(smi_key, smi_input.value()), &runtime);
// Smi match, return value from cache entry.
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
- result = CAST(LoadFixedArrayElement(number_string_cache, smi_index,
+ result = CAST(LoadFixedArrayElement(CAST(number_string_cache), smi_index,
kPointerSize, SMI_PARAMETERS));
Goto(&done);
}
+ BIND(&runtime);
+ {
+ // No cache entry, go to the runtime.
+ result =
+ CAST(CallRuntime(Runtime::kNumberToString, NoContextConstant(), input));
+ Goto(&done);
+ }
BIND(&done);
return result.value();
}
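The restructured NumberToString normalizes HeapNumber inputs to Smis first (via TryHeapNumberToSmi) so that integral doubles hit the Smi branch of the cache, and only falls back to the runtime (Runtime::kNumberToString now, rather than the skip-cache variant) when the probe misses. On the HeapNumber path the cache key hash is the xor of the double's two 32-bit halves; a sketch of that hash, under the assumption that the cache holds key/value pairs in adjacent slots (function name and parameter are illustrative):

    #include <cstdint>
    #include <cstring>

    // Hash a double into a number-string-cache bucket index (entries is a power of two).
    static uint32_t NumberStringCacheHash(double value, uint32_t entries) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t low = static_cast<uint32_t>(bits);
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      return (low ^ high) & (entries - 1);
    }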
@@ -6961,6 +7211,30 @@ TNode<JSReceiver> CodeStubAssembler::ToObject(SloppyTNode<Context> context,
return CAST(CallBuiltin(Builtins::kToObject, context, input));
}
+TNode<JSReceiver> CodeStubAssembler::ToObject_Inline(TNode<Context> context,
+ TNode<Object> input) {
+ TVARIABLE(JSReceiver, result);
+ Label if_isreceiver(this), if_isnotreceiver(this, Label::kDeferred);
+ Label done(this);
+
+ BranchIfJSReceiver(input, &if_isreceiver, &if_isnotreceiver);
+
+ BIND(&if_isreceiver);
+ {
+ result = CAST(input);
+ Goto(&done);
+ }
+
+ BIND(&if_isnotreceiver);
+ {
+ result = ToObject(context, input);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return result.value();
+}
+
TNode<Smi> CodeStubAssembler::ToSmiIndex(TNode<Object> input,
TNode<Context> context,
Label* range_error) {
@@ -8159,7 +8433,7 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Comment("[ LoadPropertyFromGlobalDictionary");
CSA_ASSERT(this, IsGlobalDictionary(dictionary));
- Node* property_cell = LoadFixedArrayElement(dictionary, name_index);
+ Node* property_cell = LoadFixedArrayElement(CAST(dictionary), name_index);
CSA_ASSERT(this, IsPropertyCell(property_cell));
Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
@@ -8248,15 +8522,8 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
if_bailout);
- // if (!(has_prototype_slot() && !has_non_instance_prototype())) use
- // generic property loading mechanism.
- GotoIfNot(
- Word32Equal(
- Word32And(LoadMapBitField(receiver_map),
- Int32Constant(Map::HasPrototypeSlotBit::kMask |
- Map::HasNonInstancePrototypeBit::kMask)),
- Int32Constant(Map::HasPrototypeSlotBit::kMask)),
- if_bailout);
+ GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), CAST(receiver_map),
+ if_bailout);
var_value.Bind(LoadJSFunctionPrototype(receiver, if_bailout));
Goto(&done);
}
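The removed inline check requires the map's prototype-slot bit to be set and its non-instance-prototype bit to be clear, folded into a single masked compare; the new GotoIfPrototypeRequiresRuntimeLookup helper centralizes that test, and OrdinaryHasInstance below gets the same treatment with the IsConstructor bit. The idiom in plain C++, with illustrative bit positions and function name:

    #include <cstdint>

    constexpr uint32_t kHasPrototypeSlotBit        = 1u << 0;  // positions are illustrative
    constexpr uint32_t kHasNonInstancePrototypeBit = 1u << 1;

    // True when "prototype" can be read straight off the function without the runtime.
    static bool PrototypeIsFastPath(uint32_t map_bit_field) {
      return (map_bit_field &
              (kHasPrototypeSlotBit | kHasNonInstancePrototypeBit)) ==
             kHasPrototypeSlotBit;
    }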
@@ -8419,25 +8686,26 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
BIND(&if_isobjectorsmi);
{
- Node* elements = LoadElements(object);
- Node* length = LoadAndUntagFixedArrayBaseLength(elements);
+ TNode<FixedArray> elements = CAST(LoadElements(object));
+ TNode<IntPtrT> length = LoadAndUntagFixedArrayBaseLength(elements);
GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
- Node* element = LoadFixedArrayElement(elements, intptr_index);
- Node* the_hole = TheHoleConstant();
+ TNode<Object> element = LoadFixedArrayElement(elements, intptr_index);
+ TNode<Oddball> the_hole = TheHoleConstant();
Branch(WordEqual(element, the_hole), if_not_found, if_found);
}
BIND(&if_isdouble);
{
- Node* elements = LoadElements(object);
- Node* length = LoadAndUntagFixedArrayBaseLength(elements);
+ TNode<FixedArrayBase> elements = LoadElements(object);
+ TNode<IntPtrT> length = LoadAndUntagFixedArrayBaseLength(elements);
GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
// Check if the element is a double hole, but don't load it.
- LoadFixedDoubleArrayElement(elements, intptr_index, MachineType::None(), 0,
- INTPTR_PARAMETERS, if_not_found);
+ LoadFixedDoubleArrayElement(CAST(elements), intptr_index,
+ MachineType::None(), 0, INTPTR_PARAMETERS,
+ if_not_found);
Goto(if_found);
}
BIND(&if_isdictionary);
@@ -8709,15 +8977,8 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
GotoIfNot(InstanceTypeEqual(callable_instance_type, JS_FUNCTION_TYPE),
&return_runtime);
- // Goto runtime if {callable} is not a constructor or has
- // a non-instance "prototype".
- Node* callable_bitfield = LoadMapBitField(callable_map);
- GotoIfNot(Word32Equal(
- Word32And(callable_bitfield,
- Int32Constant(Map::HasNonInstancePrototypeBit::kMask |
- Map::IsConstructorBit::kMask)),
- Int32Constant(Map::IsConstructorBit::kMask)),
- &return_runtime);
+ GotoIfPrototypeRequiresRuntimeLookup(CAST(callable), CAST(callable_map),
+ &return_runtime);
// Get the "prototype" (or initial map) of the {callable}.
Node* callable_prototype =
@@ -8963,8 +9224,8 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
key = SmiUntag(key);
GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
- Node* elements = LoadElements(receiver);
- Node* elements_length = LoadAndUntagFixedArrayBaseLength(elements);
+ TNode<FixedArray> elements = CAST(LoadElements(receiver));
+ TNode<IntPtrT> elements_length = LoadAndUntagFixedArrayBaseLength(elements);
VARIABLE(var_result, MachineRepresentation::kTagged);
if (!is_load) {
@@ -8976,37 +9237,38 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
- Node* mapped_index =
+ TNode<Object> mapped_index =
LoadFixedArrayElement(elements, IntPtrAdd(key, intptr_two));
Branch(WordEqual(mapped_index, TheHoleConstant()), &if_unmapped, &if_mapped);
BIND(&if_mapped);
{
- CSA_ASSERT(this, TaggedIsSmi(mapped_index));
- mapped_index = SmiUntag(mapped_index);
- Node* the_context = LoadFixedArrayElement(elements, 0);
+ TNode<IntPtrT> mapped_index_intptr = SmiUntag(CAST(mapped_index));
+ TNode<Context> the_context = CAST(LoadFixedArrayElement(elements, 0));
// Assert that we can use LoadFixedArrayElement/StoreFixedArrayElement
// methods for accessing Context.
STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
DCHECK_EQ(Context::SlotOffset(0) + kHeapObjectTag,
FixedArray::OffsetOfElementAt(0));
if (is_load) {
- Node* result = LoadFixedArrayElement(the_context, mapped_index);
+ Node* result = LoadFixedArrayElement(the_context, mapped_index_intptr);
CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
var_result.Bind(result);
} else {
- StoreFixedArrayElement(the_context, mapped_index, value);
+ StoreFixedArrayElement(the_context, mapped_index_intptr, value);
}
Goto(&end);
}
BIND(&if_unmapped);
{
- Node* backing_store = LoadFixedArrayElement(elements, 1);
- GotoIf(WordNotEqual(LoadMap(backing_store), FixedArrayMapConstant()),
+ TNode<HeapObject> backing_store_ho =
+ CAST(LoadFixedArrayElement(elements, 1));
+ GotoIf(WordNotEqual(LoadMap(backing_store_ho), FixedArrayMapConstant()),
bailout);
+ TNode<FixedArray> backing_store = CAST(backing_store_ho);
- Node* backing_store_length =
+ TNode<IntPtrT> backing_store_length =
LoadAndUntagFixedArrayBaseLength(backing_store);
GotoIf(UintPtrGreaterThanOrEqual(key, backing_store_length), bailout);
@@ -9078,12 +9340,12 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
return;
} else if (IsDoubleElementsKind(kind)) {
// Make sure we do not store signalling NaNs into double arrays.
- value = Float64SilenceNaN(value);
- StoreFixedDoubleArrayElement(elements, index, value, mode);
+ TNode<Float64T> value_silenced = Float64SilenceNaN(value);
+ StoreFixedDoubleArrayElement(CAST(elements), index, value_silenced, mode);
} else {
WriteBarrierMode barrier_mode =
IsSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
- StoreFixedArrayElement(elements, index, value, barrier_mode, 0, mode);
+ StoreFixedArrayElement(CAST(elements), index, value, barrier_mode, 0, mode);
}
}
@@ -9391,12 +9653,13 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
Node* object, Node* elements, ElementsKind kind,
KeyedAccessStoreMode store_mode, Node* length, Node* key,
ParameterMode mode, bool is_js_array, Label* bailout) {
+ DCHECK(IsFastElementsKind(kind));
VARIABLE(checked_elements, MachineRepresentation::kTagged);
Label grow_case(this), no_grow_case(this), done(this),
grow_bailout(this, Label::kDeferred);
Node* condition;
- if (IsHoleyOrDictionaryElementsKind(kind)) {
+ if (IsHoleyElementsKind(kind)) {
condition = UintPtrGreaterThanOrEqual(key, length);
} else {
// We don't support growing here unless the value is being appended.
@@ -9572,7 +9835,7 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
}
TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
- return WordAnd(address, IntPtrConstant(~Page::kPageAlignmentMask));
+ return WordAnd(address, IntPtrConstant(~kPageAlignmentMask));
}
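PageFromAddress now masks with the global kPageAlignmentMask instead of Page::kPageAlignmentMask; the operation itself is unchanged, rounding an address down to the start of its power-of-two-sized page. A sketch (signature is illustrative):

    #include <cstdint>

    // Round an address down to its page base; mask is page_size - 1.
    static uintptr_t PageFromAddress(uintptr_t address, uintptr_t page_alignment_mask) {
      return address & ~page_alignment_mask;
    }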
TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
@@ -9603,7 +9866,7 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
// Store an empty fixed array for the code dependency.
StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
- Heap::kEmptyFixedArrayRootIndex);
+ Heap::kEmptyWeakFixedArrayRootIndex);
// Link the object to the allocation site list
TNode<ExternalReference> site_list = ExternalConstant(
@@ -9624,10 +9887,13 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
}
TNode<MaybeObject> CodeStubAssembler::StoreWeakReferenceInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot,
- TNode<HeapObject> value) {
+ SloppyTNode<FeedbackVector> feedback_vector, Node* slot,
+ SloppyTNode<HeapObject> value, int additional_offset,
+ ParameterMode parameter_mode) {
TNode<MaybeObject> weak_value = MakeWeak(value);
- StoreFeedbackVectorSlot(feedback_vector, slot, weak_value);
+ StoreFeedbackVectorSlot(feedback_vector, slot, weak_value,
+ UPDATE_WRITE_BARRIER, additional_offset,
+ parameter_mode);
return weak_value;
}
@@ -9681,7 +9947,15 @@ Node* CodeStubAssembler::BuildFastLoop(
// to force the loop header check at the end of the loop and branch forward to
// it from the pre-header). The extra branch is slower in the case that the
// loop actually iterates.
- Branch(WordEqual(var.value(), end_index), &after_loop, &loop);
+ Node* first_check = WordEqual(var.value(), end_index);
+ int32_t first_check_val;
+ if (ToInt32Constant(first_check, first_check_val)) {
+ if (first_check_val) return var.value();
+ Goto(&loop);
+ } else {
+ Branch(first_check, &after_loop, &loop);
+ }
+
BIND(&loop);
{
if (advance_mode == IndexAdvanceMode::kPre) {
@@ -9785,62 +10059,59 @@ void CodeStubAssembler::BranchIfNumberRelationalComparison(
TVARIABLE(Float64T, var_left_float);
TVARIABLE(Float64T, var_right_float);
- Label if_left_smi(this), if_left_not_smi(this);
- Branch(TaggedIsSmi(left), &if_left_smi, &if_left_not_smi);
-
- BIND(&if_left_smi);
- {
- TNode<Smi> smi_left = CAST(left);
-
- Label if_right_not_smi(this);
- GotoIfNot(TaggedIsSmi(right), &if_right_not_smi);
- {
- TNode<Smi> smi_right = CAST(right);
-
- // Both {left} and {right} are Smi, so just perform a fast Smi comparison.
- switch (op) {
- case Operation::kLessThan:
- BranchIfSmiLessThan(smi_left, smi_right, if_true, if_false);
- break;
- case Operation::kLessThanOrEqual:
- BranchIfSmiLessThanOrEqual(smi_left, smi_right, if_true, if_false);
- break;
- case Operation::kGreaterThan:
- BranchIfSmiLessThan(smi_right, smi_left, if_true, if_false);
- break;
- case Operation::kGreaterThanOrEqual:
- BranchIfSmiLessThanOrEqual(smi_right, smi_left, if_true, if_false);
- break;
- default:
- UNREACHABLE();
- }
- }
- BIND(&if_right_not_smi);
- {
- CSA_ASSERT(this, IsHeapNumber(right));
- var_left_float = SmiToFloat64(smi_left);
- var_right_float = LoadHeapNumberValue(right);
- Goto(&do_float_comparison);
- }
- }
-
- BIND(&if_left_not_smi);
- {
- CSA_ASSERT(this, IsHeapNumber(left));
- var_left_float = LoadHeapNumberValue(left);
-
- Label if_right_not_smi(this);
- GotoIfNot(TaggedIsSmi(right), &if_right_not_smi);
- var_right_float = SmiToFloat64(right);
- Goto(&do_float_comparison);
-
- BIND(&if_right_not_smi);
- {
- CSA_ASSERT(this, IsHeapNumber(right));
- var_right_float = LoadHeapNumberValue(right);
- Goto(&do_float_comparison);
- }
- }
+ Branch(TaggedIsSmi(left),
+ [&] {
+ TNode<Smi> smi_left = CAST(left);
+
+ Branch(TaggedIsSmi(right),
+ [&] {
+ TNode<Smi> smi_right = CAST(right);
+
+ // Both {left} and {right} are Smi, so just perform a fast
+ // Smi comparison.
+ switch (op) {
+ case Operation::kLessThan:
+ BranchIfSmiLessThan(smi_left, smi_right, if_true,
+ if_false);
+ break;
+ case Operation::kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(smi_left, smi_right, if_true,
+ if_false);
+ break;
+ case Operation::kGreaterThan:
+ BranchIfSmiLessThan(smi_right, smi_left, if_true,
+ if_false);
+ break;
+ case Operation::kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(smi_right, smi_left, if_true,
+ if_false);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ },
+ [&] {
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_left_float = SmiToFloat64(smi_left);
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
+ });
+ },
+ [&] {
+ CSA_ASSERT(this, IsHeapNumber(left));
+ var_left_float = LoadHeapNumberValue(left);
+
+ Branch(TaggedIsSmi(right),
+ [&] {
+ var_right_float = SmiToFloat64(right);
+ Goto(&do_float_comparison);
+ },
+ [&] {
+ CSA_ASSERT(this, IsHeapNumber(right));
+ var_right_float = LoadHeapNumberValue(right);
+ Goto(&do_float_comparison);
+ });
+ });
BIND(&do_float_comparison);
{
@@ -11086,69 +11357,66 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
{
// Since {lhs} is a Smi, the comparison can only yield true
// iff the {rhs} is a HeapNumber with the same float64 value.
- GotoIf(TaggedIsSmi(rhs), if_false);
- GotoIfNot(IsHeapNumber(rhs), if_false);
- var_lhs_value.Bind(SmiToFloat64(lhs));
- var_rhs_value.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
+ Branch(TaggedIsSmi(rhs), if_false, [&] {
+ GotoIfNot(IsHeapNumber(rhs), if_false);
+ var_lhs_value.Bind(SmiToFloat64(lhs));
+ var_rhs_value.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ });
}
BIND(&if_lhsisheapobject);
{
// Check if the {rhs} is a Smi.
- Label if_rhsissmi(this), if_rhsisheapobject(this);
- Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisheapobject);
-
- BIND(&if_rhsissmi);
- {
- // Since {rhs} is a Smi, the comparison can only yield true
- // iff the {lhs} is a HeapNumber with the same float64 value.
- GotoIfNot(IsHeapNumber(lhs), if_false);
- var_lhs_value.Bind(LoadHeapNumberValue(lhs));
- var_rhs_value.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
- }
-
- BIND(&if_rhsisheapobject);
- {
- // Now this can only yield true if either both {lhs} and {rhs} are
- // HeapNumbers with the same value, or both are Strings with the same
- // character sequence, or both are BigInts with the same value.
- Label if_lhsisheapnumber(this), if_lhsisstring(this),
- if_lhsisbigint(this);
- Node* const lhs_map = LoadMap(lhs);
- GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
- Node* const lhs_instance_type = LoadMapInstanceType(lhs_map);
- GotoIf(IsStringInstanceType(lhs_instance_type), &if_lhsisstring);
- Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
- if_false);
-
- BIND(&if_lhsisheapnumber);
- {
- GotoIfNot(IsHeapNumber(rhs), if_false);
- var_lhs_value.Bind(LoadHeapNumberValue(lhs));
- var_rhs_value.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
- }
-
- BIND(&if_lhsisstring);
- {
- // Now we can only yield true if {rhs} is also a String
- // with the same sequence of characters.
- GotoIfNot(IsString(rhs), if_false);
- Node* const result =
- CallBuiltin(Builtins::kStringEqual, NoContextConstant(), lhs, rhs);
- Branch(IsTrue(result), if_true, if_false);
- }
-
- BIND(&if_lhsisbigint);
- {
- GotoIfNot(IsBigInt(rhs), if_false);
- Node* const result = CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), lhs, rhs);
- Branch(IsTrue(result), if_true, if_false);
- }
- }
+ Branch(TaggedIsSmi(rhs),
+ [&] {
+ // Since {rhs} is a Smi, the comparison can only yield true
+ // iff the {lhs} is a HeapNumber with the same float64 value.
+ GotoIfNot(IsHeapNumber(lhs), if_false);
+ var_lhs_value.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_value.Bind(SmiToFloat64(rhs));
+ Goto(&do_fcmp);
+ },
+ [&] {
+ // Now this can only yield true if either both {lhs} and {rhs} are
+ // HeapNumbers with the same value, or both are Strings with the
+ // same character sequence, or both are BigInts with the same
+ // value.
+ Label if_lhsisheapnumber(this), if_lhsisstring(this),
+ if_lhsisbigint(this);
+ Node* const lhs_map = LoadMap(lhs);
+ GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
+ Node* const lhs_instance_type = LoadMapInstanceType(lhs_map);
+ GotoIf(IsStringInstanceType(lhs_instance_type), &if_lhsisstring);
+ Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
+ if_false);
+
+ BIND(&if_lhsisheapnumber);
+ {
+ GotoIfNot(IsHeapNumber(rhs), if_false);
+ var_lhs_value.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_value.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ BIND(&if_lhsisstring);
+ {
+ // Now we can only yield true if {rhs} is also a String
+ // with the same sequence of characters.
+ GotoIfNot(IsString(rhs), if_false);
+ Node* const result = CallBuiltin(Builtins::kStringEqual,
+ NoContextConstant(), lhs, rhs);
+ Branch(IsTrue(result), if_true, if_false);
+ }
+
+ BIND(&if_lhsisbigint);
+ {
+ GotoIfNot(IsBigInt(rhs), if_false);
+ Node* const result = CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), lhs, rhs);
+ Branch(IsTrue(result), if_true, if_false);
+ }
+ });
}
BIND(&do_fcmp);
@@ -11181,9 +11449,9 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
}
}
-TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<HeapObject> object,
+TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
+ SloppyTNode<Object> object,
SloppyTNode<Object> key,
- SloppyTNode<Context> context,
HasPropertyLookupMode mode) {
Label call_runtime(this, Label::kDeferred), return_true(this),
return_false(this), end(this), if_proxy(this, Label::kDeferred);
@@ -11715,8 +11983,8 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
Node* native_context = LoadNativeContext(context);
Node* length = SmiConstant(2);
int const elements_size = FixedArray::SizeFor(2);
- Node* elements =
- Allocate(elements_size + JSArray::kSize + JSIteratorResult::kSize);
+ TNode<FixedArray> elements = UncheckedCast<FixedArray>(
+ Allocate(elements_size + JSArray::kSize + JSIteratorResult::kSize));
StoreObjectFieldRoot(elements, FixedArray::kMapOffset,
Heap::kFixedArrayMapRootIndex);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
@@ -11761,6 +12029,23 @@ Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
return IsSetWord32<JSArrayBuffer::WasNeutered>(buffer_bit_field);
}
+void CodeStubAssembler::ThrowIfArrayBufferIsDetached(
+ SloppyTNode<Context> context, TNode<JSArrayBuffer> array_buffer,
+ const char* method_name) {
+ Label if_detached(this, Label::kDeferred), if_not_detached(this);
+ Branch(IsDetachedBuffer(array_buffer), &if_detached, &if_not_detached);
+ BIND(&if_detached);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+ BIND(&if_not_detached);
+}
+
+void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
+ SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
+ const char* method_name) {
+ TNode<JSArrayBuffer> buffer = LoadArrayBufferViewBuffer(array_buffer_view);
+ ThrowIfArrayBufferIsDetached(context, buffer, method_name);
+}
+
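The two new helpers give builtins a one-line guard that raises the "detached operation" TypeError before touching a buffer. A rough stand-alone analogue, using hypothetical types in place of JSArrayBuffer and a C++ exception in place of the MessageTemplate machinery:

    #include <stdexcept>
    #include <string>

    struct BufferLike {
      bool detached;  // corresponds to the JSArrayBuffer::WasNeutered bit field
    };

    // Throw before operating on a detached buffer, tagging the error with the caller.
    static void ThrowIfDetached(const BufferLike& buffer, const char* method_name) {
      if (buffer.detached) {
        throw std::runtime_error(std::string(method_name) +
                                 " called on a detached ArrayBuffer");
      }
    }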
TNode<JSArrayBuffer> CodeStubAssembler::LoadArrayBufferViewBuffer(
TNode<JSArrayBufferView> array_buffer_view) {
return LoadObjectField<JSArrayBuffer>(array_buffer_view,
@@ -11959,6 +12244,13 @@ Node* CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
+TNode<BoolT> CodeStubAssembler::IsRuntimeCallStatsEnabled() {
+ TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
+ MachineType::Int32(),
+ ExternalConstant(ExternalReference::address_of_runtime_stats_flag())));
+ return Word32NotEqual(flag_value, Int32Constant(0));
+}
+
Node* CodeStubAssembler::IsPromiseHookEnabled() {
Node* const promise_hook = Load(
MachineType::Pointer(),
@@ -12191,9 +12483,8 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
{
// Avoid runtime-call for empty dictionary receivers.
GotoIfNot(IsDictionaryMap(receiver_map), if_runtime);
- Node* properties = LoadSlowProperties(receiver);
- Node* length = LoadFixedArrayElement(
- properties, NameDictionary::kNumberOfElementsIndex);
+ TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
+ TNode<Smi> length = GetNumberOfElements(properties);
GotoIfNot(WordEqual(length, SmiConstant(0)), if_runtime);
// Check that there are no elements on the {receiver} and its prototype
// chain. Given that we do not create an EnumCache for dict-mode objects,
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 3d7859f064..51ed647412 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -11,6 +11,7 @@
#include "src/compiler/code-assembler.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/bigint.h"
#include "src/roots.h"
namespace v8 {
@@ -46,7 +47,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(empty_string, empty_string, EmptyString) \
- V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
V(FalseValue, false_value, False) \
V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
@@ -85,7 +85,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
uncompiled_data_with_pre_parsed_scope_map, \
UncompiledDataWithPreParsedScopeMap) \
V(UndefinedValue, undefined_value, Undefined) \
- V(WeakCellMap, weak_cell_map, WeakCellMap) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
@@ -103,12 +102,16 @@ struct IteratorRecord {
compiler::TNode<Object> next;
};
+#ifdef DEBUG
#define CSA_CHECK(csa, x) \
(csa)->Check( \
[&]() -> compiler::Node* { \
return implicit_cast<compiler::SloppyTNode<Word32T>>(x); \
}, \
#x, __FILE__, __LINE__)
+#else
+#define CSA_CHECK(csa, x) (csa)->FastCheck(x)
+#endif
#ifdef DEBUG
// Add stringified versions to the given values, except the first. That is,
@@ -305,9 +308,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return UncheckedCast<HeapObject>(value);
}
- TNode<JSArray> TaggedToJSArray(TNode<Object> value, Label* fail) {
- GotoIf(TaggedIsSmi(value), fail);
- TNode<HeapObject> heap_object = CAST(value);
+ TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
+ Label* fail) {
GotoIfNot(IsJSArray(heap_object), fail);
return UncheckedCast<JSArray>(heap_object);
}
@@ -320,18 +322,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return UncheckedCast<JSArray>(heap_object);
}
- TNode<JSDataView> TaggedToJSDataView(TNode<Object> value, Label* fail) {
- GotoIf(TaggedIsSmi(value), fail);
- TNode<HeapObject> heap_object = CAST(value);
+ TNode<JSDataView> HeapObjectToJSDataView(TNode<HeapObject> heap_object,
+ Label* fail) {
GotoIfNot(IsJSDataView(heap_object), fail);
- return UncheckedCast<JSDataView>(heap_object);
+ return CAST(heap_object);
}
- TNode<JSReceiver> TaggedToCallable(TNode<Object> value, Label* fail) {
- GotoIf(TaggedIsSmi(value), fail);
- TNode<HeapObject> result = UncheckedCast<HeapObject>(value);
- GotoIfNot(IsCallable(result), fail);
- return CAST(result);
+ TNode<JSReceiver> HeapObjectToCallable(TNode<HeapObject> heap_object,
+ Label* fail) {
+ GotoIfNot(IsCallable(heap_object), fail);
+ return CAST(heap_object);
}
TNode<HeapNumber> UnsafeCastNumberToHeapNumber(TNode<Number> p_n) {
@@ -382,6 +382,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return p_o;
}
+ TNode<Object> UnsafeCastObjectToLoadFn(TNode<Object> p_o) { return p_o; }
+ TNode<Object> UnsafeCastObjectToStoreFn(TNode<Object> p_o) { return p_o; }
+ TNode<Object> UnsafeCastObjectToCanUseSameAccessorFn(TNode<Object> p_o) {
+ return p_o;
+ }
+
TNode<NumberDictionary> UnsafeCastObjectToNumberDictionary(
TNode<Object> p_o) {
return CAST(p_o);
@@ -520,6 +526,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd, Word32And)
SMI_ARITHMETIC_BINOP(SmiOr, WordOr, Word32Or)
#undef SMI_ARITHMETIC_BINOP
+ TNode<Smi> SmiInc(TNode<Smi> value) { return SmiAdd(value, SmiConstant(1)); }
TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
@@ -641,6 +648,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* extra_node3 = nullptr, const char* extra_node3_name = "",
Node* extra_node4 = nullptr, const char* extra_node4_name = "",
Node* extra_node5 = nullptr, const char* extra_node5_name = "");
+ void FastCheck(TNode<BoolT> condition);
// The following Call wrappers call an object according to the semantics that
  // one finds in the ECMAScript spec, operating on a Callable (e.g. a
@@ -746,7 +754,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
void BranchIfFastJSArray(Node* object, Node* context, Label* if_true,
- Label* if_false);
+ Label* if_false, bool iteration_only = false);
void BranchIfNotFastJSArray(Node* object, Node* context, Label* if_true,
Label* if_false) {
BranchIfFastJSArray(object, context, if_false, if_true);
@@ -916,10 +924,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* PointerToSeqStringData(Node* seq_string);
// Load value field of a JSValue object.
Node* LoadJSValueValue(Node* object);
- // Load value field of a WeakCell object.
- TNode<Object> LoadWeakCellValueUnchecked(SloppyTNode<HeapObject> weak_cell);
- TNode<Object> LoadWeakCellValue(SloppyTNode<WeakCell> weak_cell,
- Label* if_cleared = nullptr);
// Figures out whether the value of maybe_object is:
// - a SMI (jump to "if_smi", "extracted" will be the SMI value)
@@ -960,6 +964,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<MaybeObject> MakeWeak(TNode<HeapObject> value);
+ void FixedArrayBoundsCheck(TNode<FixedArrayBase> array, Node* index,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
+
// Load an array element from a FixedArray / WeakFixedArray / PropertyArray.
TNode<MaybeObject> LoadArrayElement(
SloppyTNode<HeapObject> object, int array_header_size, Node* index,
@@ -969,11 +977,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load an array element from a FixedArray.
TNode<Object> LoadFixedArrayElement(
- SloppyTNode<HeapObject> object, Node* index, int additional_offset = 0,
+ TNode<FixedArray> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
- TNode<Object> LoadFixedArrayElement(SloppyTNode<HeapObject> object,
+ TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
TNode<IntPtrT> index,
LoadSensitivity needs_poisoning) {
return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
@@ -981,27 +989,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
TNode<Object> LoadFixedArrayElement(
- SloppyTNode<HeapObject> object, TNode<IntPtrT> index,
- int additional_offset = 0,
+ TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, index, additional_offset,
INTPTR_PARAMETERS, needs_poisoning);
}
TNode<Object> LoadFixedArrayElement(
- SloppyTNode<HeapObject> object, int index, int additional_offset = 0,
+ TNode<FixedArray> object, int index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
additional_offset, INTPTR_PARAMETERS,
needs_poisoning);
}
- TNode<Object> LoadFixedArrayElement(TNode<HeapObject> object,
+ TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object,
TNode<Smi> index) {
return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS);
}
TNode<Object> LoadPropertyArrayElement(SloppyTNode<PropertyArray> object,
SloppyTNode<IntPtrT> index);
+ TNode<IntPtrT> LoadPropertyArrayLength(TNode<PropertyArray> object);
// Load an array element from a FixedArray / WeakFixedArray, untag it and
// return it as Word32.
@@ -1048,6 +1056,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SMI_PARAMETERS);
}
+ // Load an array element from a FixedArray, FixedDoubleArray or a
+ // NumberDictionary (depending on the |elements_kind|) and return
+ // it as a tagged value. Assumes that the |index| passed a length
+ // check before. Bails out to |if_accessor| if the element that
+ // was found is an accessor, or to |if_hole| if the element at
+ // the given |index| is not found in |elements|.
+ TNode<Object> LoadFixedArrayBaseElementAsTagged(
+ TNode<FixedArrayBase> elements, TNode<IntPtrT> index,
+ TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole);
+
// Load a feedback slot from a FeedbackVector.
TNode<MaybeObject> LoadFeedbackVectorSlot(
Node* object, Node* index, int additional_offset = 0,
@@ -1069,6 +1087,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
+ TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind);
// Parts of the above, factored out for readability:
Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
@@ -1107,6 +1127,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Map> LoadJSArrayElementsMap(SloppyTNode<Int32T> kind,
SloppyTNode<Context> native_context);
+ TNode<BoolT> IsGeneratorFunction(TNode<JSFunction> function);
+ TNode<BoolT> HasPrototypeProperty(TNode<JSFunction> function, TNode<Map> map);
+ void GotoIfPrototypeRequiresRuntimeLookup(TNode<JSFunction> function,
+ TNode<Map> map, Label* runtime);
// Load the "prototype" property of a JSFunction.
Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);
@@ -1137,8 +1161,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* StoreObjectFieldRoot(Node* object, int offset,
Heap::RootListIndex root);
// Store an array element to a FixedArray.
- Node* StoreFixedArrayElement(
- Node* object, int index, Node* value,
+ void StoreFixedArrayElement(
+ TNode<FixedArray> object, int index, SloppyTNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
return StoreFixedArrayElement(object, IntPtrConstant(index), value,
barrier_mode);
@@ -1147,21 +1171,40 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
Node* StoreElements(TNode<Object> object, TNode<FixedArrayBase> elements);
- Node* StoreFixedArrayElement(
- Node* object, Node* index, Node* value,
+ void StoreFixedArrayOrPropertyArrayElement(
+ Node* array, Node* index, Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
- Node* StoreFixedArrayElementSmi(
- TNode<FixedArray> object, TNode<Smi> index, TNode<Object> value,
+ void StoreFixedArrayElement(
+ TNode<FixedArray> array, Node* index, SloppyTNode<Object> value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS) {
+ FixedArrayBoundsCheck(array, index, additional_offset, parameter_mode);
+ StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
+ additional_offset, parameter_mode);
+ }
+
+ void StorePropertyArrayElement(
+ TNode<PropertyArray> array, Node* index, SloppyTNode<Object> value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS) {
+ StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode,
+ additional_offset, parameter_mode);
+ }
+
+ void StoreFixedArrayElementSmi(
+ TNode<FixedArray> array, TNode<Smi> index, TNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
- return StoreFixedArrayElement(object, index, value, barrier_mode, 0,
- SMI_PARAMETERS);
+ StoreFixedArrayElement(array, index, value, barrier_mode, 0,
+ SMI_PARAMETERS);
}
- Node* StoreFixedDoubleArrayElement(
- Node* object, Node* index, Node* value,
+ void StoreFixedDoubleArrayElement(
+ TNode<FixedDoubleArray> object, Node* index, TNode<Float64T> value,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
Node* StoreFeedbackVectorSlot(
@@ -1356,18 +1399,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* capacity = nullptr,
Node* allocation_site = nullptr);
- TNode<FixedArray> AllocateFixedArray(
+ TNode<FixedArrayBase> AllocateFixedArray(
ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone,
SloppyTNode<Map> fixed_array_map = nullptr);
- TNode<FixedArray> AllocateFixedArray(
+ TNode<FixedArrayBase> AllocateFixedArray(
ElementsKind kind, TNode<IntPtrT> capacity, AllocationFlags flags,
SloppyTNode<Map> fixed_array_map = nullptr) {
return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags,
fixed_array_map);
}
+ TNode<FixedArray> AllocateZeroedFixedArray(TNode<IntPtrT> capacity) {
+ TNode<FixedArray> result = UncheckedCast<FixedArray>(
+ AllocateFixedArray(PACKED_ELEMENTS, capacity,
+ AllocationFlag::kAllowLargeObjectAllocation));
+ FillFixedArrayWithSmiZero(result, capacity);
+ return result;
+ }
+
+ TNode<FixedDoubleArray> AllocateZeroedFixedDoubleArray(
+ TNode<IntPtrT> capacity) {
+ TNode<FixedDoubleArray> result = UncheckedCast<FixedDoubleArray>(
+ AllocateFixedArray(FLOAT64_ELEMENTS, capacity,
+ AllocationFlag::kAllowLargeObjectAllocation));
+ FillFixedDoubleArrayWithZero(result, capacity);
+ return result;
+ }
+
Node* AllocatePropertyArray(Node* capacity,
ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
@@ -1388,6 +1448,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Heap::RootListIndex value_root_index,
ParameterMode mode = INTPTR_PARAMETERS);
+ // Uses memset to effectively initialize the given FixedArray with zeroes.
+ void FillFixedArrayWithSmiZero(TNode<FixedArray> array,
+ TNode<IntPtrT> length);
+ void FillFixedDoubleArrayWithZero(TNode<FixedDoubleArray> array,
+ TNode<IntPtrT> length);
+
void FillPropertyArrayWithUndefined(Node* array, Node* from_index,
Node* to_index,
ParameterMode mode = INTPTR_PARAMETERS);
@@ -1440,11 +1506,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SMI_PARAMETERS);
}
- TNode<FixedArray> ConvertFixedArrayBaseToFixedArray(
- TNode<FixedArrayBase> base, Label* cast_fail);
+ TNode<FixedArray> HeapObjectToFixedArray(TNode<HeapObject> base,
+ Label* cast_fail);
- TNode<FixedDoubleArray> ConvertFixedArrayBaseToFixedDoubleArray(
- TNode<FixedArrayBase> base, Label* cast_fail) {
+ TNode<FixedDoubleArray> HeapObjectToFixedDoubleArray(TNode<HeapObject> base,
+ Label* cast_fail) {
GotoIf(WordNotEqual(LoadMap(base),
LoadRoot(Heap::kFixedDoubleArrayMapRootIndex)),
cast_fail);
@@ -1484,15 +1550,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// passed as the |source| parameter.
// * |parameter_mode| determines the parameter mode of |first|, |count| and
// |capacity|.
- TNode<FixedArray> ExtractFixedArray(
+ TNode<FixedArrayBase> ExtractFixedArray(
Node* source, Node* first, Node* count = nullptr,
Node* capacity = nullptr,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
- TNode<FixedArray> ExtractFixedArray(
- TNode<FixedArray> source, TNode<Smi> first, TNode<Smi> count,
+ TNode<FixedArrayBase> ExtractFixedArray(
+ TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
TNode<Smi> capacity,
ExtractFixedArrayFlags extract_flags =
ExtractFixedArrayFlag::kAllFixedArrays) {
@@ -1588,9 +1654,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* TruncateHeapNumberValueToWord32(Node* object);
// Conversions.
+ void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>& output,
+ Label* if_smi);
+ void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>& output,
+ Label* if_smi);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
+ TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value);
@@ -1600,7 +1671,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Variable* var_numeric,
Variable* var_feedback);
- SloppyTNode<WordT> TimesPointerSize(Node* value);
+ TNode<WordT> TimesPointerSize(SloppyTNode<WordT> value);
+ TNode<IntPtrT> TimesPointerSize(TNode<IntPtrT> value) {
+ return Signed(TimesPointerSize(implicit_cast<TNode<WordT>>(value)));
+ }
+ TNode<UintPtrT> TimesPointerSize(TNode<UintPtrT> value) {
+ return Unsigned(TimesPointerSize(implicit_cast<TNode<WordT>>(value)));
+ }
+ TNode<WordT> TimesDoubleSize(SloppyTNode<WordT> value);
+ TNode<UintPtrT> TimesDoubleSize(TNode<UintPtrT> value) {
+ return Unsigned(TimesDoubleSize(implicit_cast<TNode<WordT>>(value)));
+ }
+ TNode<IntPtrT> TimesDoubleSize(TNode<IntPtrT> value) {
+ return Signed(TimesDoubleSize(implicit_cast<TNode<WordT>>(value)));
+ }
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
@@ -1659,9 +1743,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsFastJSArray(SloppyTNode<Object> object,
SloppyTNode<Context> context);
- TNode<BoolT> IsFastJSArrayWithNoCustomIteration(
- TNode<Object> object, TNode<Context> context,
- TNode<Context> native_context);
+ TNode<BoolT> IsFastJSArrayWithNoCustomIteration(TNode<Object> object,
+ TNode<Context> context);
TNode<BoolT> IsFeedbackCell(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFeedbackVector(SloppyTNode<HeapObject> object);
TNode<BoolT> IsContext(SloppyTNode<HeapObject> object);
@@ -1739,7 +1822,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsSymbol(SloppyTNode<HeapObject> object);
TNode<BoolT> IsUndetectableMap(SloppyTNode<Map> map);
- TNode<BoolT> IsWeakCell(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNotWeakFixedArraySubclass(SloppyTNode<HeapObject> object);
TNode<BoolT> IsZeroOrContext(SloppyTNode<Object> object);
@@ -1795,6 +1877,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
bool IsFastElementsKind(ElementsKind kind) {
return v8::internal::IsFastElementsKind(kind);
}
+ TNode<BoolT> IsDictionaryElementsKind(TNode<Int32T> elements_kind) {
+ return ElementsKindEqual(elements_kind, Int32Constant(DICTIONARY_ELEMENTS));
+ }
TNode<BoolT> IsDoubleElementsKind(TNode<Int32T> elements_kind);
bool IsDoubleElementsKind(ElementsKind kind) {
return v8::internal::IsDoubleElementsKind(kind);
@@ -1890,6 +1975,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<JSReceiver> ToObject(SloppyTNode<Context> context,
SloppyTNode<Object> input);
+ // Same as ToObject but avoids the Builtin call if |input| is already a
+ // JSReceiver.
+ TNode<JSReceiver> ToObject_Inline(TNode<Context> context,
+ TNode<Object> input);
+
enum ToIntegerTruncationMode {
kNoTruncation,
kTruncateMinusZero,
@@ -1981,6 +2071,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Int32Constant(0));
}
+ // Returns true if all of the mask's bits in a given |word32| are set.
+ TNode<BoolT> IsAllSetWord32(SloppyTNode<Word32T> word32, uint32_t mask) {
+ TNode<Int32T> const_mask = Int32Constant(mask);
+ return Word32Equal(Word32And(word32, const_mask), const_mask);
+ }
+
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
TNode<BoolT> IsSetWord(SloppyTNode<WordT> word) {
@@ -2078,8 +2174,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
const int kKeyToDetailsOffset =
(ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
- return Unsigned(LoadAndUntagToWord32FixedArrayElement(container, key_index,
- kKeyToDetailsOffset));
+ return Unsigned(LoadAndUntagToWord32FixedArrayElement(
+ CAST(container), key_index, kKeyToDetailsOffset));
}
// Loads the value for the entry with the given key_index.
@@ -2091,8 +2187,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
const int kKeyToValueOffset =
(ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
- return UncheckedCast<Object>(
- LoadFixedArrayElement(container, key_index, kKeyToValueOffset));
+ return LoadFixedArrayElement(CAST(container), key_index, kKeyToValueOffset);
}
TNode<Uint32T> LoadDetailsByKeyIndex(TNode<DescriptorArray> container,
@@ -2105,7 +2200,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Stores the details for the entry with the given key_index.
// |details| must be a Smi.
template <class ContainerType>
- void StoreDetailsByKeyIndex(Node* container, Node* key_index, Node* details) {
+ void StoreDetailsByKeyIndex(TNode<ContainerType> container,
+ TNode<IntPtrT> key_index, TNode<Smi> details) {
const int kKeyToDetailsOffset =
(ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
@@ -2116,7 +2212,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Stores the value for the entry with the given key_index.
template <class ContainerType>
void StoreValueByKeyIndex(
- Node* container, Node* key_index, Node* value,
+ TNode<ContainerType> container, TNode<IntPtrT> key_index,
+ TNode<Object> value,
WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER) {
const int kKeyToValueOffset =
(ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
@@ -2250,8 +2347,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Object> GetProperty(SloppyTNode<Context> context,
SloppyTNode<Object> receiver,
SloppyTNode<Object> name) {
- return CallStub(Builtins::CallableFor(isolate(), Builtins::kGetProperty),
- context, receiver, name);
+ return CallBuiltin(Builtins::kGetProperty, context, receiver, name);
+ }
+
+ TNode<Object> SetPropertyStrict(TNode<Context> context,
+ TNode<Object> receiver, TNode<Object> key,
+ TNode<Object> value) {
+ return CallBuiltin(Builtins::kSetProperty, context, receiver, key, value);
}
Node* GetMethod(Node* context, Node* object, Handle<Name> name,
@@ -2461,8 +2563,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Store a weak in-place reference into the FeedbackVector.
TNode<MaybeObject> StoreWeakReferenceInFeedbackVector(
- SloppyTNode<FeedbackVector> feedback_vector, SloppyTNode<IntPtrT> slot,
- TNode<HeapObject> value);
+ SloppyTNode<FeedbackVector> feedback_vector, Node* slot,
+ SloppyTNode<HeapObject> value, int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Create a new AllocationSite and install it into a feedback vector.
TNode<AllocationSite> CreateAllocationSiteInFeedbackVector(
@@ -2590,9 +2693,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
- TNode<Oddball> HasProperty(SloppyTNode<HeapObject> object,
+ TNode<Oddball> HasProperty(SloppyTNode<Context> context,
+ SloppyTNode<Object> object,
SloppyTNode<Object> key,
- SloppyTNode<Context> context,
HasPropertyLookupMode mode);
Node* Typeof(Node* value);
@@ -2609,8 +2712,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Debug helpers
Node* IsDebugActive();
+ TNode<BoolT> IsRuntimeCallStatsEnabled();
+
// TypedArray/ArrayBuffer helpers
Node* IsDetachedBuffer(Node* buffer);
+ void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
+ TNode<JSArrayBuffer> array_buffer,
+ const char* method_name);
+ void ThrowIfArrayBufferViewBufferIsDetached(
+ SloppyTNode<Context> context, TNode<JSArrayBufferView> array_buffer_view,
+ const char* method_name);
TNode<JSArrayBuffer> LoadArrayBufferViewBuffer(
TNode<JSArrayBufferView> array_buffer_view);
TNode<RawPtrT> LoadArrayBufferBackingStore(TNode<JSArrayBuffer> array_buffer);
@@ -2945,13 +3056,13 @@ class ToDirectStringAssembler : public CodeStubAssembler {
// Returns a pointer to the beginning of the string data.
// Jumps to if_bailout if the external string cannot be unpacked.
- Node* PointerToData(Label* if_bailout) {
+ TNode<RawPtrT> PointerToData(Label* if_bailout) {
return TryToSequential(PTR_TO_DATA, if_bailout);
}
// Returns a pointer that, offset-wise, looks like a String.
// Jumps to if_bailout if the external string cannot be unpacked.
- Node* PointerToString(Label* if_bailout) {
+ TNode<RawPtrT> PointerToString(Label* if_bailout) {
return TryToSequential(PTR_TO_STRING, if_bailout);
}
@@ -2963,7 +3074,7 @@ class ToDirectStringAssembler : public CodeStubAssembler {
Node* is_external() { return var_is_external_.value(); }
private:
- Node* TryToSequential(StringPointerKind ptr_kind, Label* if_bailout);
+ TNode<RawPtrT> TryToSequential(StringPointerKind ptr_kind, Label* if_bailout);
Variable var_string_;
Variable var_instance_type_;
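For reference, a plain C++ analogue of the IsAllSetWord32 helper introduced in this header (an illustrative sketch only, not CSA code): the check passes exactly when every bit of the mask is present in the word.

#include <cstdint>

// Returns true only if all bits of |mask| are set in |word32|, mirroring
// Word32Equal(Word32And(word32, const_mask), const_mask) in the hunk above.
static bool IsAllSetWord32(uint32_t word32, uint32_t mask) {
  return (word32 & mask) == mask;
}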
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index f887e2e89e..6bbcefa781 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -71,7 +71,7 @@ const double kMaxIdleTimeToExpectInMs = 40;
class MemoryPressureTask : public CancelableTask {
public:
- MemoryPressureTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ MemoryPressureTask(CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher);
~MemoryPressureTask() override;
@@ -84,8 +84,7 @@ class MemoryPressureTask : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
};
-MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
- CancelableTaskManager* task_manager,
+MemoryPressureTask::MemoryPressureTask(CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
: CancelableTask(task_manager), dispatcher_(dispatcher) {}
@@ -99,7 +98,7 @@ void MemoryPressureTask::RunInternal() {
class CompilerDispatcher::AbortTask : public CancelableTask {
public:
- AbortTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ AbortTask(CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher);
~AbortTask() override;
@@ -112,8 +111,7 @@ class CompilerDispatcher::AbortTask : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(AbortTask);
};
-CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
- CancelableTaskManager* task_manager,
+CompilerDispatcher::AbortTask::AbortTask(CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
: CancelableTask(task_manager), dispatcher_(dispatcher) {}
@@ -125,7 +123,7 @@ void CompilerDispatcher::AbortTask::RunInternal() {
class CompilerDispatcher::WorkerTask : public CancelableTask {
public:
- WorkerTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ WorkerTask(CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher);
~WorkerTask() override;
@@ -138,8 +136,7 @@ class CompilerDispatcher::WorkerTask : public CancelableTask {
DISALLOW_COPY_AND_ASSIGN(WorkerTask);
};
-CompilerDispatcher::WorkerTask::WorkerTask(Isolate* isolate,
- CancelableTaskManager* task_manager,
+CompilerDispatcher::WorkerTask::WorkerTask(CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
: CancelableTask(task_manager), dispatcher_(dispatcher) {}
@@ -151,8 +148,7 @@ void CompilerDispatcher::WorkerTask::RunInternal() {
class CompilerDispatcher::IdleTask : public CancelableIdleTask {
public:
- IdleTask(Isolate* isolate, CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher);
+ IdleTask(CancelableTaskManager* task_manager, CompilerDispatcher* dispatcher);
~IdleTask() override;
// CancelableIdleTask implementation.
@@ -164,8 +160,7 @@ class CompilerDispatcher::IdleTask : public CancelableIdleTask {
DISALLOW_COPY_AND_ASSIGN(IdleTask);
};
-CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
- CancelableTaskManager* task_manager,
+CompilerDispatcher::IdleTask::IdleTask(CancelableTaskManager* task_manager,
CompilerDispatcher* dispatcher)
: CancelableIdleTask(task_manager), dispatcher_(dispatcher) {}
@@ -465,7 +460,7 @@ void CompilerDispatcher::MemoryPressureNotification(
}
platform_->CallOnForegroundThread(
reinterpret_cast<v8::Isolate*>(isolate_),
- new MemoryPressureTask(isolate_, task_manager_.get(), this));
+ new MemoryPressureTask(task_manager_.get(), this));
}
}
@@ -490,7 +485,7 @@ void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
idle_task_scheduled_ = true;
}
platform_->CallIdleOnForegroundThread(
- v8_isolate, new IdleTask(isolate_, task_manager_.get(), this));
+ v8_isolate, new IdleTask(task_manager_.get(), this));
}
void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
@@ -500,8 +495,8 @@ void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
void CompilerDispatcher::ScheduleAbortTask() {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- platform_->CallOnForegroundThread(
- v8_isolate, new AbortTask(isolate_, task_manager_.get(), this));
+ platform_->CallOnForegroundThread(v8_isolate,
+ new AbortTask(task_manager_.get(), this));
}
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
@@ -526,7 +521,7 @@ void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
++num_worker_tasks_;
}
platform_->CallOnWorkerThread(
- base::make_unique<WorkerTask>(isolate_, task_manager_.get(), this));
+ base::make_unique<WorkerTask>(task_manager_.get(), this));
}
void CompilerDispatcher::DoBackgroundWork() {
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 6c5211b74d..58c1099135 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -7,7 +7,7 @@
#include <algorithm>
#include <memory>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
#include "src/ast/prettyprinter.h"
@@ -130,11 +130,7 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
// Implementation of UnoptimizedCompilationJob
CompilationJob::Status UnoptimizedCompilationJob::ExecuteJob() {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- DisallowCodeDependencyChange no_dependency_change;
-
+ DisallowHeapAccess no_heap_access;
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
@@ -212,11 +208,7 @@ CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
}
CompilationJob::Status OptimizedCompilationJob::ExecuteJob() {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- DisallowCodeDependencyChange no_dependency_change;
-
+ DisallowHeapAccess no_heap_access;
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToExecute);
ScopedTimer t(&time_taken_to_execute_);
@@ -478,9 +470,7 @@ std::unique_ptr<UnoptimizedCompilationJob> ExecuteUnoptimizedCompileJobs(
std::unique_ptr<UnoptimizedCompilationJob> GenerateUnoptimizedCode(
ParseInfo* parse_info, AccountingAllocator* allocator,
UnoptimizedCompilationJobList* inner_function_jobs) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
+ DisallowHeapAccess no_heap_access;
DCHECK(inner_function_jobs->empty());
if (!Compiler::Analyze(parse_info)) {
@@ -922,9 +912,7 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
std::unique_ptr<UnoptimizedCompilationJob> CompileTopLevelOnBackgroundThread(
ParseInfo* parse_info, AccountingAllocator* allocator,
UnoptimizedCompilationJobList* inner_function_jobs) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
+ DisallowHeapAccess no_heap_access;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileCodeBackground");
RuntimeCallTimerScope runtimeTimer(
@@ -1003,9 +991,7 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* source,
void BackgroundCompileTask::Run() {
TimedHistogramScope timer(timer_);
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
+ DisallowHeapAccess no_heap_access;
source_->info->set_on_background_thread(true);
@@ -1397,6 +1383,7 @@ struct ScriptCompileTimerScope {
kNoCacheBecausePacScript,
kNoCacheBecauseInDocumentWrite,
kNoCacheBecauseResourceWithNoCacheHandler,
+ kHitIsolateCacheWhenStreamingSource,
kCount
};
@@ -1469,8 +1456,9 @@ struct ScriptCompileTimerScope {
}
if (hit_isolate_cache_) {
- // There's probably no need to distinguish the different isolate cache
- // hits.
+ if (no_cache_reason_ == ScriptCompiler::kNoCacheBecauseStreamingSource) {
+ return CacheBehaviour::kHitIsolateCacheWhenStreamingSource;
+ }
return CacheBehaviour::kHitIsolateCacheWhenNoCache;
}
@@ -1524,6 +1512,7 @@ struct ScriptCompileTimerScope {
return isolate_->counters()->compile_script_with_produce_cache();
case CacheBehaviour::kHitIsolateCacheWhenNoCache:
case CacheBehaviour::kHitIsolateCacheWhenConsumeCodeCache:
+ case CacheBehaviour::kHitIsolateCacheWhenStreamingSource:
return isolate_->counters()->compile_script_with_isolate_cache_hit();
case CacheBehaviour::kConsumeCodeCacheFailed:
return isolate_->counters()->compile_script_consume_failed();
@@ -1595,11 +1584,11 @@ Handle<Script> NewScript(Isolate* isolate, ParseInfo* parse_info,
} // namespace
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
- Handle<String> source, const Compiler::ScriptDetails& script_details,
+ Isolate* isolate, Handle<String> source,
+ const Compiler::ScriptDetails& script_details,
ScriptOriginOptions origin_options, v8::Extension* extension,
ScriptData* cached_data, ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
- Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
if (compile_options == ScriptCompiler::kNoCompileOptions ||
@@ -1621,8 +1610,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
MaybeHandle<SharedFunctionInfo> maybe_result;
if (extension == nullptr) {
bool can_consume_code_cache =
- compile_options == ScriptCompiler::kConsumeCodeCache &&
- !isolate->debug()->is_active();
+ compile_options == ScriptCompiler::kConsumeCodeCache;
if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
}
@@ -1666,7 +1654,6 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
natives);
// Compile the function and add it to the isolate cache.
- Zone compile_zone(isolate->allocator(), ZONE_NAME);
if (origin_options.IsModule()) parse_info.set_module();
parse_info.set_extension(extension);
parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
@@ -1712,8 +1699,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
MaybeHandle<SharedFunctionInfo> maybe_result;
bool can_consume_code_cache =
- compile_options == ScriptCompiler::kConsumeCodeCache &&
- !isolate->debug()->is_active();
+ compile_options == ScriptCompiler::kConsumeCodeCache;
if (can_consume_code_cache) {
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
@@ -1775,9 +1761,9 @@ ScriptCompiler::ScriptStreamingTask* Compiler::NewBackgroundCompileTask(
MaybeHandle<SharedFunctionInfo>
Compiler::GetSharedFunctionInfoForStreamedScript(
- Handle<String> source, const ScriptDetails& script_details,
- ScriptOriginOptions origin_options, ScriptStreamingData* streaming_data) {
- Isolate* isolate = source->GetIsolate();
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, ScriptOriginOptions origin_options,
+ ScriptStreamingData* streaming_data) {
ScriptCompileTimerScope compile_timer(
isolate, ScriptCompiler::kNoCacheBecauseStreamingSource);
PostponeInterruptsScope postpone(isolate);
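The compiler.cc hunks above replace four separate assert scopes with a single DisallowHeapAccess. A minimal sketch of that consolidation, assuming only the scope types visible in the removed lines (the real class lives in V8's assert-scope machinery and may differ in detail):

// One RAII object that holds all four per-thread "disallow" scopes for
// the execute phase of a compilation job.
class DisallowHeapAccessSketch {
  DisallowHeapAllocation no_allocation_;
  DisallowHandleAllocation no_handles_;
  DisallowHandleDereference no_deref_;
  DisallowCodeDependencyChange no_dependency_change_;
};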
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 4789759dfc..b3a5b73997 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -139,9 +139,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
- Handle<String> source, const ScriptDetails& script_details,
- ScriptOriginOptions origin_options, v8::Extension* extension,
- ScriptData* cached_data, ScriptCompiler::CompileOptions compile_options,
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, ScriptOriginOptions origin_options,
+ v8::Extension* extension, ScriptData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code);
@@ -151,8 +152,9 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// have been released, however the object itself isn't freed and is still
// owned by the caller.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
- Handle<String> source, const ScriptDetails& script_details,
- ScriptOriginOptions origin_options, ScriptStreamingData* streaming_data);
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, ScriptOriginOptions origin_options,
+ ScriptStreamingData* streaming_data);
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
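With the new signatures above, callers pass the Isolate explicitly instead of recovering it from the source string. A hypothetical call site, sketched under the assumption that ScriptDetails is default-constructible and that the enum spellings match this V8 revision; all argument values are placeholders:

MaybeHandle<SharedFunctionInfo> maybe_info =
    Compiler::GetSharedFunctionInfoForScript(
        isolate, source, Compiler::ScriptDetails(), ScriptOriginOptions(),
        nullptr /* extension */, nullptr /* cached_data */,
        ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
        NOT_NATIVES_CODE);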
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 0b78795e00..0342a9c950 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -12,6 +12,7 @@
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/js-collection.h"
+#include "src/objects/js-generator.h"
#include "src/objects/module.h"
namespace v8 {
@@ -733,13 +734,11 @@ FieldAccess AccessBuilder::ForJSGlobalProxyNativeContext() {
// static
FieldAccess AccessBuilder::ForJSArrayIteratorIteratedObject() {
- FieldAccess access = {kTaggedBase,
- JSArrayIterator::kIteratedObjectOffset,
- Handle<Name>(),
- MaybeHandle<Map>(),
- Type::ReceiverOrUndefined(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayIterator::kIteratedObjectOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Receiver(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 62ed7e7d85..0b7d1a18a1 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -237,7 +237,7 @@ Handle<Cell> PropertyAccessInfo::export_cell() const {
return Handle<Cell>::cast(constant_);
}
-AccessInfoFactory::AccessInfoFactory(const JSHeapBroker* js_heap_broker,
+AccessInfoFactory::AccessInfoFactory(JSHeapBroker* js_heap_broker,
CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone)
: js_heap_broker_(js_heap_broker),
@@ -399,9 +399,9 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
dependencies()->DependOnFieldType(MapRef(js_heap_broker(), map),
number);
// Remember the field map, and try to infer a useful type.
- field_type = Type::For(js_heap_broker(),
- descriptors_field_type->AsClass());
- field_map = descriptors_field_type->AsClass();
+ Handle<Map> map(descriptors_field_type->AsClass(), isolate());
+ field_type = Type::For(js_heap_broker(), map);
+ field_map = MaybeHandle<Map>(map);
}
}
*access_info = PropertyAccessInfo::DataField(
@@ -428,10 +428,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
DCHECK(map->is_prototype_map());
Handle<PrototypeInfo> proto_info =
Map::GetOrCreatePrototypeInfo(map, isolate());
- DCHECK(proto_info->weak_cell()->IsWeakCell());
Handle<JSModuleNamespace> module_namespace(
- JSModuleNamespace::cast(
- WeakCell::cast(proto_info->weak_cell())->value()),
+ JSModuleNamespace::cast(proto_info->module_namespace()),
isolate());
Handle<Cell> cell(
Cell::cast(module_namespace->module()->exports()->Lookup(
@@ -583,9 +581,9 @@ namespace {
Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
ElementsKind that_kind) {
- if (IsHoleyOrDictionaryElementsKind(this_kind)) {
+ if (IsHoleyElementsKind(this_kind)) {
that_kind = GetHoleyElementsKind(that_kind);
- } else if (IsHoleyOrDictionaryElementsKind(that_kind)) {
+ } else if (IsHoleyElementsKind(that_kind)) {
this_kind = GetHoleyElementsKind(this_kind);
}
if (this_kind == that_kind) return Just(this_kind);
@@ -703,9 +701,9 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
dependencies()->DependOnFieldType(
MapRef(js_heap_broker(), transition_map), number);
// Remember the field map, and try to infer a useful type.
- field_type =
- Type::For(js_heap_broker(), descriptors_field_type->AsClass());
- field_map = descriptors_field_type->AsClass();
+ Handle<Map> map(descriptors_field_type->AsClass(), isolate());
+ field_type = Type::For(js_heap_broker(), map);
+ field_map = MaybeHandle<Map>(map);
}
}
dependencies()->DependOnTransition(MapRef(js_heap_broker(), transition_map));
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index fa737ce0c4..e9890bbb7a 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -140,7 +140,7 @@ class PropertyAccessInfo final {
// Factory class for {ElementAccessInfo}s and {PropertyAccessInfo}s.
class AccessInfoFactory final {
public:
- AccessInfoFactory(const JSHeapBroker* js_heap_broker,
+ AccessInfoFactory(JSHeapBroker* js_heap_broker,
CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone);
@@ -169,13 +169,13 @@ class AccessInfoFactory final {
PropertyAccessInfo* access_info);
CompilationDependencies* dependencies() const { return dependencies_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Factory* factory() const;
Isolate* isolate() const { return isolate_; }
Handle<Context> native_context() const { return native_context_; }
Zone* zone() const { return zone_; }
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
CompilationDependencies* const dependencies_;
Handle<Context> const native_context_;
Isolate* const isolate_;
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index d129274863..8e1c1ab8f4 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -14,6 +14,7 @@
#include "src/double.h"
#include "src/heap/heap-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -415,6 +416,48 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)
+#define ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
+ __ instr1(i.TempRegister(1), i.OutputRegister(0), i.InputRegister(0), \
+ SBit::SetCC); \
+ __ instr2(i.TempRegister(2), i.OutputRegister(1), \
+ Operand(i.InputRegister(1))); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
+ i.TempRegister(0)); \
+ __ teq(i.TempRegister(3), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0)); \
+ __ instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(0))); \
+ __ instr(i.TempRegister(2), i.OutputRegister(1), \
+ Operand(i.InputRegister(1))); \
+ __ strexd(i.TempRegister(3), i.TempRegister(1), i.TempRegister(2), \
+ i.TempRegister(0)); \
+ __ teq(i.TempRegister(3), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op) \
+ if (arch_opcode == kArmWord64AtomicNarrow##op) { \
+ __ mov(i.OutputRegister(1), Operand(0)); \
+ }
+
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -1148,6 +1191,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmRev:
+ __ rev(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmClz:
__ clz(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -2623,7 +2670,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
-
case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
break;
@@ -2638,17 +2684,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint8:
+ case kArmWord64AtomicNarrowExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint8);
break;
case kWord32AtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicExchangeUint16:
+ case kArmWord64AtomicNarrowExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint16);
break;
case kWord32AtomicExchangeWord32:
+ case kArmWord64AtomicNarrowExchangeUint32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(ExchangeUint32);
break;
case kWord32AtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2658,10 +2710,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint8:
+ case kArmWord64AtomicNarrowCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint8);
break;
case kWord32AtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
@@ -2671,15 +2725,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kWord32AtomicCompareExchangeUint16:
+ case kArmWord64AtomicNarrowCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint16);
break;
case kWord32AtomicCompareExchangeWord32:
+ case kArmWord64AtomicNarrowCompareExchangeUint32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(CompareExchangeUint32);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kWord32Atomic##op##Int8: \
@@ -2687,17 +2745,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint8: \
+ case kArmWord64AtomicNarrow##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint8); \
break; \
case kWord32Atomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint16: \
+ case kArmWord64AtomicNarrow##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint16); \
break; \
case kWord32Atomic##op##Word32: \
+ case kArmWord64AtomicNarrow##op##Uint32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
+ ATOMIC_NARROW_OP_CLEAR_HIGH_WORD(op##Uint32); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -2705,11 +2769,81 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
+ case kArmWord32AtomicPairLoad:
+ __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ dmb(ISH);
+ break;
+ case kArmWord32AtomicPairStore: {
+ Label store;
+ __ add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ dmb(ISH);
+ __ bind(&store);
+ __ ldrexd(i.TempRegister(1), i.TempRegister(2), i.TempRegister(0));
+ __ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
+ i.TempRegister(0));
+ __ teq(i.TempRegister(1), Operand(0));
+ __ b(ne, &store);
+ __ dmb(ISH);
+ break;
+ }
+#define ATOMIC_ARITH_BINOP_CASE(op, instr1, instr2) \
+ case kArmWord32AtomicPair##op: { \
+ ASSEMBLE_ATOMIC64_ARITH_BINOP(instr1, instr2); \
+ break; \
+ }
+ ATOMIC_ARITH_BINOP_CASE(Add, add, adc)
+ ATOMIC_ARITH_BINOP_CASE(Sub, sub, sbc)
+#undef ATOMIC_ARITH_BINOP_CASE
+#define ATOMIC_LOGIC_BINOP_CASE(op, instr) \
+ case kArmWord32AtomicPair##op: { \
+ ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr); \
+ break; \
+ }
+ ATOMIC_LOGIC_BINOP_CASE(And, and_)
+ ATOMIC_LOGIC_BINOP_CASE(Or, orr)
+ ATOMIC_LOGIC_BINOP_CASE(Xor, eor)
+ case kArmWord32AtomicPairExchange: {
+ Label exchange;
+ __ add(i.TempRegister(0), i.InputRegister(2), i.InputRegister(3));
+ __ dmb(ISH);
+ __ bind(&exchange);
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ strexd(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1),
+ i.TempRegister(0));
+ __ teq(i.TempRegister(1), Operand(0));
+ __ b(ne, &exchange);
+ __ dmb(ISH);
+ break;
+ }
+ case kArmWord32AtomicPairCompareExchange: {
+ __ add(i.TempRegister(0), i.InputRegister(4), i.InputRegister(5));
+ Label compareExchange;
+ Label exit;
+ __ dmb(ISH);
+ __ bind(&compareExchange);
+ __ ldrexd(i.OutputRegister(0), i.OutputRegister(1), i.TempRegister(0));
+ __ teq(i.InputRegister(0), Operand(i.OutputRegister(0)));
+ __ b(ne, &exit);
+ __ teq(i.InputRegister(1), Operand(i.OutputRegister(1)));
+ __ b(ne, &exit);
+ __ strexd(i.TempRegister(1), i.InputRegister(2), i.InputRegister(3),
+ i.TempRegister(0));
+ __ teq(i.TempRegister(1), Operand(0));
+ __ b(ne, &compareExchange);
+ __ bind(&exit);
+ __ dmb(ISH);
+ break;
+ }
+#undef ATOMIC_LOGIC_BINOP_CASE
+#undef ATOMIC_NARROW_OP_CLEAR_HIGH_WORD
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC64_ARITH_BINOP
+#undef ASSEMBLE_ATOMIC64_LOGIC_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_NEON_NARROWING_OP
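The ldrexd/strexd sequences emitted above implement 64-bit atomics as load-linked/store-conditional retry loops on 32-bit ARM. A plain C++ analogue of the pair-add case, as a sketch of the semantics only (the real code emits the loop directly through the macro-assembler):

#include <cstdint>

// Semantically what ASSEMBLE_ATOMIC64_ARITH_BINOP(add, adc) does: retry
// until the exclusive store succeeds, and return the previous value.
static uint64_t AtomicPairAdd(volatile uint64_t* addr, uint64_t value) {
  uint64_t old_value = __atomic_load_n(addr, __ATOMIC_SEQ_CST);  // ldrexd
  uint64_t desired;
  do {
    desired = old_value + value;  // low word "add", high word "adc"
  } while (!__atomic_compare_exchange_n(addr, &old_value, desired,
                                        /*weak=*/true, __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST));  // strexd + retry
  return old_value;
}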
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index a9f9be38ef..ca8684a375 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -11,264 +11,295 @@ namespace compiler {
// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(ArmAdd) \
- V(ArmAnd) \
- V(ArmBic) \
- V(ArmClz) \
- V(ArmCmp) \
- V(ArmCmn) \
- V(ArmTst) \
- V(ArmTeq) \
- V(ArmOrr) \
- V(ArmEor) \
- V(ArmSub) \
- V(ArmRsb) \
- V(ArmMul) \
- V(ArmMla) \
- V(ArmMls) \
- V(ArmSmull) \
- V(ArmSmmul) \
- V(ArmSmmla) \
- V(ArmUmull) \
- V(ArmSdiv) \
- V(ArmUdiv) \
- V(ArmMov) \
- V(ArmMvn) \
- V(ArmBfc) \
- V(ArmUbfx) \
- V(ArmSbfx) \
- V(ArmSxtb) \
- V(ArmSxth) \
- V(ArmSxtab) \
- V(ArmSxtah) \
- V(ArmUxtb) \
- V(ArmUxth) \
- V(ArmUxtab) \
- V(ArmRbit) \
- V(ArmUxtah) \
- V(ArmAddPair) \
- V(ArmSubPair) \
- V(ArmMulPair) \
- V(ArmLslPair) \
- V(ArmLsrPair) \
- V(ArmAsrPair) \
- V(ArmVcmpF32) \
- V(ArmVaddF32) \
- V(ArmVsubF32) \
- V(ArmVmulF32) \
- V(ArmVmlaF32) \
- V(ArmVmlsF32) \
- V(ArmVdivF32) \
- V(ArmVabsF32) \
- V(ArmVnegF32) \
- V(ArmVsqrtF32) \
- V(ArmVcmpF64) \
- V(ArmVaddF64) \
- V(ArmVsubF64) \
- V(ArmVmulF64) \
- V(ArmVmlaF64) \
- V(ArmVmlsF64) \
- V(ArmVdivF64) \
- V(ArmVmodF64) \
- V(ArmVabsF64) \
- V(ArmVnegF64) \
- V(ArmVsqrtF64) \
- V(ArmVrintmF32) \
- V(ArmVrintmF64) \
- V(ArmVrintpF32) \
- V(ArmVrintpF64) \
- V(ArmVrintzF32) \
- V(ArmVrintzF64) \
- V(ArmVrintaF64) \
- V(ArmVrintnF32) \
- V(ArmVrintnF64) \
- V(ArmVcvtF32F64) \
- V(ArmVcvtF64F32) \
- V(ArmVcvtF32S32) \
- V(ArmVcvtF32U32) \
- V(ArmVcvtF64S32) \
- V(ArmVcvtF64U32) \
- V(ArmVcvtS32F32) \
- V(ArmVcvtU32F32) \
- V(ArmVcvtS32F64) \
- V(ArmVcvtU32F64) \
- V(ArmVmovU32F32) \
- V(ArmVmovF32U32) \
- V(ArmVmovLowU32F64) \
- V(ArmVmovLowF64U32) \
- V(ArmVmovHighU32F64) \
- V(ArmVmovHighF64U32) \
- V(ArmVmovF64U32U32) \
- V(ArmVmovU32U32F64) \
- V(ArmVldrF32) \
- V(ArmVstrF32) \
- V(ArmVldrF64) \
- V(ArmVld1F64) \
- V(ArmVstrF64) \
- V(ArmVst1F64) \
- V(ArmVld1S128) \
- V(ArmVst1S128) \
- V(ArmFloat32Max) \
- V(ArmFloat64Max) \
- V(ArmFloat32Min) \
- V(ArmFloat64Min) \
- V(ArmFloat64SilenceNaN) \
- V(ArmLdrb) \
- V(ArmLdrsb) \
- V(ArmStrb) \
- V(ArmLdrh) \
- V(ArmLdrsh) \
- V(ArmStrh) \
- V(ArmLdr) \
- V(ArmStr) \
- V(ArmPush) \
- V(ArmPoke) \
- V(ArmPeek) \
- V(ArmDsbIsb) \
- V(ArmF32x4Splat) \
- V(ArmF32x4ExtractLane) \
- V(ArmF32x4ReplaceLane) \
- V(ArmF32x4SConvertI32x4) \
- V(ArmF32x4UConvertI32x4) \
- V(ArmF32x4Abs) \
- V(ArmF32x4Neg) \
- V(ArmF32x4RecipApprox) \
- V(ArmF32x4RecipSqrtApprox) \
- V(ArmF32x4Add) \
- V(ArmF32x4AddHoriz) \
- V(ArmF32x4Sub) \
- V(ArmF32x4Mul) \
- V(ArmF32x4Min) \
- V(ArmF32x4Max) \
- V(ArmF32x4Eq) \
- V(ArmF32x4Ne) \
- V(ArmF32x4Lt) \
- V(ArmF32x4Le) \
- V(ArmI32x4Splat) \
- V(ArmI32x4ExtractLane) \
- V(ArmI32x4ReplaceLane) \
- V(ArmI32x4SConvertF32x4) \
- V(ArmI32x4SConvertI16x8Low) \
- V(ArmI32x4SConvertI16x8High) \
- V(ArmI32x4Neg) \
- V(ArmI32x4Shl) \
- V(ArmI32x4ShrS) \
- V(ArmI32x4Add) \
- V(ArmI32x4AddHoriz) \
- V(ArmI32x4Sub) \
- V(ArmI32x4Mul) \
- V(ArmI32x4MinS) \
- V(ArmI32x4MaxS) \
- V(ArmI32x4Eq) \
- V(ArmI32x4Ne) \
- V(ArmI32x4GtS) \
- V(ArmI32x4GeS) \
- V(ArmI32x4UConvertF32x4) \
- V(ArmI32x4UConvertI16x8Low) \
- V(ArmI32x4UConvertI16x8High) \
- V(ArmI32x4ShrU) \
- V(ArmI32x4MinU) \
- V(ArmI32x4MaxU) \
- V(ArmI32x4GtU) \
- V(ArmI32x4GeU) \
- V(ArmI16x8Splat) \
- V(ArmI16x8ExtractLane) \
- V(ArmI16x8ReplaceLane) \
- V(ArmI16x8SConvertI8x16Low) \
- V(ArmI16x8SConvertI8x16High) \
- V(ArmI16x8Neg) \
- V(ArmI16x8Shl) \
- V(ArmI16x8ShrS) \
- V(ArmI16x8SConvertI32x4) \
- V(ArmI16x8Add) \
- V(ArmI16x8AddSaturateS) \
- V(ArmI16x8AddHoriz) \
- V(ArmI16x8Sub) \
- V(ArmI16x8SubSaturateS) \
- V(ArmI16x8Mul) \
- V(ArmI16x8MinS) \
- V(ArmI16x8MaxS) \
- V(ArmI16x8Eq) \
- V(ArmI16x8Ne) \
- V(ArmI16x8GtS) \
- V(ArmI16x8GeS) \
- V(ArmI16x8UConvertI8x16Low) \
- V(ArmI16x8UConvertI8x16High) \
- V(ArmI16x8ShrU) \
- V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSaturateU) \
- V(ArmI16x8SubSaturateU) \
- V(ArmI16x8MinU) \
- V(ArmI16x8MaxU) \
- V(ArmI16x8GtU) \
- V(ArmI16x8GeU) \
- V(ArmI8x16Splat) \
- V(ArmI8x16ExtractLane) \
- V(ArmI8x16ReplaceLane) \
- V(ArmI8x16Neg) \
- V(ArmI8x16Shl) \
- V(ArmI8x16ShrS) \
- V(ArmI8x16SConvertI16x8) \
- V(ArmI8x16Add) \
- V(ArmI8x16AddSaturateS) \
- V(ArmI8x16Sub) \
- V(ArmI8x16SubSaturateS) \
- V(ArmI8x16Mul) \
- V(ArmI8x16MinS) \
- V(ArmI8x16MaxS) \
- V(ArmI8x16Eq) \
- V(ArmI8x16Ne) \
- V(ArmI8x16GtS) \
- V(ArmI8x16GeS) \
- V(ArmI8x16ShrU) \
- V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSaturateU) \
- V(ArmI8x16SubSaturateU) \
- V(ArmI8x16MinU) \
- V(ArmI8x16MaxU) \
- V(ArmI8x16GtU) \
- V(ArmI8x16GeU) \
- V(ArmS128Zero) \
- V(ArmS128Dup) \
- V(ArmS128And) \
- V(ArmS128Or) \
- V(ArmS128Xor) \
- V(ArmS128Not) \
- V(ArmS128Select) \
- V(ArmS32x4ZipLeft) \
- V(ArmS32x4ZipRight) \
- V(ArmS32x4UnzipLeft) \
- V(ArmS32x4UnzipRight) \
- V(ArmS32x4TransposeLeft) \
- V(ArmS32x4TransposeRight) \
- V(ArmS32x4Shuffle) \
- V(ArmS16x8ZipLeft) \
- V(ArmS16x8ZipRight) \
- V(ArmS16x8UnzipLeft) \
- V(ArmS16x8UnzipRight) \
- V(ArmS16x8TransposeLeft) \
- V(ArmS16x8TransposeRight) \
- V(ArmS8x16ZipLeft) \
- V(ArmS8x16ZipRight) \
- V(ArmS8x16UnzipLeft) \
- V(ArmS8x16UnzipRight) \
- V(ArmS8x16TransposeLeft) \
- V(ArmS8x16TransposeRight) \
- V(ArmS8x16Concat) \
- V(ArmS8x16Shuffle) \
- V(ArmS32x2Reverse) \
- V(ArmS16x4Reverse) \
- V(ArmS16x2Reverse) \
- V(ArmS8x8Reverse) \
- V(ArmS8x4Reverse) \
- V(ArmS8x2Reverse) \
- V(ArmS1x4AnyTrue) \
- V(ArmS1x4AllTrue) \
- V(ArmS1x8AnyTrue) \
- V(ArmS1x8AllTrue) \
- V(ArmS1x16AnyTrue) \
- V(ArmS1x16AllTrue)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmClz) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSmull) \
+ V(ArmSmmul) \
+ V(ArmSmmla) \
+ V(ArmUmull) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmSbfx) \
+ V(ArmSxtb) \
+ V(ArmSxth) \
+ V(ArmSxtab) \
+ V(ArmSxtah) \
+ V(ArmUxtb) \
+ V(ArmUxth) \
+ V(ArmUxtab) \
+ V(ArmRbit) \
+ V(ArmRev) \
+ V(ArmUxtah) \
+ V(ArmAddPair) \
+ V(ArmSubPair) \
+ V(ArmMulPair) \
+ V(ArmLslPair) \
+ V(ArmLsrPair) \
+ V(ArmAsrPair) \
+ V(ArmVcmpF32) \
+ V(ArmVaddF32) \
+ V(ArmVsubF32) \
+ V(ArmVmulF32) \
+ V(ArmVmlaF32) \
+ V(ArmVmlsF32) \
+ V(ArmVdivF32) \
+ V(ArmVabsF32) \
+ V(ArmVnegF32) \
+ V(ArmVsqrtF32) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVabsF64) \
+ V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
+ V(ArmVrintmF32) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
+ V(ArmVcvtF32F64) \
+ V(ArmVcvtF64F32) \
+ V(ArmVcvtF32S32) \
+ V(ArmVcvtF32U32) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F32) \
+ V(ArmVcvtU32F32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
+ V(ArmVmovU32U32F64) \
+ V(ArmVldrF32) \
+ V(ArmVstrF32) \
+ V(ArmVldrF64) \
+ V(ArmVld1F64) \
+ V(ArmVstrF64) \
+ V(ArmVst1F64) \
+ V(ArmVld1S128) \
+ V(ArmVst1S128) \
+ V(ArmFloat32Max) \
+ V(ArmFloat64Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
+ V(ArmLdrb) \
+ V(ArmLdrsb) \
+ V(ArmStrb) \
+ V(ArmLdrh) \
+ V(ArmLdrsh) \
+ V(ArmStrh) \
+ V(ArmLdr) \
+ V(ArmStr) \
+ V(ArmPush) \
+ V(ArmPoke) \
+ V(ArmPeek) \
+ V(ArmDsbIsb) \
+ V(ArmF32x4Splat) \
+ V(ArmF32x4ExtractLane) \
+ V(ArmF32x4ReplaceLane) \
+ V(ArmF32x4SConvertI32x4) \
+ V(ArmF32x4UConvertI32x4) \
+ V(ArmF32x4Abs) \
+ V(ArmF32x4Neg) \
+ V(ArmF32x4RecipApprox) \
+ V(ArmF32x4RecipSqrtApprox) \
+ V(ArmF32x4Add) \
+ V(ArmF32x4AddHoriz) \
+ V(ArmF32x4Sub) \
+ V(ArmF32x4Mul) \
+ V(ArmF32x4Min) \
+ V(ArmF32x4Max) \
+ V(ArmF32x4Eq) \
+ V(ArmF32x4Ne) \
+ V(ArmF32x4Lt) \
+ V(ArmF32x4Le) \
+ V(ArmI32x4Splat) \
+ V(ArmI32x4ExtractLane) \
+ V(ArmI32x4ReplaceLane) \
+ V(ArmI32x4SConvertF32x4) \
+ V(ArmI32x4SConvertI16x8Low) \
+ V(ArmI32x4SConvertI16x8High) \
+ V(ArmI32x4Neg) \
+ V(ArmI32x4Shl) \
+ V(ArmI32x4ShrS) \
+ V(ArmI32x4Add) \
+ V(ArmI32x4AddHoriz) \
+ V(ArmI32x4Sub) \
+ V(ArmI32x4Mul) \
+ V(ArmI32x4MinS) \
+ V(ArmI32x4MaxS) \
+ V(ArmI32x4Eq) \
+ V(ArmI32x4Ne) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
+ V(ArmI32x4UConvertF32x4) \
+ V(ArmI32x4UConvertI16x8Low) \
+ V(ArmI32x4UConvertI16x8High) \
+ V(ArmI32x4ShrU) \
+ V(ArmI32x4MinU) \
+ V(ArmI32x4MaxU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
+ V(ArmI16x8Splat) \
+ V(ArmI16x8ExtractLane) \
+ V(ArmI16x8ReplaceLane) \
+ V(ArmI16x8SConvertI8x16Low) \
+ V(ArmI16x8SConvertI8x16High) \
+ V(ArmI16x8Neg) \
+ V(ArmI16x8Shl) \
+ V(ArmI16x8ShrS) \
+ V(ArmI16x8SConvertI32x4) \
+ V(ArmI16x8Add) \
+ V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8AddHoriz) \
+ V(ArmI16x8Sub) \
+ V(ArmI16x8SubSaturateS) \
+ V(ArmI16x8Mul) \
+ V(ArmI16x8MinS) \
+ V(ArmI16x8MaxS) \
+ V(ArmI16x8Eq) \
+ V(ArmI16x8Ne) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
+ V(ArmI16x8UConvertI8x16Low) \
+ V(ArmI16x8UConvertI8x16High) \
+ V(ArmI16x8ShrU) \
+ V(ArmI16x8UConvertI32x4) \
+ V(ArmI16x8AddSaturateU) \
+ V(ArmI16x8SubSaturateU) \
+ V(ArmI16x8MinU) \
+ V(ArmI16x8MaxU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
+ V(ArmI8x16Splat) \
+ V(ArmI8x16ExtractLane) \
+ V(ArmI8x16ReplaceLane) \
+ V(ArmI8x16Neg) \
+ V(ArmI8x16Shl) \
+ V(ArmI8x16ShrS) \
+ V(ArmI8x16SConvertI16x8) \
+ V(ArmI8x16Add) \
+ V(ArmI8x16AddSaturateS) \
+ V(ArmI8x16Sub) \
+ V(ArmI8x16SubSaturateS) \
+ V(ArmI8x16Mul) \
+ V(ArmI8x16MinS) \
+ V(ArmI8x16MaxS) \
+ V(ArmI8x16Eq) \
+ V(ArmI8x16Ne) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
+ V(ArmI8x16ShrU) \
+ V(ArmI8x16UConvertI16x8) \
+ V(ArmI8x16AddSaturateU) \
+ V(ArmI8x16SubSaturateU) \
+ V(ArmI8x16MinU) \
+ V(ArmI8x16MaxU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
+ V(ArmS128Zero) \
+ V(ArmS128Dup) \
+ V(ArmS128And) \
+ V(ArmS128Or) \
+ V(ArmS128Xor) \
+ V(ArmS128Not) \
+ V(ArmS128Select) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmS8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
+ V(ArmS1x4AnyTrue) \
+ V(ArmS1x4AllTrue) \
+ V(ArmS1x8AnyTrue) \
+ V(ArmS1x8AllTrue) \
+ V(ArmS1x16AnyTrue) \
+ V(ArmS1x16AllTrue) \
+ V(ArmWord32AtomicPairLoad) \
+ V(ArmWord32AtomicPairStore) \
+ V(ArmWord32AtomicPairAdd) \
+ V(ArmWord32AtomicPairSub) \
+ V(ArmWord32AtomicPairAnd) \
+ V(ArmWord32AtomicPairOr) \
+ V(ArmWord32AtomicPairXor) \
+ V(ArmWord32AtomicPairExchange) \
+ V(ArmWord32AtomicPairCompareExchange) \
+ V(ArmWord64AtomicNarrowAddUint8) \
+ V(ArmWord64AtomicNarrowAddUint16) \
+ V(ArmWord64AtomicNarrowAddUint32) \
+ V(ArmWord64AtomicNarrowSubUint8) \
+ V(ArmWord64AtomicNarrowSubUint16) \
+ V(ArmWord64AtomicNarrowSubUint32) \
+ V(ArmWord64AtomicNarrowAndUint8) \
+ V(ArmWord64AtomicNarrowAndUint16) \
+ V(ArmWord64AtomicNarrowAndUint32) \
+ V(ArmWord64AtomicNarrowOrUint8) \
+ V(ArmWord64AtomicNarrowOrUint16) \
+ V(ArmWord64AtomicNarrowOrUint32) \
+ V(ArmWord64AtomicNarrowXorUint8) \
+ V(ArmWord64AtomicNarrowXorUint16) \
+ V(ArmWord64AtomicNarrowXorUint32) \
+ V(ArmWord64AtomicNarrowExchangeUint8) \
+ V(ArmWord64AtomicNarrowExchangeUint16) \
+ V(ArmWord64AtomicNarrowExchangeUint32) \
+ V(ArmWord64AtomicNarrowCompareExchangeUint8) \
+ V(ArmWord64AtomicNarrowCompareExchangeUint16) \
+ V(ArmWord64AtomicNarrowCompareExchangeUint32)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
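The new ArmRev opcode in the list above backs Word32ReverseBytes, which is selected in instruction-selector-arm.cc below. What it computes, as a standalone C++ sketch:

#include <cstdint>

// A 32-bit byte swap, i.e. the effect of the ARM "rev" instruction.
static uint32_t ReverseBytes32(uint32_t x) {
  return (x >> 24) | ((x >> 8) & 0x0000FF00u) | ((x << 8) & 0x00FF0000u) |
         (x << 24);
}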
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index e538020f69..56ff02689a 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -49,6 +49,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmUxtab:
case kArmUxtah:
case kArmRbit:
+ case kArmRev:
case kArmAddPair:
case kArmSubPair:
case kArmMulPair:
@@ -263,6 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdrsh:
case kArmLdr:
case kArmPeek:
+ case kArmWord32AtomicPairLoad:
return kIsLoadOperation;
case kArmVstrF32:
@@ -275,6 +277,35 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmPush:
case kArmPoke:
case kArmDsbIsb:
+ case kArmWord32AtomicPairStore:
+ case kArmWord32AtomicPairAdd:
+ case kArmWord32AtomicPairSub:
+ case kArmWord32AtomicPairAnd:
+ case kArmWord32AtomicPairOr:
+ case kArmWord32AtomicPairXor:
+ case kArmWord32AtomicPairExchange:
+ case kArmWord32AtomicPairCompareExchange:
+ case kArmWord64AtomicNarrowAddUint8:
+ case kArmWord64AtomicNarrowAddUint16:
+ case kArmWord64AtomicNarrowAddUint32:
+ case kArmWord64AtomicNarrowSubUint8:
+ case kArmWord64AtomicNarrowSubUint16:
+ case kArmWord64AtomicNarrowSubUint32:
+ case kArmWord64AtomicNarrowAndUint8:
+ case kArmWord64AtomicNarrowAndUint16:
+ case kArmWord64AtomicNarrowAndUint32:
+ case kArmWord64AtomicNarrowOrUint8:
+ case kArmWord64AtomicNarrowOrUint16:
+ case kArmWord64AtomicNarrowOrUint32:
+ case kArmWord64AtomicNarrowXorUint8:
+ case kArmWord64AtomicNarrowXorUint16:
+ case kArmWord64AtomicNarrowXorUint32:
+ case kArmWord64AtomicNarrowExchangeUint8:
+ case kArmWord64AtomicNarrowExchangeUint16:
+ case kArmWord64AtomicNarrowExchangeUint32:
+ case kArmWord64AtomicNarrowCompareExchangeUint8:
+ case kArmWord64AtomicNarrowCompareExchangeUint16:
+ case kArmWord64AtomicNarrowCompareExchangeUint32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 8fc5779112..277d9779c0 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -403,6 +403,46 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
+void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ ArmOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseUniqueRegister(value),
+ g.UseUniqueRegister(value_high),
+ g.UseRegister(base), g.UseRegister(index)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r6),
+ g.TempRegister(r7), g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ ArmOperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[3] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseUniqueRegister(value)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r4),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r5)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
@@ -1100,7 +1140,9 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) {
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ VisitRR(this, kArmRev, node);
+}
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
@@ -2100,7 +2142,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2136,7 +2178,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2175,7 +2217,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2219,6 +2261,190 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r0),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r1)};
+ InstructionOperand temps[] = {g.TempRegister()};
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionCode code =
+ kArmWord32AtomicPairLoad | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value_low = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(base), g.UseUniqueRegister(index),
+ g.UseFixed(value_low, r2), g.UseFixed(value_high, r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(r0),
+ g.TempRegister(r1)};
+ InstructionCode code =
+ kArmWord32AtomicPairStore | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAdd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairSub);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairAnd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairOr);
+}
+
+void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
+ VisitPairAtomicBinOp(this, node, kArmWord32AtomicPairXor);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
+ ArchOpcode uint8_op,
+ ArchOpcode uint16_op,
+ ArchOpcode uint32_op) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitNarrowAtomicBinOp(this, node, opcode);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) { \
+ VisitWord64AtomicNarrowBinop(node, kArmWord64AtomicNarrow##op##Uint8, \
+ kArmWord64AtomicNarrow##op##Uint16, \
+ kArmWord64AtomicNarrow##op##Uint32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseFixed(value, r0),
+ g.UseFixed(value_high, r1),
+ g.UseRegister(base), g.UseRegister(index)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r6),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r7)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionCode code = kArmWord32AtomicPairExchange |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kArmWord64AtomicNarrowExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kArmWord64AtomicNarrowExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kArmWord64AtomicNarrowExchangeUint32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseUniqueRegister(value)};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {
+ g.UseFixed(node->InputAt(2), r4), g.UseFixed(node->InputAt(3), r5),
+ g.UseFixed(node->InputAt(4), r8), g.UseFixed(node->InputAt(5), r9),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), r2),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), r3)};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionCode code = kArmWord32AtomicPairCompareExchange |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kArmWord64AtomicNarrowCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kArmWord64AtomicNarrowCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kArmWord64AtomicNarrowCompareExchangeUint32;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
+ g.UseUniqueRegister(old_value),
+ g.UseUniqueRegister(new_value)};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
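The pair ops added above keep their operands in fixed register pairs (r0/r1 and r2/r3 for load/store, r6/r7 for exchange), which appears to line up with the even/odd register-pair requirement of the A32 ldrexd/strexd instructions such opcodes would typically expand to. As a standalone illustration (not V8 code) of what a pair op has to guarantee, namely an atomic read-modify-write of a 64-bit value handled as two 32-bit halves:

    // Not V8 code: the update must be atomic across both 32-bit halves,
    // including the carry from the low word into the high word shown here.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    int main() {
      std::atomic<uint64_t> cell{0x00000001FFFFFFFFull};
      uint64_t old_value = cell.fetch_add(1);  // low word wraps, high word increments
      std::printf("old=%016llx new=%016llx\n",
                  static_cast<unsigned long long>(old_value),
                  static_cast<unsigned long long>(cell.load()));
      return 0;
    }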
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index a7c5beee4c..867c3687a1 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -13,6 +13,7 @@
#include "src/frame-constants.h"
#include "src/heap/heap-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -1266,6 +1267,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Rbit32:
__ Rbit(i.OutputRegister32(), i.InputRegister32(0));
break;
+ case kArm64Rev:
+ __ Rev(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ case kArm64Rev32:
+ __ Rev(i.OutputRegister32(), i.InputRegister32(0));
+ break;
case kArm64Cmp:
__ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
break;
@@ -1346,12 +1353,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kArm64Float64Mod: {
- // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
+ // TODO(turbofan): implement directly.
FrameScope scope(tasm(), StackFrame::MANUAL);
DCHECK(d0.is(i.InputDoubleRegister(0)));
DCHECK(d1.is(i.InputDoubleRegister(1)));
DCHECK(d0.is(i.OutputDoubleRegister()));
- // TODO(dcarney): make sure this saves all relevant registers.
+ // TODO(turbofan): make sure this saves all relevant registers.
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
break;
}
@@ -1414,35 +1421,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float32ToInt64:
__ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
if (i.OutputCount() > 1) {
- __ Mov(i.OutputRegister(1), 1);
- Label done;
- __ Cmp(i.OutputRegister(0), 1);
- __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
- __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
- vc);
- __ B(vc, &done);
+ // Check for inputs below INT64_MIN and NaN.
__ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
- __ Cset(i.OutputRegister(1), eq);
- __ Bind(&done);
+ // Check overflow.
+ // Comparing the result with -1 (i.e. computing result - (-1)) overflows
+ // exactly when the result is INT64_MAX, the value Fcvtzs produces for
+ // too-large inputs.
+ // OutputRegister(1) is set to 0 if the input was out of range or NaN.
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, ge);
+ __ Cset(i.OutputRegister(1), vc);
}
break;
case kArm64Float64ToInt64:
__ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
if (i.OutputCount() > 1) {
- __ Mov(i.OutputRegister(1), 1);
- Label done;
- __ Cmp(i.OutputRegister(0), 1);
- __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
- __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
- __ B(vc, &done);
+ // See kArm64Float32ToInt64 for a detailed description.
__ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
- __ Cset(i.OutputRegister(1), eq);
- __ Bind(&done);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, ge);
+ __ Cset(i.OutputRegister(1), vc);
}
break;
case kArm64Float32ToUint64:
__ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
if (i.OutputCount() > 1) {
+ // See kArm64Float32ToInt64 for a detailed description.
__ Fcmp(i.InputFloat32Register(0), -1.0);
__ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
__ Cset(i.OutputRegister(1), ne);
@@ -1451,6 +1452,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float64ToUint64:
__ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
if (i.OutputCount() > 1) {
+ // See kArm64Float32ToInt64 for a detailed description.
__ Fcmp(i.InputDoubleRegister(0), -1.0);
__ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
__ Cset(i.OutputRegister(1), ne);
@@ -2580,10 +2582,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
- __ Move(dst, src_object);
+ __ Mov(dst, src_object);
}
- } else if (src.type() == Constant::kExternalReference) {
- __ Mov(dst, src.ToExternalReference());
} else {
__ Mov(dst, g.ToImmediate(source));
}
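The rewritten float-to-int64 overflow checks in the hunk above replace the Fccmp-based sequence with a single Fcmp/Ccmp/Cset. A standalone paraphrase of the predicate this appears to compute (my reading of the emitted sequence, not V8 code): the result is usable only if the input is not NaN, not below INT64_MIN, and the saturated conversion result is not INT64_MAX.

    // Hedged paraphrase of the Fcmp/Ccmp(..., -1, VFlag, ge)/Cset(..., vc)
    // success check for the Float64ToInt64 case; not V8 code.
    #include <cmath>
    #include <cstdint>
    #include <limits>

    bool Int64ConversionSucceeded(double input, int64_t saturated_result) {
      if (std::isnan(input)) return false;                       // unordered compare fails ge
      if (input < static_cast<double>(INT64_MIN)) return false;  // below the representable range
      // Fcvtzs saturates too-large inputs to INT64_MAX; result - (-1) would
      // overflow exactly in that case, which is what the V flag detects.
      return saturated_result != std::numeric_limits<int64_t>::max();
    }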
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index ce73515321..7b119c8fe7 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -77,6 +77,8 @@ namespace compiler {
V(Arm64Bfi) \
V(Arm64Rbit) \
V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index 4ea251c590..d443bd7641 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -79,6 +79,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Bfi:
case kArm64Rbit:
case kArm64Rbit32:
+ case kArm64Rev:
+ case kArm64Rev32:
case kArm64Float32Cmp:
case kArm64Float32Add:
case kArm64Float32Sub:
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index e07debf9ec..b2e8b4b205 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -1189,6 +1189,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Word32Clz, kArm64Clz32) \
V(Word32ReverseBits, kArm64Rbit32) \
V(Word64ReverseBits, kArm64Rbit) \
+ V(Word32ReverseBytes, kArm64Rev32) \
+ V(Word64ReverseBytes, kArm64Rev) \
V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64) \
V(RoundInt32ToFloat32, kArm64Int32ToFloat32) \
V(RoundUint32ToFloat32, kArm64Uint32ToFloat32) \
@@ -1272,10 +1274,6 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
@@ -2082,23 +2080,42 @@ void VisitWord64Test(InstructionSelector* selector, Node* node,
VisitWordTest(selector, node, kArm64Tst, cont);
}
-template <typename Matcher, ArchOpcode kOpcode>
-bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- Arm64OperandGenerator g(selector);
- Matcher m(node);
- if (cont->IsBranch() && !cont->IsPoisoned() && m.right().HasValue() &&
- base::bits::IsPowerOfTwo(m.right().Value())) {
- // If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
- selector->EmitWithContinuation(
- kOpcode, g.UseRegister(m.left().node()),
- g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
- cont);
- return true;
+template <typename Matcher>
+struct TestAndBranchMatcher {
+ TestAndBranchMatcher(Node* node, FlagsContinuation* cont)
+ : matches_(false), cont_(cont), matcher_(node) {
+ Initialize();
+ }
+ bool Matches() const { return matches_; }
+
+ unsigned bit() const {
+ DCHECK(Matches());
+ return base::bits::CountTrailingZeros(matcher_.right().Value());
}
- return false;
-}
+
+ Node* input() const {
+ DCHECK(Matches());
+ return matcher_.left().node();
+ }
+
+ private:
+ bool matches_;
+ FlagsContinuation* cont_;
+ Matcher matcher_;
+
+ void Initialize() {
+ if (cont_->IsBranch() && !cont_->IsPoisoned() &&
+ matcher_.right().HasValue() &&
+ base::bits::IsPowerOfTwo(matcher_.right().Value())) {
+ // If the mask has only one bit set, we can use tbz/tbnz.
+ DCHECK((cont_->condition() == kEqual) ||
+ (cont_->condition() == kNotEqual));
+ matches_ = true;
+ } else {
+ matches_ = false;
+ }
+ }
+};
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -2228,6 +2245,58 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
cont->Negate();
}
+ // Try to match bit checks to create TBZ/TBNZ instructions.
+ // Unlike in the switch below, a CanCover check is not needed here.
+ // If there are several uses of the given operation, we will generate a TBZ
+ // instruction for each. This is useful even if there are other uses of the
+ // arithmetic result, because it moves dependencies further back.
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Equal: {
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ Node* const left = m.left().node();
+ if (left->opcode() == IrOpcode::kWord64And) {
+ // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
+ // into a tbz/tbnz instruction.
+ TestAndBranchMatcher<Uint64BinopMatcher> tbm(left, cont);
+ if (tbm.Matches()) {
+ Arm64OperandGenerator gen(this);
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ this->EmitWithContinuation(kArm64TestAndBranch,
+ gen.UseRegister(tbm.input()),
+ gen.TempImmediate(tbm.bit()), cont);
+ return;
+ }
+ }
+ }
+ break;
+ }
+ case IrOpcode::kWord32And: {
+ TestAndBranchMatcher<Uint32BinopMatcher> tbm(value, cont);
+ if (tbm.Matches()) {
+ Arm64OperandGenerator gen(this);
+ this->EmitWithContinuation(kArm64TestAndBranch32,
+ gen.UseRegister(tbm.input()),
+ gen.TempImmediate(tbm.bit()), cont);
+ return;
+ }
+ break;
+ }
+ case IrOpcode::kWord64And: {
+ TestAndBranchMatcher<Uint64BinopMatcher> tbm(value, cont);
+ if (tbm.Matches()) {
+ Arm64OperandGenerator gen(this);
+ this->EmitWithContinuation(kArm64TestAndBranch,
+ gen.UseRegister(tbm.input()),
+ gen.TempImmediate(tbm.bit()), cont);
+ return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
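The matcher only fires for masks with exactly one bit set, because tbz/tbnz test a single bit whose index is the count of trailing zeros of the mask. A standalone illustration of that mapping (not V8 code):

    // Not V8 code: shows how a power-of-two mask maps to the bit index that
    // TestAndBranchMatcher::bit() encodes into a tbz/tbnz instruction.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t mask = 0x20;                            // constant right-hand side of the And
      assert(mask != 0 && (mask & (mask - 1)) == 0);   // exactly one bit set
      int bit = 0;
      while (((mask >> bit) & 1) == 0) ++bit;          // CountTrailingZeros
      assert(bit == 5);                                // e.g. tbnz x, #5, target
      return 0;
    }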
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal:
@@ -2251,12 +2320,6 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
if (m.right().Is(0)) {
Node* const left = m.left().node();
if (CanCover(value, left) && left->opcode() == IrOpcode::kWord64And) {
- // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
- // into a tbz/tbnz instruction.
- if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- this, left, cont)) {
- return;
- }
return VisitWordCompare(this, left, kArm64Tst, cont, true,
kLogical64Imm);
}
@@ -2353,17 +2416,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
case IrOpcode::kInt32Sub:
return VisitWord32Compare(this, value, cont);
case IrOpcode::kWord32And:
- if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
- this, value, cont)) {
- return;
- }
return VisitWordCompare(this, value, kArm64Tst32, cont, true,
kLogical32Imm);
case IrOpcode::kWord64And:
- if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
- this, value, cont)) {
- return;
- }
return VisitWordCompare(this, value, kArm64Tst, cont, true,
kLogical64Imm);
default:
@@ -2742,7 +2797,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2762,7 +2817,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2780,7 +2835,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2800,7 +2855,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = kArm64Word64AtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
@@ -2820,7 +2875,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2856,7 +2911,7 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode uint64_op) {
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
opcode = uint8_op;
} else if (type == MachineType::Uint16()) {
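Note that every arm64 atomic visitor above now reads the access size via AtomicOpType(node->op()), the renamed accessor that replaces AtomicOpRepresentationOf. A standalone mirror of the opcode-selection pattern they all share (not V8 code; the enum and names below are placeholders):

    // Not V8 code: the same if/else ladder over the machine type that the
    // visitors above use to pick a width-specific opcode.
    #include <cstdint>
    #include <stdexcept>

    enum class Width : uint8_t { kUint8, kUint16, kUint32 };
    enum class Opcode : uint8_t { kExchange8, kExchange16, kExchange32 };

    Opcode SelectExchangeOpcode(Width type) {
      if (type == Width::kUint8) return Opcode::kExchange8;
      if (type == Width::kUint16) return Opcode::kExchange16;
      if (type == Width::kUint32) return Opcode::kExchange32;
      throw std::logic_error("unreachable");  // mirrors UNREACHABLE()
    }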
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index ebec8161ba..77f88502c3 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -40,27 +40,23 @@ static NodeVector::iterator FindInsertionPoint(BasicBlock* block) {
// TODO(dcarney): need to mark code as non-serializable.
static const Operator* PointerConstant(CommonOperatorBuilder* common,
- void* ptr) {
- return kPointerSize == 8
- ? common->Int64Constant(reinterpret_cast<intptr_t>(ptr))
- : common->Int32Constant(
- static_cast<int32_t>(reinterpret_cast<intptr_t>(ptr)));
+ intptr_t ptr) {
+ return kPointerSize == 8 ? common->Int64Constant(ptr)
+ : common->Int32Constant(static_cast<int32_t>(ptr));
}
BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
OptimizedCompilationInfo* info, Graph* graph, Schedule* schedule,
Isolate* isolate) {
+ // Basic block profiling disables concurrent compilation, so dereferencing
+ // handles here is fine.
+ AllowHandleDereference allow_handle_dereference;
// Skip the exit block in profiles, since the register allocator can't handle
// it and entry into it means falling off the end of the function anyway.
size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
- BasicBlockProfiler::Data* data =
- isolate->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
+ BasicBlockProfiler::Data* data = BasicBlockProfiler::Get()->NewData(n_blocks);
// Set the function name.
- if (info->has_shared_info()) {
- std::ostringstream os;
- info->shared_info()->Name()->PrintUC16(os);
- data->SetFunctionName(&os);
- }
+ data->SetFunctionName(info->GetDebugName());
// Capture the schedule string before instrumentation.
{
std::ostringstream os;
@@ -77,7 +73,7 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
++it, ++block_number) {
BasicBlock* block = (*it);
- data->SetBlockId(block_number, block->id().ToSize());
+ data->SetBlockRpoNumber(block_number, block->rpo_number());
// TODO(dcarney): wire effect and control deps for load and store.
// Construct increment operation.
Node* base = graph->NewNode(
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 2d16ba525c..bd8b551f4f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -14,6 +14,8 @@
#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/literal-objects-inl.h"
#include "src/vector-slot-pair.h"
@@ -1624,6 +1626,18 @@ void BytecodeGraphBuilder::VisitCreateEmptyObjectLiteral() {
environment()->BindAccumulator(literal);
}
+void BytecodeGraphBuilder::VisitCloneObject() {
+ PrepareEagerCheckpoint();
+ Node* source =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ int flags = bytecode_iterator().GetFlagOperand(1);
+ int slot = bytecode_iterator().GetIndexOperand(2);
+ const Operator* op =
+ javascript()->CloneObject(CreateVectorSlotPair(slot), flags);
+ Node* value = NewNode(op, source);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitGetTemplateObject() {
Handle<TemplateObjectDescription> description(
TemplateObjectDescription::cast(
@@ -2439,19 +2453,15 @@ void BytecodeGraphBuilder::VisitTestReferenceEqual() {
environment()->BindAccumulator(result);
}
-void BytecodeGraphBuilder::BuildTestingOp(const Operator* op) {
+void BytecodeGraphBuilder::VisitTestIn() {
PrepareEagerCheckpoint();
- Node* left =
+ Node* object = environment()->LookupAccumulator();
+ Node* key =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* right = environment()->LookupAccumulator();
- Node* node = NewNode(op, left, right);
+ Node* node = NewNode(javascript()->HasProperty(), object, key);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitTestIn() {
- BuildTestingOp(javascript()->HasProperty());
-}
-
void BytecodeGraphBuilder::VisitTestInstanceOf() {
int const slot_index = bytecode_iterator().GetIndexOperand(1);
BuildCompareOp(javascript()->InstanceOf(CreateVectorSlotPair(slot_index)));
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index a94a3d79af..57127142de 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -93,6 +93,12 @@ class BytecodeGraphBuilder {
return MakeNode(op, arraysize(buffer), buffer, false);
}
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
+ Node* buffer[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
// Helpers to create new control nodes.
Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
@@ -178,7 +184,6 @@ class BytecodeGraphBuilder {
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
void BuildCompareOp(const Operator* op);
- void BuildTestingOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
void BuildHoleCheckAndThrow(Node* condition, Runtime::FunctionId runtime_id,
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 0b77d10072..4f400846d4 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -1378,12 +1378,64 @@ void CodeAssembler::GotoIfNot(SloppyTNode<IntegralT> condition,
void CodeAssembler::Branch(SloppyTNode<IntegralT> condition, Label* true_label,
Label* false_label) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ if ((true_label->is_used() || true_label->is_bound()) &&
+ (false_label->is_used() || false_label->is_bound())) {
+ return Goto(constant ? true_label : false_label);
+ }
+ }
true_label->MergeVariables();
false_label->MergeVariables();
return raw_assembler()->Branch(condition, true_label->label_,
false_label->label_);
}
+void CodeAssembler::Branch(TNode<BoolT> condition,
+ std::function<void()> true_body,
+ std::function<void()> false_body) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ return constant ? true_body() : false_body();
+ }
+
+ Label vtrue(this), vfalse(this);
+ Branch(condition, &vtrue, &vfalse);
+
+ Bind(&vtrue);
+ true_body();
+
+ Bind(&vfalse);
+ false_body();
+}
+
+void CodeAssembler::Branch(TNode<BoolT> condition, Label* true_label,
+ std::function<void()> false_body) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ return constant ? Goto(true_label) : false_body();
+ }
+
+ Label vfalse(this);
+ Branch(condition, true_label, &vfalse);
+ Bind(&vfalse);
+ false_body();
+}
+
+void CodeAssembler::Branch(TNode<BoolT> condition,
+ std::function<void()> true_body,
+ Label* false_label) {
+ int32_t constant;
+ if (ToInt32Constant(condition, constant)) {
+ return constant ? true_body() : Goto(false_label);
+ }
+
+ Label vtrue(this);
+ Branch(condition, &vtrue, false_label);
+ Bind(&vtrue);
+ true_body();
+}
+
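The new overloads take the branch bodies as std::function and fold the branch away when the condition is a compile-time constant. A standalone mirror of that shape (not V8 code; the real overloads bind CodeAssembler labels rather than running both bodies):

    // Not V8 code: sketch of the constant-folding short-circuit in the new
    // lambda-taking Branch overloads.
    #include <cstdio>
    #include <functional>
    #include <optional>

    void Branch(std::optional<bool> constant_condition,
                std::function<void()> true_body,
                std::function<void()> false_body) {
      if (constant_condition.has_value()) {
        return *constant_condition ? true_body() : false_body();  // fold the branch away
      }
      // The real assembler would emit a conditional branch and bind two labels;
      // both bodies are invoked here only to keep the sketch self-contained.
      true_body();
      false_body();
    }

    int main() {
      Branch(true, [] { std::puts("only the true body is taken"); },
             [] { std::puts("never taken"); });
      return 0;
    }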
void CodeAssembler::Switch(Node* index, Label* default_label,
const int32_t* case_values, Label** case_labels,
size_t case_count) {
@@ -1685,8 +1737,7 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
} // namespace compiler
-Smi* CheckObjectType(Isolate* isolate, Object* value, Smi* type,
- String* location) {
+Smi* CheckObjectType(Object* value, Smi* type, String* location) {
#ifdef DEBUG
const char* expected;
switch (static_cast<ObjectType>(type->value())) {
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 6419140a74..6b9089da6b 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -278,8 +278,7 @@ HEAP_OBJECT_TEMPLATE_TYPE_LIST(OBJECT_TYPE_TEMPLATE_CASE)
#undef OBJECT_TYPE_STRUCT_CASE
#undef OBJECT_TYPE_TEMPLATE_CASE
-Smi* CheckObjectType(Isolate* isolate, Object* value, Smi* type,
- String* location);
+Smi* CheckObjectType(Object* value, Smi* type, String* location);
namespace compiler {
@@ -440,18 +439,22 @@ class SloppyTNode : public TNode<T> {
V(Float64LessThanOrEqual, BoolT, Float64T, Float64T) \
V(Float64GreaterThan, BoolT, Float64T, Float64T) \
V(Float64GreaterThanOrEqual, BoolT, Float64T, Float64T) \
+ /* Use Word32Equal if you need Int32Equal */ \
V(Int32GreaterThan, BoolT, Word32T, Word32T) \
V(Int32GreaterThanOrEqual, BoolT, Word32T, Word32T) \
V(Int32LessThan, BoolT, Word32T, Word32T) \
V(Int32LessThanOrEqual, BoolT, Word32T, Word32T) \
+ /* Use WordEqual if you need IntPtrEqual */ \
V(IntPtrLessThan, BoolT, WordT, WordT) \
V(IntPtrLessThanOrEqual, BoolT, WordT, WordT) \
V(IntPtrGreaterThan, BoolT, WordT, WordT) \
V(IntPtrGreaterThanOrEqual, BoolT, WordT, WordT) \
+ /* Use Word32Equal if you need Uint32Equal */ \
V(Uint32LessThan, BoolT, Word32T, Word32T) \
V(Uint32LessThanOrEqual, BoolT, Word32T, Word32T) \
V(Uint32GreaterThan, BoolT, Word32T, Word32T) \
V(Uint32GreaterThanOrEqual, BoolT, Word32T, Word32T) \
+ /* Use WordEqual if you need UintPtrEqual */ \
V(UintPtrLessThan, BoolT, WordT, WordT) \
V(UintPtrLessThanOrEqual, BoolT, WordT, WordT) \
V(UintPtrGreaterThan, BoolT, WordT, WordT) \
@@ -535,12 +538,12 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64RoundTiesEven, Float64T, Float64T) \
V(Float64RoundTruncate, Float64T, Float64T) \
V(Word32Clz, Int32T, Word32T) \
- V(Word32Not, Word32T, Word32T) \
+ V(Word32BitwiseNot, Word32T, Word32T) \
V(WordNot, WordT, WordT) \
V(Int32AbsWithOverflow, PAIR_TYPE(Int32T, BoolT), Int32T) \
V(Int64AbsWithOverflow, PAIR_TYPE(Int64T, BoolT), Int64T) \
V(IntPtrAbsWithOverflow, PAIR_TYPE(IntPtrT, BoolT), IntPtrT) \
- V(Word32BinaryNot, Word32T, Word32T)
+ V(Word32BinaryNot, BoolT, Word32T)
// A "public" interface used by components outside of compiler directory to
// create code objects with TurboFan's backend. This class is mostly a thin
@@ -623,12 +626,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
Node* function = code_assembler_->ExternalConstant(
ExternalReference::check_object_type());
- Node* const isolate_ptr = code_assembler_->ExternalConstant(
- ExternalReference::isolate_address(code_assembler_->isolate()));
- code_assembler_->CallCFunction4(
- MachineType::AnyTagged(), MachineType::Pointer(),
- MachineType::AnyTagged(), MachineType::TaggedSigned(),
- MachineType::AnyTagged(), function, isolate_ptr, node_,
+ code_assembler_->CallCFunction3(
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged(), function,
+ node_,
code_assembler_->SmiConstant(
static_cast<int>(ObjectTypeOf<A>::value)),
code_assembler_->StringConstant(location_));
@@ -670,12 +671,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return TNode<T>::UncheckedCast(value);
}
- CheckedNode<Object, false> Cast(Node* value, const char* location) {
+ CheckedNode<Object, false> Cast(Node* value, const char* location = "") {
return {value, this, location};
}
template <class T>
- CheckedNode<T, true> Cast(TNode<T> value, const char* location) {
+ CheckedNode<T, true> Cast(TNode<T> value, const char* location = "") {
return {value, this, location};
}
@@ -685,7 +686,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
#define CAST(x) \
Cast(x, "CAST(" #x ") at " __FILE__ ":" TO_STRING_LITERAL(__LINE__))
#else
-#define CAST(x) Cast(x, "")
+#define CAST(x) Cast(x)
#endif
#ifdef DEBUG
@@ -772,6 +773,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Branch(SloppyTNode<IntegralT> condition, Label* true_label,
Label* false_label);
+ void Branch(TNode<BoolT> condition, std::function<void()> true_body,
+ std::function<void()> false_body);
+ void Branch(TNode<BoolT> condition, Label* true_label,
+ std::function<void()> false_body);
+ void Branch(TNode<BoolT> condition, std::function<void()> true_body,
+ Label* false_label);
+
void Switch(Node* index, Label* default_label, const int32_t* case_values,
Label** case_labels, size_t case_count);
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 6a7d0985f4..83060f9e38 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -42,9 +42,8 @@ CodeGenerator::CodeGenerator(
Zone* codegen_zone, Frame* frame, Linkage* linkage,
InstructionSequence* code, OptimizedCompilationInfo* info, Isolate* isolate,
base::Optional<OsrHelper> osr_helper, int start_source_position,
- JumpOptimizationInfo* jump_opt, WasmCompilationData* wasm_compilation_data,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- int32_t builtin_index)
+ JumpOptimizationInfo* jump_opt, PoisoningMitigationLevel poisoning_level,
+ const AssemblerOptions& options, int32_t builtin_index)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -75,7 +74,7 @@ CodeGenerator::CodeGenerator(
optimized_out_literal_id_(-1),
source_position_table_builder_(
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
- wasm_compilation_data_(wasm_compilation_data),
+ protected_instructions_(zone()),
result_(kSuccess),
poisoning_level_(poisoning_level),
block_starts_(zone()),
@@ -86,24 +85,25 @@ CodeGenerator::CodeGenerator(
CreateFrameAccessState(frame);
CHECK_EQ(info->is_osr(), osr_helper_.has_value());
tasm_.set_jump_optimization_info(jump_opt);
- Code::Kind code_kind = info_->code_kind();
+ Code::Kind code_kind = info->code_kind();
if (code_kind == Code::WASM_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION ||
- code_kind == Code::WASM_INTERPRETER_ENTRY) {
- tasm_.set_trap_on_abort(true);
+ code_kind == Code::WASM_INTERPRETER_ENTRY ||
+ (Builtins::IsBuiltinId(builtin_index) &&
+ Builtins::IsWasmRuntimeStub(builtin_index))) {
+ tasm_.set_abort_hard(true);
}
tasm_.set_builtin_index(builtin_index);
}
bool CodeGenerator::wasm_runtime_exception_support() const {
- DCHECK(wasm_compilation_data_);
- return wasm_compilation_data_->runtime_exception_support();
+ DCHECK_NOT_NULL(info_);
+ return info_->wasm_runtime_exception_support();
}
void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
uint32_t landing_offset) {
- DCHECK_NOT_NULL(wasm_compilation_data_);
- wasm_compilation_data_->AddProtectedInstruction(instr_offset, landing_offset);
+ protected_instructions_.push_back({instr_offset, landing_offset});
}
void CodeGenerator::CreateFrameAccessState(Frame* frame) {
@@ -372,6 +372,12 @@ OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
return source_position_table_builder_.ToSourcePositionTableVector();
}
+OwnedVector<trap_handler::ProtectedInstructionData>
+CodeGenerator::GetProtectedInstructions() {
+ return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
+ protected_instructions_);
+}
+
MaybeHandle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) {
tasm()->AbortedCodeGeneration();
@@ -439,10 +445,10 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
// we also don't need to worry about them, since the GC has special
// knowledge about those fields anyway.
if (index < stackSlotToSpillSlotDelta) continue;
- safepoint.DefinePointerSlot(index, zone());
+ safepoint.DefinePointerSlot(index);
} else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
Register reg = LocationOperand::cast(operand).GetRegister();
- safepoint.DefinePointerRegister(reg, zone());
+ safepoint.DefinePointerRegister(reg);
}
}
}
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index dcdb6bb806..5d4941f825 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -14,6 +14,7 @@
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"
#include "src/source-position-table.h"
+#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
@@ -27,7 +28,6 @@ class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;
-class WasmCompilationData;
struct BranchInfo {
FlagsCondition condition;
@@ -83,7 +83,6 @@ class CodeGenerator final : public GapResolver::Assembler {
base::Optional<OsrHelper> osr_helper,
int start_source_position,
JumpOptimizationInfo* jump_opt,
- WasmCompilationData* wasm_compilation_data,
PoisoningMitigationLevel poisoning_level,
const AssemblerOptions& options,
int32_t builtin_index);
@@ -95,6 +94,8 @@ class CodeGenerator final : public GapResolver::Assembler {
MaybeHandle<Code> FinalizeCode();
OwnedVector<byte> GetSourcePositionTable();
+ OwnedVector<trap_handler::ProtectedInstructionData>
+ GetProtectedInstructions();
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -427,7 +428,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
- WasmCompilationData* wasm_compilation_data_;
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
CodeGenResult result_;
PoisoningMitigationLevel poisoning_level_;
ZoneVector<int> block_starts_;
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 6e50d700b7..16a9096079 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -19,7 +19,7 @@ namespace compiler {
namespace {
-Decision DecideCondition(const JSHeapBroker* broker, Node* const cond) {
+Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
switch (cond->opcode()) {
case IrOpcode::kInt32Constant: {
Int32Matcher mcond(cond);
@@ -38,7 +38,7 @@ Decision DecideCondition(const JSHeapBroker* broker, Node* const cond) {
} // namespace
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
- const JSHeapBroker* js_heap_broker,
+ JSHeapBroker* js_heap_broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
Zone* temp_zone)
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 77a1d71084..f1b29eaf76 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
CommonOperatorReducer(Editor* editor, Graph* graph,
- const JSHeapBroker* js_heap_broker,
+ JSHeapBroker* js_heap_broker,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, Zone* temp_zone);
~CommonOperatorReducer() final {}
@@ -48,13 +48,13 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
Graph* graph() const { return graph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CommonOperatorBuilder* common() const { return common_; }
MachineOperatorBuilder* machine() const { return machine_; }
Node* dead() const { return dead_; }
Graph* const graph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
CommonOperatorBuilder* const common_;
MachineOperatorBuilder* const machine_;
Node* const dead_;
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 23f1cdfc1d..9bdaedea20 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -5,12 +5,12 @@
#ifndef V8_COMPILER_COMMON_OPERATOR_H_
#define V8_COMPILER_COMMON_OPERATOR_H_
-#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/compiler/frame-states.h"
#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/reloc-info.h"
#include "src/vector-slot-pair.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index a672d0a1f0..b67adbd7ca 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -16,22 +16,18 @@ CompilationDependencies::CompilationDependencies(Isolate* isolate, Zone* zone)
class CompilationDependencies::Dependency : public ZoneObject {
public:
- virtual bool IsSane() const = 0;
virtual bool IsValid() const = 0;
- virtual void Install(Isolate* isolate, Handle<WeakCell> code) = 0;
+ virtual void Install(MaybeObjectHandle code) = 0;
};
class InitialMapDependency final : public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the initial map.
InitialMapDependency(const JSFunctionRef& function, const MapRef& initial_map)
: function_(function), initial_map_(initial_map) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- CHECK(function_.has_initial_map());
- return function_.initial_map().equals(initial_map_);
+ DCHECK(function_.has_initial_map());
+ DCHECK(function_.initial_map().equals(initial_map_));
}
bool IsValid() const override {
@@ -40,9 +36,10 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
function->initial_map() == *initial_map_.object<Map>();
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, initial_map_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(function_.isolate(), code,
+ initial_map_.object<Map>(),
DependentCode::kInitialMapChangedGroup);
}
@@ -51,22 +48,51 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
MapRef initial_map_;
};
-class StableMapDependency final : public CompilationDependencies::Dependency {
+class PrototypePropertyDependency final
+ : public CompilationDependencies::Dependency {
public:
- explicit StableMapDependency(const MapRef& map) : map_(map) {
- DCHECK(IsSane());
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the prototype.
+ PrototypePropertyDependency(const JSFunctionRef& function,
+ const ObjectRef& prototype)
+ : function_(function), prototype_(prototype) {
+ DCHECK(function_.has_prototype());
+ DCHECK(!function_.PrototypeRequiresRuntimeLookup());
+ DCHECK(function_.prototype().equals(prototype_));
+ }
+
+ bool IsValid() const override {
+ Handle<JSFunction> function = function_.object<JSFunction>();
+ return function->has_prototype_slot() && function->has_prototype() &&
+ !function->PrototypeRequiresRuntimeLookup() &&
+ function->prototype() == *prototype_.object();
+ }
+
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ Handle<JSFunction> function = function_.object<JSFunction>();
+ if (!function->has_initial_map()) JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> initial_map(function->initial_map(), function_.isolate());
+ DependentCode::InstallDependency(function_.isolate(), code, initial_map,
+ DependentCode::kInitialMapChangedGroup);
}
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return map_.is_stable();
+ private:
+ JSFunctionRef function_;
+ ObjectRef prototype_;
+};
+
+class StableMapDependency final : public CompilationDependencies::Dependency {
+ public:
+ explicit StableMapDependency(const MapRef& map) : map_(map) {
+ DCHECK(map_.is_stable());
}
bool IsValid() const override { return map_.object<Map>()->is_stable(); }
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, map_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
DependentCode::kPrototypeCheckGroup);
}
@@ -77,19 +103,14 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
class TransitionDependency final : public CompilationDependencies::Dependency {
public:
explicit TransitionDependency(const MapRef& map) : map_(map) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return !map_.is_deprecated();
+ DCHECK(!map_.is_deprecated());
}
bool IsValid() const override { return !map_.object<Map>()->is_deprecated(); }
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, map_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(map_.isolate(), code, map_.object<Map>(),
DependentCode::kTransitionGroup);
}
@@ -100,24 +121,21 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
class PretenureModeDependency final
: public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the mode.
PretenureModeDependency(const AllocationSiteRef& site, PretenureFlag mode)
: site_(site), mode_(mode) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return mode_ == site_.GetPretenureMode();
+ DCHECK_EQ(mode_, site_.GetPretenureMode());
}
bool IsValid() const override {
return mode_ == site_.object<AllocationSite>()->GetPretenureMode();
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
- isolate, code, site_.object<AllocationSite>(),
+ site_.isolate(), code, site_.object<AllocationSite>(),
DependentCode::kAllocationSiteTenuringChangedGroup);
}
@@ -128,16 +146,13 @@ class PretenureModeDependency final
class FieldTypeDependency final : public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the type.
FieldTypeDependency(const MapRef& owner, int descriptor,
- const FieldTypeRef& type)
+ const ObjectRef& type)
: owner_(owner), descriptor_(descriptor), type_(type) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- CHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
- return type_.equals(owner_.GetFieldType(descriptor_));
+ DCHECK(owner_.equals(owner_.FindFieldOwner(descriptor_)));
+ DCHECK(type_.equals(owner_.GetFieldType(descriptor_)));
}
bool IsValid() const override {
@@ -147,31 +162,29 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
return *type == owner->instance_descriptors()->GetFieldType(descriptor_);
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code, owner_.object<Map>(),
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(owner_.isolate(), code,
+ owner_.object<Map>(),
DependentCode::kFieldOwnerGroup);
}
private:
MapRef owner_;
int descriptor_;
- FieldTypeRef type_;
+ ObjectRef type_;
};
class GlobalPropertyDependency final
: public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the type and the read_only flag.
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
: cell_(cell), type_(type), read_only_(read_only) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return type_ == cell_.property_details().cell_type() &&
- read_only_ == cell_.property_details().IsReadOnly();
+ DCHECK_EQ(type_, cell_.property_details().cell_type());
+ DCHECK_EQ(read_only_, cell_.property_details().IsReadOnly());
}
bool IsValid() const override {
@@ -180,9 +193,9 @@ class GlobalPropertyDependency final
read_only_ == cell->property_details().IsReadOnly();
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code,
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(cell_.isolate(), code,
cell_.object<PropertyCell>(),
DependentCode::kPropertyCellChangedGroup);
}
@@ -196,13 +209,7 @@ class GlobalPropertyDependency final
class ProtectorDependency final : public CompilationDependencies::Dependency {
public:
explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
- return cell_.value().IsSmi() &&
- cell_.value().AsSmi() == Isolate::kProtectorValid;
+ DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid);
}
bool IsValid() const override {
@@ -210,9 +217,9 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
- DependentCode::InstallDependency(isolate, code,
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
+ DependentCode::InstallDependency(cell_.isolate(), code,
cell_.object<PropertyCell>(),
DependentCode::kPropertyCellChangedGroup);
}
@@ -224,18 +231,14 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
class ElementsKindDependency final
: public CompilationDependencies::Dependency {
public:
+ // TODO(neis): Once the concurrent compiler frontend is always-on, we no
+ // longer need to explicitly store the elements kind.
ElementsKindDependency(const AllocationSiteRef& site, ElementsKind kind)
: site_(site), kind_(kind) {
- DCHECK(IsSane());
- }
-
- bool IsSane() const override {
- DisallowHeapAccess no_heap_access;
DCHECK(AllocationSite::ShouldTrack(kind_));
- ElementsKind kind = site_.PointsToLiteral()
- ? site_.boilerplate().GetElementsKind()
- : site_.GetElementsKind();
- return kind_ == kind;
+ DCHECK_EQ(kind_, site_.PointsToLiteral()
+ ? site_.boilerplate().value().GetElementsKind()
+ : site_.GetElementsKind());
}
bool IsValid() const override {
@@ -246,10 +249,10 @@ class ElementsKindDependency final
return kind_ == kind;
}
- void Install(Isolate* isolate, Handle<WeakCell> code) override {
- DCHECK(IsValid());
+ void Install(MaybeObjectHandle code) override {
+ SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
- isolate, code, site_.object<AllocationSite>(),
+ site_.isolate(), code, site_.object<AllocationSite>(),
DependentCode::kAllocationSiteTransitionChangedGroup);
}
@@ -258,6 +261,33 @@ class ElementsKindDependency final
ElementsKind kind_;
};
+class InitialMapInstanceSizePredictionDependency final
+ : public CompilationDependencies::Dependency {
+ public:
+ InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
+ int instance_size)
+ : function_(function), instance_size_(instance_size) {}
+
+ bool IsValid() const override {
+ // The dependency is valid if the prediction is the same as the current
+ // slack tracking result.
+ int instance_size =
+ function_.object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
+ function_.isolate());
+ return instance_size == instance_size_;
+ }
+
+ void Install(MaybeObjectHandle code) override {
+ DCHECK(IsValid());
+ // Finish the slack tracking.
+ function_.object<JSFunction>()->CompleteInobjectSlackTrackingIfActive();
+ }
+
+ private:
+ JSFunctionRef function_;
+ int instance_size_;
+};
+
MapRef CompilationDependencies::DependOnInitialMap(
const JSFunctionRef& function) {
MapRef map = function.initial_map();
@@ -265,6 +295,14 @@ MapRef CompilationDependencies::DependOnInitialMap(
return map;
}
+ObjectRef CompilationDependencies::DependOnPrototypeProperty(
+ const JSFunctionRef& function) {
+ ObjectRef prototype = function.prototype();
+ dependencies_.push_front(
+ new (zone_) PrototypePropertyDependency(function, prototype));
+ return prototype;
+}
+
void CompilationDependencies::DependOnStableMap(const MapRef& map) {
if (map.CanTransition()) {
dependencies_.push_front(new (zone_) StableMapDependency(map));
@@ -291,7 +329,7 @@ PretenureFlag CompilationDependencies::DependOnPretenureMode(
void CompilationDependencies::DependOnFieldType(const MapRef& map,
int descriptor) {
MapRef owner = map.FindFieldOwner(descriptor);
- FieldTypeRef type = owner.GetFieldType(descriptor);
+ ObjectRef type = owner.GetFieldType(descriptor);
DCHECK(type.equals(map.GetFieldType(descriptor)));
dependencies_.push_front(new (zone_)
FieldTypeDependency(owner, descriptor, type));
@@ -313,7 +351,7 @@ void CompilationDependencies::DependOnElementsKind(
const AllocationSiteRef& site) {
// Do nothing if the object doesn't have any useful element transitions left.
ElementsKind kind = site.PointsToLiteral()
- ? site.boilerplate().GetElementsKind()
+ ? site.boilerplate().value().GetElementsKind()
: site.GetElementsKind();
if (AllocationSite::ShouldTrack(kind)) {
dependencies_.push_front(new (zone_) ElementsKindDependency(site, kind));
@@ -328,25 +366,28 @@ bool CompilationDependencies::AreValid() const {
}
bool CompilationDependencies::Commit(Handle<Code> code) {
- Isolate* isolate = code->GetIsolate();
-
- // Check validity of all dependencies first, such that we can abort before
- // installing anything.
+ // Check validity of all dependencies first, such that we can avoid installing
+ // anything when there's already an invalid dependency.
if (!AreValid()) {
dependencies_.clear();
return false;
}
- Handle<WeakCell> cell = Code::WeakCellFor(code);
for (auto dep : dependencies_) {
- dep->Install(isolate, cell);
+ // Check each dependency's validity again right before installing it,
+ // because a GC can trigger invalidation for some dependency kinds.
+ if (!dep->IsValid()) {
+ dependencies_.clear();
+ return false;
+ }
+ dep->Install(MaybeObjectHandle::Weak(code));
}
dependencies_.clear();
return true;
}
namespace {
-void DependOnStablePrototypeChain(const JSHeapBroker* broker,
+void DependOnStablePrototypeChain(JSHeapBroker* broker,
CompilationDependencies* deps,
Handle<Map> map,
MaybeHandle<JSReceiver> last_prototype) {
@@ -364,7 +405,7 @@ void DependOnStablePrototypeChain(const JSHeapBroker* broker,
} // namespace
void CompilationDependencies::DependOnStablePrototypeChains(
- const JSHeapBroker* broker, Handle<Context> native_context,
+ JSHeapBroker* broker, Handle<Context> native_context,
std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
Isolate* isolate = holder->GetIsolate();
// Determine actual holder and perform prototype chain checks.
@@ -391,6 +432,28 @@ void CompilationDependencies::DependOnElementsKinds(
CHECK_EQ(current.nested_site().AsSmi(), 0);
}
+SlackTrackingPrediction::SlackTrackingPrediction(MapRef initial_map,
+ int instance_size)
+ : instance_size_(instance_size),
+ inobject_property_count_(
+ (instance_size >> kPointerSizeLog2) -
+ initial_map.GetInObjectPropertiesStartInWords()) {}
+
+SlackTrackingPrediction
+CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
+ const JSFunctionRef& function) {
+ MapRef initial_map = DependOnInitialMap(function);
+ int instance_size = function.InitialMapInstanceSizeWithMinSlack();
+ // Currently, we always install the prediction dependency. If this turns out
+ // to be too expensive, we could install the dependency only while slack
+ // tracking is active.
+ dependencies_.push_front(
+ new (zone_)
+ InitialMapInstanceSizePredictionDependency(function, instance_size));
+ DCHECK_LE(instance_size, function.initial_map().instance_size());
+ return SlackTrackingPrediction(initial_map, instance_size);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
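Commit() now re-checks each dependency right before installing it, in addition to the up-front AreValid() pass, because installing earlier dependencies (or a GC) can invalidate later ones. A standalone mirror of that two-phase structure (not V8 code; the std::function members stand in for the virtual IsValid/Install methods):

    // Not V8 code: validate everything first, then re-validate each entry
    // immediately before installing it.
    #include <functional>
    #include <list>

    struct Dependency {
      std::function<bool()> is_valid;
      std::function<void()> install;
    };

    bool Commit(std::list<Dependency>& deps) {
      for (auto& d : deps) {
        if (!d.is_valid()) { deps.clear(); return false; }  // abort before installing anything
      }
      for (auto& d : deps) {
        if (!d.is_valid()) { deps.clear(); return false; }  // re-check right before install
        d.install();
      }
      deps.clear();
      return true;
    }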
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 9770775c2e..5d4cd221df 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -13,6 +13,18 @@ namespace v8 {
namespace internal {
namespace compiler {
+class SlackTrackingPrediction {
+ public:
+ SlackTrackingPrediction(MapRef initial_map, int instance_size);
+
+ int inobject_property_count() const { return inobject_property_count_; }
+ int instance_size() const { return instance_size_; }
+
+ private:
+ int instance_size_;
+ int inobject_property_count_;
+};
+
// Collects and installs dependencies of the code that is being generated.
class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
public:
@@ -21,9 +33,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
// Return the initial map of {function} and record the assumption that it
- // stays the intial map.
+ // stays the initial map.
MapRef DependOnInitialMap(const JSFunctionRef& function);
+ // Return the "prototype" property of the given function and record the
+ // assumption that it doesn't change.
+ ObjectRef DependOnPrototypeProperty(const JSFunctionRef& function);
+
// Record the assumption that {map} stays stable.
void DependOnStableMap(const MapRef& map);
@@ -53,12 +69,20 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// {receiver_type} up to (and including) the {holder}.
// TODO(neis): Fully brokerize!
void DependOnStablePrototypeChains(
- const JSHeapBroker* broker, Handle<Context> native_context,
+ JSHeapBroker* broker, Handle<Context> native_context,
std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder);
// Like DependOnElementsKind but also applies to all nested allocation sites.
void DependOnElementsKinds(const AllocationSiteRef& site);
+ // Predict the final instance size for {function}'s initial map and record
+ // the assumption that this prediction is correct. In addition, register
+  // the initial map dependency. This method returns the {function}'s
+  // predicted minimum slack instance size (wrapped together with
+ // the corresponding in-object property count for convenience).
+ SlackTrackingPrediction DependOnInitialMapInstanceSizePrediction(
+ const JSFunctionRef& function);
+
// Exposed only for testing purposes.
bool AreValid() const;
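The prediction installed by DependOnInitialMapInstanceSizePrediction is just arithmetic over the predicted instance size: every word past the in-object-properties start is a usable property slot. Below is a minimal standalone sketch of that arithmetic, assuming 8-byte tagged pointers (kPointerSizeLog2 == 3) and hypothetical names rather than the real MapRef API.

    #include <cassert>

    constexpr int kPointerSizeLog2 = 3;  // assumes 8-byte pointers (64-bit build)

    struct PredictionSketch {
      int instance_size;            // predicted size in bytes with minimum slack
      int inobject_property_count;  // property slots implied by that size
    };

    PredictionSketch MakePrediction(int predicted_instance_size,
                                    int inobject_properties_start_in_words) {
      return {predicted_instance_size,
              (predicted_instance_size >> kPointerSizeLog2) -
                  inobject_properties_start_in_words};
    }

    int main() {
      // A 64-byte (8-word) instance whose in-object properties start at word 3
      // (map, properties, elements) leaves room for 5 in-object properties.
      PredictionSketch p = MakePrediction(64, 3);
      assert(p.inobject_property_count == 5);
    }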
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index 1811c06f98..a447b2a07c 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -11,8 +11,8 @@ namespace v8 {
namespace internal {
namespace compiler {
-ConstantFoldingReducer::ConstantFoldingReducer(
- Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker)
+ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
+ JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker) {}
diff --git a/deps/v8/src/compiler/constant-folding-reducer.h b/deps/v8/src/compiler/constant-folding-reducer.h
index b111e5b878..3fbe5c4c2e 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.h
+++ b/deps/v8/src/compiler/constant-folding-reducer.h
@@ -18,7 +18,7 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker);
+ JSHeapBroker* js_heap_broker);
~ConstantFoldingReducer() final;
const char* reducer_name() const override { return "ConstantFoldingReducer"; }
@@ -27,10 +27,10 @@ class V8_EXPORT_PRIVATE ConstantFoldingReducer final
private:
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(ConstantFoldingReducer);
};
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 9a3a293055..9b12c022c4 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -1631,6 +1631,32 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
return value;
}
+Node* EffectControlLinearizer::BuildUint32Mod(Node* lhs, Node* rhs) {
+ auto if_rhs_power_of_two = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kWord32);
+
+ // Compute the mask for the {rhs}.
+ Node* one = __ Int32Constant(1);
+ Node* msk = __ Int32Sub(rhs, one);
+
+ // Check if the {rhs} is a power of two.
+ __ GotoIf(__ Word32Equal(__ Word32And(rhs, msk), __ Int32Constant(0)),
+ &if_rhs_power_of_two);
+ {
+ // The {rhs} is not a power of two, do a generic Uint32Mod.
+ __ Goto(&done, __ Uint32Mod(lhs, rhs));
+ }
+
+ __ Bind(&if_rhs_power_of_two);
+ {
+ // The {rhs} is a power of two, just do a fast bit masking.
+ __ Goto(&done, __ Word32And(lhs, msk));
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
Node* frame_state) {
// General case for signed integer modulus, with optimization for (unknown)
@@ -1639,12 +1665,19 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// if rhs <= 0 then
// rhs = -rhs
// deopt if rhs == 0
+ // let msk = rhs - 1 in
// if lhs < 0 then
- // let res = lhs % rhs in
- // deopt if res == 0
- // res
+ //     let lhs_abs = -lhs in
+ // let res = if rhs & msk == 0 then
+ // lhs_abs & msk
+ // else
+ // lhs_abs % rhs in
+ // if lhs < 0 then
+ // deopt if res == 0
+ // -res
+ // else
+ // res
// else
- // let msk = rhs - 1 in
// if rhs & msk == 0 then
// lhs & msk
// else
@@ -1655,7 +1688,7 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
auto if_rhs_not_positive = __ MakeDeferredLabel();
auto if_lhs_negative = __ MakeDeferredLabel();
- auto if_power_of_two = __ MakeLabel();
+ auto if_rhs_power_of_two = __ MakeLabel();
auto rhs_checked = __ MakeLabel(MachineRepresentation::kWord32);
auto done = __ MakeLabel(MachineRepresentation::kWord32);
@@ -1673,45 +1706,29 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
Node* vtrue0 = __ Int32Sub(zero, rhs);
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = __ Word32Equal(vtrue0, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(),
+ __ Word32Equal(vtrue0, zero), frame_state);
__ Goto(&rhs_checked, vtrue0);
}
__ Bind(&rhs_checked);
rhs = rhs_checked.PhiAt(0);
- // Check if {lhs} is negative.
- Node* check1 = __ Int32LessThan(lhs, zero);
- __ GotoIf(check1, &if_lhs_negative);
-
- // {lhs} non-negative.
+ __ GotoIf(__ Int32LessThan(lhs, zero), &if_lhs_negative);
{
- Node* one = __ Int32Constant(1);
- Node* msk = __ Int32Sub(rhs, one);
-
- // Check if {rhs} minus one is a valid mask.
- Node* check2 = __ Word32Equal(__ Word32And(rhs, msk), zero);
- __ GotoIf(check2, &if_power_of_two);
- // Compute the remainder using the generic {lhs % rhs}.
- __ Goto(&done, __ Int32Mod(lhs, rhs));
-
- __ Bind(&if_power_of_two);
- // Compute the remainder using {lhs & msk}.
- __ Goto(&done, __ Word32And(lhs, msk));
+ // The {lhs} is a non-negative integer.
+ __ Goto(&done, BuildUint32Mod(lhs, rhs));
}
__ Bind(&if_lhs_negative);
{
- // Compute the remainder using {lhs % msk}.
- Node* vtrue1 = __ Int32Mod(lhs, rhs);
+ // The {lhs} is a negative integer.
+ Node* res = BuildUint32Mod(__ Int32Sub(zero, lhs), rhs);
// Check if we would have to return -0.
- Node* check = __ Word32Equal(vtrue1, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
- frame_state);
- __ Goto(&done, vtrue1);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(),
+ __ Word32Equal(res, zero), frame_state);
+ __ Goto(&done, __ Int32Sub(zero, res));
}
__ Bind(&done);
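Spelled out as ordinary C++ rather than graph nodes, the lowering above behaves like the sketch below; a std::nullopt result stands in for the two deoptimization exits (division by zero and a result that would have to be -0).

    #include <cstdint>
    #include <optional>

    std::optional<int32_t> CheckedInt32ModSketch(int32_t lhs, int32_t rhs) {
      uint32_t divisor = rhs <= 0 ? -static_cast<uint32_t>(rhs)
                                  : static_cast<uint32_t>(rhs);
      if (divisor == 0) return std::nullopt;  // DeoptimizeReason::kDivisionByZero

      // BuildUint32Mod: a power-of-two divisor becomes a single bit mask.
      auto uint32_mod = [divisor](uint32_t value) -> uint32_t {
        uint32_t msk = divisor - 1;
        return (divisor & msk) == 0 ? (value & msk) : (value % divisor);
      };

      if (lhs < 0) {
        uint32_t res = uint32_mod(-static_cast<uint32_t>(lhs));
        if (res == 0) return std::nullopt;  // DeoptimizeReason::kMinusZero
        return -static_cast<int32_t>(res);
      }
      return static_cast<int32_t>(uint32_mod(static_cast<uint32_t>(lhs)));
    }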
@@ -1753,7 +1770,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
frame_state);
// Perform the actual unsigned integer modulus.
- return __ Uint32Mod(lhs, rhs);
+ return BuildUint32Mod(lhs, rhs);
}
Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
@@ -3293,16 +3310,17 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* frame_state) {
// If we reach this point w/o eliminating the {node} that's marked
// with allow-return-hole, we cannot do anything, so just deoptimize
- // in case of the hole NaN (similar to Crankshaft).
+ // in case of the hole NaN.
+ CheckFloat64HoleParameters const& params =
+ CheckFloat64HoleParametersOf(node->op());
Node* value = node->InputAt(0);
Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
__ Int32Constant(kHoleNanUpper32));
- __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ __ DeoptimizeIf(DeoptimizeReason::kHole, params.feedback(), check,
frame_state);
return value;
}
-
Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -3752,350 +3770,153 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
- ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
- Node* buffer = node->InputAt(0);
- Node* storage = node->InputAt(1);
- Node* index = node->InputAt(2);
- Node* is_little_endian = node->InputAt(3);
-
- // We need to keep the {buffer} alive so that the GC will not release the
- // ArrayBuffer (if there's any) as long as we are still operating on it.
- __ Retain(buffer);
-
- ElementAccess access_int8 = AccessBuilder::ForTypedArrayElement(
- kExternalInt8Array, true, LoadSensitivity::kCritical);
- ElementAccess access_uint8 = AccessBuilder::ForTypedArrayElement(
- kExternalUint8Array, true, LoadSensitivity::kCritical);
-
- switch (element_type) {
- case kExternalUint8Array:
- return __ LoadElement(access_uint8, storage, index);
-
+Node* EffectControlLinearizer::BuildReverseBytes(ExternalArrayType type,
+ Node* value) {
+ switch (type) {
case kExternalInt8Array:
- return __ LoadElement(access_int8, storage, index);
+ case kExternalUint8Array:
+ case kExternalUint8ClampedArray:
+ return value;
- case kExternalUint16Array: // Fall through.
case kExternalInt16Array: {
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32);
-
- // If we're doing an Int16 load, sign-extend the most significant byte
- // by loading it as an Int8 instead of Uint8.
- ElementAccess access_msb =
- element_type == kExternalInt16Array ? access_int8 : access_uint8;
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian load.
- Node* b0 = __ LoadElement(access_uint8, storage, index);
- Node* b1 = __ LoadElement(access_msb, storage,
- __ Int32Add(index, __ Int32Constant(1)));
-
- // result = (b1 << 8) + b0
- Node* result = __ Int32Add(__ Word32Shl(b1, __ Int32Constant(8)), b0);
- __ Goto(&done, result);
- }
-
- __ Bind(&big_endian);
- {
- // Big-endian load.
- Node* b0 = __ LoadElement(access_msb, storage, index);
- Node* b1 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(1)));
-
- // result = (b0 << 8) + b1;
- Node* result = __ Int32Add(__ Word32Shl(b0, __ Int32Constant(8)), b1);
- __ Goto(&done, result);
- }
-
- // We're done, return {result}.
- __ Bind(&done);
- return done.PhiAt(0);
+ Node* result = __ Word32ReverseBytes(value);
+ result = __ Word32Sar(result, __ Int32Constant(16));
+ return result;
}
- case kExternalUint32Array: // Fall through.
- case kExternalInt32Array: // Fall through.
- case kExternalFloat32Array: {
- Node* b0 = __ LoadElement(access_uint8, storage, index);
- Node* b1 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(1)));
- Node* b2 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(2)));
- Node* b3 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(3)));
-
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32);
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian load.
- // result = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- Node* result =
- __ Word32Or(__ Word32Or(__ Word32Shl(b3, __ Int32Constant(24)),
- __ Word32Shl(b2, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b1, __ Int32Constant(8)), b0));
- __ Goto(&done, result);
- }
+ case kExternalUint16Array: {
+ Node* result = __ Word32ReverseBytes(value);
+ result = __ Word32Shr(result, __ Int32Constant(16));
+ return result;
+ }
- __ Bind(&big_endian);
- {
- // Big-endian load.
- // result = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- Node* result =
- __ Word32Or(__ Word32Or(__ Word32Shl(b0, __ Int32Constant(24)),
- __ Word32Shl(b1, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b2, __ Int32Constant(8)), b3));
- __ Goto(&done, result);
- }
+ case kExternalInt32Array: // Fall through.
+ case kExternalUint32Array:
+ return __ Word32ReverseBytes(value);
- // We're done, return {result}.
- __ Bind(&done);
- if (element_type == kExternalFloat32Array) {
- return __ BitcastInt32ToFloat32(done.PhiAt(0));
- } else {
- return done.PhiAt(0);
- }
+ case kExternalFloat32Array: {
+ Node* result = __ BitcastFloat32ToInt32(value);
+ result = __ Word32ReverseBytes(result);
+ result = __ BitcastInt32ToFloat32(result);
+ return result;
}
case kExternalFloat64Array: {
- Node* b0 = __ LoadElement(access_uint8, storage, index);
- Node* b1 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(1)));
- Node* b2 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(2)));
- Node* b3 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(3)));
- Node* b4 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(4)));
- Node* b5 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(5)));
- Node* b6 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(6)));
- Node* b7 = __ LoadElement(access_uint8, storage,
- __ Int32Add(index, __ Int32Constant(7)));
-
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kWord32,
- MachineRepresentation::kWord32);
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian load.
- // low_word = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- // high_word = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
- Node* low_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b3, __ Int32Constant(24)),
- __ Word32Shl(b2, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b1, __ Int32Constant(8)), b0));
- Node* high_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b7, __ Int32Constant(24)),
- __ Word32Shl(b6, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b5, __ Int32Constant(8)), b4));
- __ Goto(&done, low_word, high_word);
- }
-
- __ Bind(&big_endian);
- {
- // Big-endian load.
- // high_word = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
- // low_word = (b4 << 24) | (b5 << 16) | (b6 << 8) | b7;
- Node* high_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b0, __ Int32Constant(24)),
- __ Word32Shl(b1, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b2, __ Int32Constant(8)), b3));
- Node* low_word =
- __ Word32Or(__ Word32Or(__ Word32Shl(b4, __ Int32Constant(24)),
- __ Word32Shl(b5, __ Int32Constant(16))),
- __ Word32Or(__ Word32Shl(b6, __ Int32Constant(8)), b7));
- __ Goto(&done, low_word, high_word);
+ if (machine()->Is64()) {
+ Node* result = __ BitcastFloat64ToInt64(value);
+ result = __ Word64ReverseBytes(result);
+ result = __ BitcastInt64ToFloat64(result);
+ return result;
+ } else {
+ Node* lo = __ Word32ReverseBytes(__ Float64ExtractLowWord32(value));
+ Node* hi = __ Word32ReverseBytes(__ Float64ExtractHighWord32(value));
+ Node* result = __ Float64Constant(0.0);
+ result = __ Float64InsertLowWord32(result, hi);
+ result = __ Float64InsertHighWord32(result, lo);
+ return result;
}
-
- // We're done, store the low and high words into a float64.
- __ Bind(&done);
- Node* result = __ Float64Constant(0.0);
- result = __ Float64InsertLowWord32(result, done.PhiAt(0));
- result = __ Float64InsertHighWord32(result, done.PhiAt(1));
- return result;
}
- default:
+ case kExternalBigInt64Array:
+ case kExternalBigUint64Array:
UNREACHABLE();
}
}
-void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
+Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* storage = node->InputAt(1);
Node* index = node->InputAt(2);
- Node* value = node->InputAt(3);
- Node* is_little_endian = node->InputAt(4);
+ Node* is_little_endian = node->InputAt(3);
+
+ // On 64-bit platforms, we need to feed a Word64 index to the Load and
+ // Store operators.
+ if (machine()->Is64()) {
+ index = __ ChangeUint32ToUint64(index);
+ }
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
- ElementAccess access =
- AccessBuilder::ForTypedArrayElement(kExternalUint8Array, true);
+ MachineType const machine_type =
+ AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
- switch (element_type) {
- case kExternalUint8Array: // Fall through.
- case kExternalInt8Array: {
- Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
- __ StoreElement(access, storage, index, b0);
- break;
- }
- case kExternalUint16Array: // Fall through.
- case kExternalInt16Array: {
- Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
- Node* b1 = __ Word32And(__ Word32Shr(value, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
+ Node* value = __ LoadUnaligned(machine_type, storage, index);
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel(machine_type.representation());
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel();
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian store.
- __ StoreElement(access, storage, index, b0);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b1);
- __ Goto(&done);
- }
-
- __ Bind(&big_endian);
- {
- // Big-endian store.
- __ StoreElement(access, storage, index, b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b0);
- __ Goto(&done);
- }
-
- __ Bind(&done);
- break;
- }
-
- case kExternalUint32Array: // Fall through.
- case kExternalInt32Array: // Fall through.
- case kExternalFloat32Array: {
- if (element_type == kExternalFloat32Array) {
- value = __ BitcastFloat32ToInt32(value);
- }
+ __ GotoIfNot(is_little_endian, &big_endian);
+ { // Little-endian load.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, value);
+#else
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#endif // V8_TARGET_LITTLE_ENDIAN
+ }
- Node* b0 = __ Word32And(value, __ Int32Constant(0xFF));
- Node* b1 = __ Word32And(__ Word32Shr(value, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
- Node* b2 = __ Word32And(__ Word32Shr(value, __ Int32Constant(16)),
- __ Int32Constant(0xFF));
- Node* b3 = __ Word32Shr(value, __ Int32Constant(24));
+ __ Bind(&big_endian);
+ { // Big-endian load.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#else
+ __ Goto(&done, value);
+#endif // V8_TARGET_LITTLE_ENDIAN
+ }
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel();
+ // We're done, return {result}.
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian store.
- __ StoreElement(access, storage, index, b0);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b3);
- __ Goto(&done);
- }
+void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
+ ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
+ Node* buffer = node->InputAt(0);
+ Node* storage = node->InputAt(1);
+ Node* index = node->InputAt(2);
+ Node* value = node->InputAt(3);
+ Node* is_little_endian = node->InputAt(4);
- __ Bind(&big_endian);
- {
- // Big-endian store.
- __ StoreElement(access, storage, index, b3);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b0);
- __ Goto(&done);
- }
+ // On 64-bit platforms, we need to feed a Word64 index to the Load and
+ // Store operators.
+ if (machine()->Is64()) {
+ index = __ ChangeUint32ToUint64(index);
+ }
- __ Bind(&done);
- break;
- }
+ // We need to keep the {buffer} alive so that the GC will not release the
+ // ArrayBuffer (if there's any) as long as we are still operating on it.
+ __ Retain(buffer);
- case kExternalFloat64Array: {
- Node* low_word = __ Float64ExtractLowWord32(value);
- Node* high_word = __ Float64ExtractHighWord32(value);
-
- Node* b0 = __ Word32And(low_word, __ Int32Constant(0xFF));
- Node* b1 = __ Word32And(__ Word32Shr(low_word, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
- Node* b2 = __ Word32And(__ Word32Shr(low_word, __ Int32Constant(16)),
- __ Int32Constant(0xFF));
- Node* b3 = __ Word32Shr(low_word, __ Int32Constant(24));
-
- Node* b4 = __ Word32And(high_word, __ Int32Constant(0xFF));
- Node* b5 = __ Word32And(__ Word32Shr(high_word, __ Int32Constant(8)),
- __ Int32Constant(0xFF));
- Node* b6 = __ Word32And(__ Word32Shr(high_word, __ Int32Constant(16)),
- __ Int32Constant(0xFF));
- Node* b7 = __ Word32Shr(high_word, __ Int32Constant(24));
-
- auto big_endian = __ MakeLabel();
- auto done = __ MakeLabel();
-
- __ GotoIfNot(is_little_endian, &big_endian);
- {
- // Little-endian store.
- __ StoreElement(access, storage, index, b0);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b3);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(4)), b4);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(5)), b5);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(6)), b6);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(7)), b7);
- __ Goto(&done);
- }
+ MachineType const machine_type =
+ AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
- __ Bind(&big_endian);
- {
- // Big-endian store.
- __ StoreElement(access, storage, index, b7);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(1)), b6);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(2)), b5);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(3)), b4);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(4)), b3);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(5)), b2);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(6)), b1);
- __ StoreElement(access, storage,
- __ Int32Add(index, __ Int32Constant(7)), b0);
- __ Goto(&done);
- }
+ auto big_endian = __ MakeLabel();
+ auto done = __ MakeLabel(machine_type.representation());
- __ Bind(&done);
- break;
- }
+ __ GotoIfNot(is_little_endian, &big_endian);
+ { // Little-endian store.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, value);
+#else
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#endif // V8_TARGET_LITTLE_ENDIAN
+ }
- default:
- UNREACHABLE();
+ __ Bind(&big_endian);
+ { // Big-endian store.
+#if V8_TARGET_LITTLE_ENDIAN
+ __ Goto(&done, BuildReverseBytes(element_type, value));
+#else
+ __ Goto(&done, value);
+#endif // V8_TARGET_LITTLE_ENDIAN
}
+
+ __ Bind(&done);
+ __ StoreUnaligned(machine_type.representation(), storage, index,
+ done.PhiAt(0));
}
+
Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
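The DataView rewrite above boils down to: do one unaligned load or store in the machine's native byte order, then reverse the bytes only when the requested endianness differs from the target's. For 16-bit elements the reversal is the subtle part, because an Int16 value has already been sign-extended to 32 bits; BuildReverseBytes therefore reverses the full word and shifts it back down arithmetically. A plain-C++ sketch of that case, assuming a little-endian host and using std::memcpy in place of the unaligned load operator:

    #include <cstdint>
    #include <cstring>

    // What Word32ReverseBytes computes.
    uint32_t ReverseBytes32(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) |
             (v << 24);
    }

    // DataView.getInt16 semantics on a little-endian host; {index} is a raw
    // byte offset into the backing store, so the access may be unaligned.
    int32_t LoadDataViewInt16(const uint8_t* storage, size_t index,
                              bool is_little_endian) {
      uint16_t raw;
      std::memcpy(&raw, storage + index, sizeof(raw));  // native-order load
      int32_t value = static_cast<int16_t>(raw);        // sign-extended word32
      if (is_little_endian) return value;               // already native order
      // Other endianness: reverse all four bytes, then arithmetic-shift back,
      // which restores the sign extension (kExternalInt16Array case above).
      uint32_t reversed = ReverseBytes32(static_cast<uint32_t>(value));
      return static_cast<int32_t>(reversed) >> 16;
    }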
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 47e0a249cf..aa174ed45e 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -178,8 +178,10 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
const VectorSlotPair& feedback,
Node* value,
Node* frame_state);
+ Node* BuildReverseBytes(ExternalArrayType type, Node* value);
Node* BuildFloat64RoundDown(Node* value);
Node* BuildFloat64RoundTruncate(Node* input);
+ Node* BuildUint32Mod(Node* lhs, Node* rhs);
Node* ComputeIntegerHash(Node* value);
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 1642d85c23..1434a4b98a 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -99,7 +99,8 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
}
switch (node->opcode()) {
- case IrOpcode::kAllocate: {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kTypeGuard: {
const VirtualObject* vobject = analysis_result().GetVirtualObject(node);
if (vobject && !vobject->HasEscaped()) {
RelaxEffectsAndControls(node);
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 9780d227fd..496f322106 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -155,6 +155,28 @@ Node* GraphAssembler::Load(MachineType rep, Node* object, Node* offset) {
current_effect_, current_control_);
}
+Node* GraphAssembler::StoreUnaligned(MachineRepresentation rep, Node* object,
+ Node* offset, Node* value) {
+ Operator const* const op =
+ (rep == MachineRepresentation::kWord8 ||
+ machine()->UnalignedStoreSupported(rep))
+ ? machine()->Store(StoreRepresentation(rep, kNoWriteBarrier))
+ : machine()->UnalignedStore(rep);
+ return current_effect_ = graph()->NewNode(op, object, offset, value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadUnaligned(MachineType rep, Node* object,
+ Node* offset) {
+ Operator const* const op =
+ (rep.representation() == MachineRepresentation::kWord8 ||
+ machine()->UnalignedLoadSupported(rep.representation()))
+ ? machine()->Load(rep)
+ : machine()->UnalignedLoad(rep);
+ return current_effect_ = graph()->NewNode(op, object, offset, current_effect_,
+ current_control_);
+}
+
Node* GraphAssembler::Retain(Node* buffer) {
return current_effect_ =
graph()->NewNode(common()->Retain(), buffer, current_effect_);
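GraphAssembler::LoadUnaligned and StoreUnaligned encode a small policy decision: single-byte accesses can never be misaligned, and some targets tolerate wider unaligned accesses, so the plain Load/Store operator is emitted whenever it is safe and the dedicated Unaligned operators only as a fallback. A sketch of the predicate, with machine_supports_unaligned standing in for the MachineOperatorBuilder queries used above:

    enum class Rep { kWord8, kWord16, kWord32, kWord64, kFloat32, kFloat64 };

    // True when the ordinary (aligned) memory operator may be emitted.
    bool UseOrdinaryMemoryOperator(Rep rep, bool machine_supports_unaligned) {
      return rep == Rep::kWord8 || machine_supports_unaligned;
    }

The DataView lowerings always route through these helpers because a DataView index is a raw byte offset, so even a Float64 element may start at any alignment.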
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index f9b45a2007..79eb493608 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -31,8 +31,12 @@ namespace compiler {
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
V(BitcastFloat32ToInt32) \
- V(Float64Abs)
+ V(BitcastFloat64ToInt64) \
+ V(Float64Abs) \
+ V(Word32ReverseBytes) \
+ V(Word64ReverseBytes)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
V(WordShl) \
@@ -215,6 +219,10 @@ class GraphAssembler {
Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
Node* Load(MachineType rep, Node* object, Node* offset);
+ Node* StoreUnaligned(MachineRepresentation rep, Node* object, Node* offset,
+ Node* value);
+ Node* LoadUnaligned(MachineType rep, Node* object, Node* offset);
+
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 4b95169215..5573c346ee 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -11,6 +11,7 @@
#include <memory>
#include "src/globals.h"
+#include "src/handles.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 9aef138811..9d54eaeb90 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -16,6 +16,8 @@
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -166,6 +168,22 @@ class IA32OperandConverter : public InstructionOperandConverter {
Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
+
+ Operand NextMemoryOperand(size_t offset = 0) {
+ AddressingMode mode = AddressingModeField::decode(instr_->opcode());
+ Register base = InputRegister(NextOffset(&offset));
+ const int32_t disp = 4;
+ if (mode == kMode_MR1) {
+ Register index = InputRegister(NextOffset(&offset));
+ ScaleFactor scale = ScaleFor(kMode_MR1, kMode_MR1);
+ return Operand(base, index, scale, disp);
+ } else if (mode == kMode_MRI) {
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(&offset)));
+ return Operand(base, ctant.ToInt32() + disp, ctant.rmode());
+ } else {
+ UNREACHABLE();
+ }
+ }
};
@@ -409,6 +427,23 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
+#define ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
+ do { \
+ Label binop; \
+ __ bind(&binop); \
+ __ mov(i.OutputRegister(0), i.MemoryOperand(2)); \
+ __ mov(i.OutputRegister(1), i.NextMemoryOperand(2)); \
+ __ push(i.InputRegister(0)); \
+ __ push(i.InputRegister(1)); \
+ __ instr1(i.InputRegister(0), i.OutputRegister(0)); \
+ __ instr2(i.InputRegister(1), i.OutputRegister(1)); \
+ __ lock(); \
+ __ cmpxchg8b(i.MemoryOperand(2)); \
+ __ pop(i.InputRegister(1)); \
+ __ pop(i.InputRegister(0)); \
+ __ j(not_equal, &binop); \
+ } while (false);
+
#define ASSEMBLE_MOVX(mov_instr) \
do { \
if (instr->addressing_mode() != kMode_None) { \
@@ -1152,6 +1187,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Popcnt:
__ Popcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kIA32Bswap:
+ __ bswap(i.OutputRegister());
+ break;
case kArchWordPoisonOnSpeculation:
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ and_(i.InputRegister(0), kSpeculationPoisonRegister);
@@ -3594,7 +3632,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ VerifyRootRegister();
+ __ cmp(esp, tasm()->StaticVariable(stack_limit));
+ break;
+ }
+ case kIA32Word32AtomicPairLoad: {
+ XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
+ __ movq(tmp, i.MemoryOperand());
+ __ Pextrd(i.OutputRegister(0), tmp, 0);
+ __ Pextrd(i.OutputRegister(1), tmp, 1);
+ break;
+ }
+ case kIA32Word32AtomicPairStore: {
+ __ mov(i.TempRegister(0), i.MemoryOperand(2));
+ __ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(2));
break;
}
case kWord32AtomicExchangeInt8: {
@@ -3621,6 +3674,34 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
+ // For the narrow Word64 operations below, i.OutputRegister(1) contains
+ // the high-order 32 bits for the 64-bit operation. As the data exchange
+ // fits in one register, i.OutputRegister(1) needs to be cleared for
+ // the correct return value to be propagated back.
+ case kIA32Word64AtomicNarrowExchangeUint8: {
+ __ xchg_b(i.OutputRegister(0), i.MemoryOperand(1));
+ __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowExchangeUint16: {
+ __ xchg_w(i.OutputRegister(0), i.MemoryOperand(1));
+ __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowExchangeUint32: {
+ __ xchg(i.OutputRegister(0), i.MemoryOperand(1));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word32AtomicPairExchange: {
+ __ mov(i.OutputRegister(0), i.MemoryOperand(2));
+ __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(2));
+ break;
+ }
case kWord32AtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
@@ -3650,30 +3731,72 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
}
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
- __ movsx_b(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Uint8: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
- __ movzx_b(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Int16: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
- __ movsx_w(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Uint16: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
- __ movzx_w(eax, eax); \
- break; \
- } \
- case kWord32Atomic##op##Word32: { \
- ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
- break; \
+ case kIA32Word64AtomicNarrowCompareExchangeUint8: {
+ __ lock();
+ __ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzx_b(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowCompareExchangeUint16: {
+ __ lock();
+ __ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzx_w(i.OutputRegister(0), i.OutputRegister(0));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word64AtomicNarrowCompareExchangeUint32: {
+ __ lock();
+ __ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1));
+ break;
+ }
+ case kIA32Word32AtomicPairCompareExchange: {
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(4));
+ break;
+ }
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movsx_b(eax, eax); \
+ break; \
+ } \
+ case kIA32Word64AtomicNarrow##op##Uint8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movzx_b(i.OutputRegister(0), i.OutputRegister(0)); \
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
+ break; \
+ } \
+ case kWord32Atomic##op##Uint8: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
+ __ movzx_b(eax, eax); \
+ break; \
+ } \
+ case kWord32Atomic##op##Int16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movsx_w(eax, eax); \
+ break; \
+ } \
+ case kIA32Word64AtomicNarrow##op##Uint16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movzx_w(i.OutputRegister(0), i.OutputRegister(0)); \
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
+ break; \
+ } \
+ case kWord32Atomic##op##Uint16: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
+ __ movzx_w(eax, eax); \
+ break; \
+ } \
+ case kIA32Word64AtomicNarrow##op##Uint32: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
+ __ xor_(i.OutputRegister(1), i.OutputRegister(1)); \
+ break; \
+ } \
+ case kWord32Atomic##op##Word32: { \
+ ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
+ break; \
}
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
@@ -3681,6 +3804,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, or_)
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
+#define ATOMIC_BINOP_CASE(op, instr1, instr2) \
+ case kIA32Word32AtomicPair##op: { \
+ ASSEMBLE_I64ATOMIC_BINOP(instr1, instr2) \
+ break; \
+ }
+ ATOMIC_BINOP_CASE(Add, add, adc)
+ ATOMIC_BINOP_CASE(And, and_, and_)
+ ATOMIC_BINOP_CASE(Or, or_, or_)
+ ATOMIC_BINOP_CASE(Xor, xor_, xor_)
+#undef ATOMIC_BINOP_CASE
+ case kIA32Word32AtomicPairSub: {
+ Label binop;
+ __ bind(&binop);
+ // Move memory operand into edx:eax
+ __ mov(i.OutputRegister(0), i.MemoryOperand(2));
+ __ mov(i.OutputRegister(1), i.NextMemoryOperand(2));
+ // Save input registers temporarily on the stack.
+ __ push(i.InputRegister(0));
+ __ push(i.InputRegister(1));
+ // Negate input in place
+ __ neg(i.InputRegister(0));
+ __ adc(i.InputRegister(1), 0);
+ __ neg(i.InputRegister(1));
+ // Add memory operand, negated input.
+ __ add(i.InputRegister(0), i.OutputRegister(0));
+ __ adc(i.InputRegister(1), i.OutputRegister(1));
+ __ lock();
+ __ cmpxchg8b(i.MemoryOperand(2));
+ // Restore input registers
+ __ pop(i.InputRegister(1));
+ __ pop(i.InputRegister(0));
+ __ j(not_equal, &binop);
+ break;
+ }
case kWord32AtomicLoadInt8:
case kWord32AtomicLoadUint8:
case kWord32AtomicLoadInt16:
@@ -4450,6 +4607,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_I64ATOMIC_BINOP
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
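On IA32 there is no single 64-bit atomic read-modify-write instruction, so ASSEMBLE_I64ATOMIC_BINOP and the kIA32Word32AtomicPair* cases above build a LOCK CMPXCHG8B retry loop: load the current pair into edx:eax, form the new pair in ecx:ebx, and loop until the compare-exchange observes an unchanged memory operand. The same control structure expressed with std::atomic, purely to illustrate the shape of the loop (not the emitted code):

    #include <atomic>
    #include <cstdint>

    // 64-bit atomic add as a CAS retry loop. The returned value is the old
    // contents, which cmpxchg8b leaves in edx:eax (the two output projections).
    uint64_t AtomicPairAddSketch(std::atomic<uint64_t>* cell, uint64_t value) {
      uint64_t expected = cell->load();
      uint64_t desired;
      do {
        desired = expected + value;  // add/adc on the low and high words
        // On failure, compare_exchange_weak refreshes {expected} with the
        // current contents and we retry, like the j(not_equal, &binop) branch.
      } while (!cell->compare_exchange_weak(expected, desired));
      return expected;
    }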
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 8ffc9c3819..97f3763cf5 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -11,346 +11,377 @@ namespace compiler {
// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(IA32Add) \
- V(IA32And) \
- V(IA32Cmp) \
- V(IA32Cmp16) \
- V(IA32Cmp8) \
- V(IA32Test) \
- V(IA32Test16) \
- V(IA32Test8) \
- V(IA32Or) \
- V(IA32Xor) \
- V(IA32Sub) \
- V(IA32Imul) \
- V(IA32ImulHigh) \
- V(IA32UmulHigh) \
- V(IA32Idiv) \
- V(IA32Udiv) \
- V(IA32Not) \
- V(IA32Neg) \
- V(IA32Shl) \
- V(IA32Shr) \
- V(IA32Sar) \
- V(IA32AddPair) \
- V(IA32SubPair) \
- V(IA32MulPair) \
- V(IA32ShlPair) \
- V(IA32ShrPair) \
- V(IA32SarPair) \
- V(IA32Ror) \
- V(IA32Lzcnt) \
- V(IA32Tzcnt) \
- V(IA32Popcnt) \
- V(LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEInt32ToFloat32) \
- V(SSEUint32ToFloat32) \
- V(SSEInt32ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(IA32Movsxbl) \
- V(IA32Movzxbl) \
- V(IA32Movb) \
- V(IA32Movsxwl) \
- V(IA32Movzxwl) \
- V(IA32Movw) \
- V(IA32Movl) \
- V(IA32Movss) \
- V(IA32Movsd) \
- V(IA32Movdqu) \
- V(IA32BitcastFI) \
- V(IA32BitcastIF) \
- V(IA32Lea) \
- V(IA32Push) \
- V(IA32PushFloat32) \
- V(IA32PushFloat64) \
- V(IA32PushSimd128) \
- V(IA32Poke) \
- V(IA32Peek) \
- V(IA32StackCheck) \
- V(SSEF32x4Splat) \
- V(AVXF32x4Splat) \
- V(SSEF32x4ExtractLane) \
- V(AVXF32x4ExtractLane) \
- V(SSEF32x4ReplaceLane) \
- V(AVXF32x4ReplaceLane) \
- V(IA32F32x4SConvertI32x4) \
- V(SSEF32x4UConvertI32x4) \
- V(AVXF32x4UConvertI32x4) \
- V(SSEF32x4Abs) \
- V(AVXF32x4Abs) \
- V(SSEF32x4Neg) \
- V(AVXF32x4Neg) \
- V(IA32F32x4RecipApprox) \
- V(IA32F32x4RecipSqrtApprox) \
- V(SSEF32x4Add) \
- V(AVXF32x4Add) \
- V(SSEF32x4AddHoriz) \
- V(AVXF32x4AddHoriz) \
- V(SSEF32x4Sub) \
- V(AVXF32x4Sub) \
- V(SSEF32x4Mul) \
- V(AVXF32x4Mul) \
- V(SSEF32x4Min) \
- V(AVXF32x4Min) \
- V(SSEF32x4Max) \
- V(AVXF32x4Max) \
- V(SSEF32x4Eq) \
- V(AVXF32x4Eq) \
- V(SSEF32x4Ne) \
- V(AVXF32x4Ne) \
- V(SSEF32x4Lt) \
- V(AVXF32x4Lt) \
- V(SSEF32x4Le) \
- V(AVXF32x4Le) \
- V(IA32I32x4Splat) \
- V(IA32I32x4ExtractLane) \
- V(SSEI32x4ReplaceLane) \
- V(AVXI32x4ReplaceLane) \
- V(SSEI32x4SConvertF32x4) \
- V(AVXI32x4SConvertF32x4) \
- V(IA32I32x4SConvertI16x8Low) \
- V(IA32I32x4SConvertI16x8High) \
- V(IA32I32x4Neg) \
- V(SSEI32x4Shl) \
- V(AVXI32x4Shl) \
- V(SSEI32x4ShrS) \
- V(AVXI32x4ShrS) \
- V(SSEI32x4Add) \
- V(AVXI32x4Add) \
- V(SSEI32x4AddHoriz) \
- V(AVXI32x4AddHoriz) \
- V(SSEI32x4Sub) \
- V(AVXI32x4Sub) \
- V(SSEI32x4Mul) \
- V(AVXI32x4Mul) \
- V(SSEI32x4MinS) \
- V(AVXI32x4MinS) \
- V(SSEI32x4MaxS) \
- V(AVXI32x4MaxS) \
- V(SSEI32x4Eq) \
- V(AVXI32x4Eq) \
- V(SSEI32x4Ne) \
- V(AVXI32x4Ne) \
- V(SSEI32x4GtS) \
- V(AVXI32x4GtS) \
- V(SSEI32x4GeS) \
- V(AVXI32x4GeS) \
- V(SSEI32x4UConvertF32x4) \
- V(AVXI32x4UConvertF32x4) \
- V(IA32I32x4UConvertI16x8Low) \
- V(IA32I32x4UConvertI16x8High) \
- V(SSEI32x4ShrU) \
- V(AVXI32x4ShrU) \
- V(SSEI32x4MinU) \
- V(AVXI32x4MinU) \
- V(SSEI32x4MaxU) \
- V(AVXI32x4MaxU) \
- V(SSEI32x4GtU) \
- V(AVXI32x4GtU) \
- V(SSEI32x4GeU) \
- V(AVXI32x4GeU) \
- V(IA32I16x8Splat) \
- V(IA32I16x8ExtractLane) \
- V(SSEI16x8ReplaceLane) \
- V(AVXI16x8ReplaceLane) \
- V(IA32I16x8SConvertI8x16Low) \
- V(IA32I16x8SConvertI8x16High) \
- V(IA32I16x8Neg) \
- V(SSEI16x8Shl) \
- V(AVXI16x8Shl) \
- V(SSEI16x8ShrS) \
- V(AVXI16x8ShrS) \
- V(SSEI16x8SConvertI32x4) \
- V(AVXI16x8SConvertI32x4) \
- V(SSEI16x8Add) \
- V(AVXI16x8Add) \
- V(SSEI16x8AddSaturateS) \
- V(AVXI16x8AddSaturateS) \
- V(SSEI16x8AddHoriz) \
- V(AVXI16x8AddHoriz) \
- V(SSEI16x8Sub) \
- V(AVXI16x8Sub) \
- V(SSEI16x8SubSaturateS) \
- V(AVXI16x8SubSaturateS) \
- V(SSEI16x8Mul) \
- V(AVXI16x8Mul) \
- V(SSEI16x8MinS) \
- V(AVXI16x8MinS) \
- V(SSEI16x8MaxS) \
- V(AVXI16x8MaxS) \
- V(SSEI16x8Eq) \
- V(AVXI16x8Eq) \
- V(SSEI16x8Ne) \
- V(AVXI16x8Ne) \
- V(SSEI16x8GtS) \
- V(AVXI16x8GtS) \
- V(SSEI16x8GeS) \
- V(AVXI16x8GeS) \
- V(IA32I16x8UConvertI8x16Low) \
- V(IA32I16x8UConvertI8x16High) \
- V(SSEI16x8ShrU) \
- V(AVXI16x8ShrU) \
- V(SSEI16x8UConvertI32x4) \
- V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSaturateU) \
- V(AVXI16x8AddSaturateU) \
- V(SSEI16x8SubSaturateU) \
- V(AVXI16x8SubSaturateU) \
- V(SSEI16x8MinU) \
- V(AVXI16x8MinU) \
- V(SSEI16x8MaxU) \
- V(AVXI16x8MaxU) \
- V(SSEI16x8GtU) \
- V(AVXI16x8GtU) \
- V(SSEI16x8GeU) \
- V(AVXI16x8GeU) \
- V(IA32I8x16Splat) \
- V(IA32I8x16ExtractLane) \
- V(SSEI8x16ReplaceLane) \
- V(AVXI8x16ReplaceLane) \
- V(SSEI8x16SConvertI16x8) \
- V(AVXI8x16SConvertI16x8) \
- V(IA32I8x16Neg) \
- V(SSEI8x16Shl) \
- V(AVXI8x16Shl) \
- V(IA32I8x16ShrS) \
- V(SSEI8x16Add) \
- V(AVXI8x16Add) \
- V(SSEI8x16AddSaturateS) \
- V(AVXI8x16AddSaturateS) \
- V(SSEI8x16Sub) \
- V(AVXI8x16Sub) \
- V(SSEI8x16SubSaturateS) \
- V(AVXI8x16SubSaturateS) \
- V(SSEI8x16Mul) \
- V(AVXI8x16Mul) \
- V(SSEI8x16MinS) \
- V(AVXI8x16MinS) \
- V(SSEI8x16MaxS) \
- V(AVXI8x16MaxS) \
- V(SSEI8x16Eq) \
- V(AVXI8x16Eq) \
- V(SSEI8x16Ne) \
- V(AVXI8x16Ne) \
- V(SSEI8x16GtS) \
- V(AVXI8x16GtS) \
- V(SSEI8x16GeS) \
- V(AVXI8x16GeS) \
- V(SSEI8x16UConvertI16x8) \
- V(AVXI8x16UConvertI16x8) \
- V(SSEI8x16AddSaturateU) \
- V(AVXI8x16AddSaturateU) \
- V(SSEI8x16SubSaturateU) \
- V(AVXI8x16SubSaturateU) \
- V(IA32I8x16ShrU) \
- V(SSEI8x16MinU) \
- V(AVXI8x16MinU) \
- V(SSEI8x16MaxU) \
- V(AVXI8x16MaxU) \
- V(SSEI8x16GtU) \
- V(AVXI8x16GtU) \
- V(SSEI8x16GeU) \
- V(AVXI8x16GeU) \
- V(IA32S128Zero) \
- V(SSES128Not) \
- V(AVXS128Not) \
- V(SSES128And) \
- V(AVXS128And) \
- V(SSES128Or) \
- V(AVXS128Or) \
- V(SSES128Xor) \
- V(AVXS128Xor) \
- V(SSES128Select) \
- V(AVXS128Select) \
- V(IA32S8x16Shuffle) \
- V(IA32S32x4Swizzle) \
- V(IA32S32x4Shuffle) \
- V(IA32S16x8Blend) \
- V(IA32S16x8HalfShuffle1) \
- V(IA32S16x8HalfShuffle2) \
- V(IA32S8x16Alignr) \
- V(IA32S16x8Dup) \
- V(IA32S8x16Dup) \
- V(SSES16x8UnzipHigh) \
- V(AVXS16x8UnzipHigh) \
- V(SSES16x8UnzipLow) \
- V(AVXS16x8UnzipLow) \
- V(SSES8x16UnzipHigh) \
- V(AVXS8x16UnzipHigh) \
- V(SSES8x16UnzipLow) \
- V(AVXS8x16UnzipLow) \
- V(IA32S64x2UnpackHigh) \
- V(IA32S32x4UnpackHigh) \
- V(IA32S16x8UnpackHigh) \
- V(IA32S8x16UnpackHigh) \
- V(IA32S64x2UnpackLow) \
- V(IA32S32x4UnpackLow) \
- V(IA32S16x8UnpackLow) \
- V(IA32S8x16UnpackLow) \
- V(SSES8x16TransposeLow) \
- V(AVXS8x16TransposeLow) \
- V(SSES8x16TransposeHigh) \
- V(AVXS8x16TransposeHigh) \
- V(SSES8x8Reverse) \
- V(AVXS8x8Reverse) \
- V(SSES8x4Reverse) \
- V(AVXS8x4Reverse) \
- V(SSES8x2Reverse) \
- V(AVXS8x2Reverse) \
- V(IA32S1x4AnyTrue) \
- V(IA32S1x4AllTrue) \
- V(IA32S1x8AnyTrue) \
- V(IA32S1x8AllTrue) \
- V(IA32S1x16AnyTrue) \
- V(IA32S1x16AllTrue)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Cmp16) \
+ V(IA32Cmp8) \
+ V(IA32Test) \
+ V(IA32Test16) \
+ V(IA32Test8) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32ImulHigh) \
+ V(IA32UmulHigh) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32AddPair) \
+ V(IA32SubPair) \
+ V(IA32MulPair) \
+ V(IA32ShlPair) \
+ V(IA32ShrPair) \
+ V(IA32SarPair) \
+ V(IA32Ror) \
+ V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
+ V(IA32Bswap) \
+ V(LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Abs) \
+ V(SSEFloat32Neg) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64Abs) \
+ V(SSEFloat64Neg) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEInt32ToFloat32) \
+ V(SSEUint32ToFloat32) \
+ V(SSEInt32ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
+ V(IA32Movsxbl) \
+ V(IA32Movzxbl) \
+ V(IA32Movb) \
+ V(IA32Movsxwl) \
+ V(IA32Movzxwl) \
+ V(IA32Movw) \
+ V(IA32Movl) \
+ V(IA32Movss) \
+ V(IA32Movsd) \
+ V(IA32Movdqu) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
+ V(IA32Lea) \
+ V(IA32Push) \
+ V(IA32PushFloat32) \
+ V(IA32PushFloat64) \
+ V(IA32PushSimd128) \
+ V(IA32Poke) \
+ V(IA32Peek) \
+ V(IA32StackCheck) \
+ V(SSEF32x4Splat) \
+ V(AVXF32x4Splat) \
+ V(SSEF32x4ExtractLane) \
+ V(AVXF32x4ExtractLane) \
+ V(SSEF32x4ReplaceLane) \
+ V(AVXF32x4ReplaceLane) \
+ V(IA32F32x4SConvertI32x4) \
+ V(SSEF32x4UConvertI32x4) \
+ V(AVXF32x4UConvertI32x4) \
+ V(SSEF32x4Abs) \
+ V(AVXF32x4Abs) \
+ V(SSEF32x4Neg) \
+ V(AVXF32x4Neg) \
+ V(IA32F32x4RecipApprox) \
+ V(IA32F32x4RecipSqrtApprox) \
+ V(SSEF32x4Add) \
+ V(AVXF32x4Add) \
+ V(SSEF32x4AddHoriz) \
+ V(AVXF32x4AddHoriz) \
+ V(SSEF32x4Sub) \
+ V(AVXF32x4Sub) \
+ V(SSEF32x4Mul) \
+ V(AVXF32x4Mul) \
+ V(SSEF32x4Min) \
+ V(AVXF32x4Min) \
+ V(SSEF32x4Max) \
+ V(AVXF32x4Max) \
+ V(SSEF32x4Eq) \
+ V(AVXF32x4Eq) \
+ V(SSEF32x4Ne) \
+ V(AVXF32x4Ne) \
+ V(SSEF32x4Lt) \
+ V(AVXF32x4Lt) \
+ V(SSEF32x4Le) \
+ V(AVXF32x4Le) \
+ V(IA32I32x4Splat) \
+ V(IA32I32x4ExtractLane) \
+ V(SSEI32x4ReplaceLane) \
+ V(AVXI32x4ReplaceLane) \
+ V(SSEI32x4SConvertF32x4) \
+ V(AVXI32x4SConvertF32x4) \
+ V(IA32I32x4SConvertI16x8Low) \
+ V(IA32I32x4SConvertI16x8High) \
+ V(IA32I32x4Neg) \
+ V(SSEI32x4Shl) \
+ V(AVXI32x4Shl) \
+ V(SSEI32x4ShrS) \
+ V(AVXI32x4ShrS) \
+ V(SSEI32x4Add) \
+ V(AVXI32x4Add) \
+ V(SSEI32x4AddHoriz) \
+ V(AVXI32x4AddHoriz) \
+ V(SSEI32x4Sub) \
+ V(AVXI32x4Sub) \
+ V(SSEI32x4Mul) \
+ V(AVXI32x4Mul) \
+ V(SSEI32x4MinS) \
+ V(AVXI32x4MinS) \
+ V(SSEI32x4MaxS) \
+ V(AVXI32x4MaxS) \
+ V(SSEI32x4Eq) \
+ V(AVXI32x4Eq) \
+ V(SSEI32x4Ne) \
+ V(AVXI32x4Ne) \
+ V(SSEI32x4GtS) \
+ V(AVXI32x4GtS) \
+ V(SSEI32x4GeS) \
+ V(AVXI32x4GeS) \
+ V(SSEI32x4UConvertF32x4) \
+ V(AVXI32x4UConvertF32x4) \
+ V(IA32I32x4UConvertI16x8Low) \
+ V(IA32I32x4UConvertI16x8High) \
+ V(SSEI32x4ShrU) \
+ V(AVXI32x4ShrU) \
+ V(SSEI32x4MinU) \
+ V(AVXI32x4MinU) \
+ V(SSEI32x4MaxU) \
+ V(AVXI32x4MaxU) \
+ V(SSEI32x4GtU) \
+ V(AVXI32x4GtU) \
+ V(SSEI32x4GeU) \
+ V(AVXI32x4GeU) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLane) \
+ V(SSEI16x8ReplaceLane) \
+ V(AVXI16x8ReplaceLane) \
+ V(IA32I16x8SConvertI8x16Low) \
+ V(IA32I16x8SConvertI8x16High) \
+ V(IA32I16x8Neg) \
+ V(SSEI16x8Shl) \
+ V(AVXI16x8Shl) \
+ V(SSEI16x8ShrS) \
+ V(AVXI16x8ShrS) \
+ V(SSEI16x8SConvertI32x4) \
+ V(AVXI16x8SConvertI32x4) \
+ V(SSEI16x8Add) \
+ V(AVXI16x8Add) \
+ V(SSEI16x8AddSaturateS) \
+ V(AVXI16x8AddSaturateS) \
+ V(SSEI16x8AddHoriz) \
+ V(AVXI16x8AddHoriz) \
+ V(SSEI16x8Sub) \
+ V(AVXI16x8Sub) \
+ V(SSEI16x8SubSaturateS) \
+ V(AVXI16x8SubSaturateS) \
+ V(SSEI16x8Mul) \
+ V(AVXI16x8Mul) \
+ V(SSEI16x8MinS) \
+ V(AVXI16x8MinS) \
+ V(SSEI16x8MaxS) \
+ V(AVXI16x8MaxS) \
+ V(SSEI16x8Eq) \
+ V(AVXI16x8Eq) \
+ V(SSEI16x8Ne) \
+ V(AVXI16x8Ne) \
+ V(SSEI16x8GtS) \
+ V(AVXI16x8GtS) \
+ V(SSEI16x8GeS) \
+ V(AVXI16x8GeS) \
+ V(IA32I16x8UConvertI8x16Low) \
+ V(IA32I16x8UConvertI8x16High) \
+ V(SSEI16x8ShrU) \
+ V(AVXI16x8ShrU) \
+ V(SSEI16x8UConvertI32x4) \
+ V(AVXI16x8UConvertI32x4) \
+ V(SSEI16x8AddSaturateU) \
+ V(AVXI16x8AddSaturateU) \
+ V(SSEI16x8SubSaturateU) \
+ V(AVXI16x8SubSaturateU) \
+ V(SSEI16x8MinU) \
+ V(AVXI16x8MinU) \
+ V(SSEI16x8MaxU) \
+ V(AVXI16x8MaxU) \
+ V(SSEI16x8GtU) \
+ V(AVXI16x8GtU) \
+ V(SSEI16x8GeU) \
+ V(AVXI16x8GeU) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLane) \
+ V(SSEI8x16ReplaceLane) \
+ V(AVXI8x16ReplaceLane) \
+ V(SSEI8x16SConvertI16x8) \
+ V(AVXI8x16SConvertI16x8) \
+ V(IA32I8x16Neg) \
+ V(SSEI8x16Shl) \
+ V(AVXI8x16Shl) \
+ V(IA32I8x16ShrS) \
+ V(SSEI8x16Add) \
+ V(AVXI8x16Add) \
+ V(SSEI8x16AddSaturateS) \
+ V(AVXI8x16AddSaturateS) \
+ V(SSEI8x16Sub) \
+ V(AVXI8x16Sub) \
+ V(SSEI8x16SubSaturateS) \
+ V(AVXI8x16SubSaturateS) \
+ V(SSEI8x16Mul) \
+ V(AVXI8x16Mul) \
+ V(SSEI8x16MinS) \
+ V(AVXI8x16MinS) \
+ V(SSEI8x16MaxS) \
+ V(AVXI8x16MaxS) \
+ V(SSEI8x16Eq) \
+ V(AVXI8x16Eq) \
+ V(SSEI8x16Ne) \
+ V(AVXI8x16Ne) \
+ V(SSEI8x16GtS) \
+ V(AVXI8x16GtS) \
+ V(SSEI8x16GeS) \
+ V(AVXI8x16GeS) \
+ V(SSEI8x16UConvertI16x8) \
+ V(AVXI8x16UConvertI16x8) \
+ V(SSEI8x16AddSaturateU) \
+ V(AVXI8x16AddSaturateU) \
+ V(SSEI8x16SubSaturateU) \
+ V(AVXI8x16SubSaturateU) \
+ V(IA32I8x16ShrU) \
+ V(SSEI8x16MinU) \
+ V(AVXI8x16MinU) \
+ V(SSEI8x16MaxU) \
+ V(AVXI8x16MaxU) \
+ V(SSEI8x16GtU) \
+ V(AVXI8x16GtU) \
+ V(SSEI8x16GeU) \
+ V(AVXI8x16GeU) \
+ V(IA32S128Zero) \
+ V(SSES128Not) \
+ V(AVXS128Not) \
+ V(SSES128And) \
+ V(AVXS128And) \
+ V(SSES128Or) \
+ V(AVXS128Or) \
+ V(SSES128Xor) \
+ V(AVXS128Xor) \
+ V(SSES128Select) \
+ V(AVXS128Select) \
+ V(IA32S8x16Shuffle) \
+ V(IA32S32x4Swizzle) \
+ V(IA32S32x4Shuffle) \
+ V(IA32S16x8Blend) \
+ V(IA32S16x8HalfShuffle1) \
+ V(IA32S16x8HalfShuffle2) \
+ V(IA32S8x16Alignr) \
+ V(IA32S16x8Dup) \
+ V(IA32S8x16Dup) \
+ V(SSES16x8UnzipHigh) \
+ V(AVXS16x8UnzipHigh) \
+ V(SSES16x8UnzipLow) \
+ V(AVXS16x8UnzipLow) \
+ V(SSES8x16UnzipHigh) \
+ V(AVXS8x16UnzipHigh) \
+ V(SSES8x16UnzipLow) \
+ V(AVXS8x16UnzipLow) \
+ V(IA32S64x2UnpackHigh) \
+ V(IA32S32x4UnpackHigh) \
+ V(IA32S16x8UnpackHigh) \
+ V(IA32S8x16UnpackHigh) \
+ V(IA32S64x2UnpackLow) \
+ V(IA32S32x4UnpackLow) \
+ V(IA32S16x8UnpackLow) \
+ V(IA32S8x16UnpackLow) \
+ V(SSES8x16TransposeLow) \
+ V(AVXS8x16TransposeLow) \
+ V(SSES8x16TransposeHigh) \
+ V(AVXS8x16TransposeHigh) \
+ V(SSES8x8Reverse) \
+ V(AVXS8x8Reverse) \
+ V(SSES8x4Reverse) \
+ V(AVXS8x4Reverse) \
+ V(SSES8x2Reverse) \
+ V(AVXS8x2Reverse) \
+ V(IA32S1x4AnyTrue) \
+ V(IA32S1x4AllTrue) \
+ V(IA32S1x8AnyTrue) \
+ V(IA32S1x8AllTrue) \
+ V(IA32S1x16AnyTrue) \
+ V(IA32S1x16AllTrue) \
+ V(IA32Word32AtomicPairLoad) \
+ V(IA32Word32AtomicPairStore) \
+ V(IA32Word32AtomicPairAdd) \
+ V(IA32Word32AtomicPairSub) \
+ V(IA32Word32AtomicPairAnd) \
+ V(IA32Word32AtomicPairOr) \
+ V(IA32Word32AtomicPairXor) \
+ V(IA32Word32AtomicPairExchange) \
+ V(IA32Word32AtomicPairCompareExchange) \
+ V(IA32Word64AtomicNarrowAddUint8) \
+ V(IA32Word64AtomicNarrowAddUint16) \
+ V(IA32Word64AtomicNarrowAddUint32) \
+ V(IA32Word64AtomicNarrowSubUint8) \
+ V(IA32Word64AtomicNarrowSubUint16) \
+ V(IA32Word64AtomicNarrowSubUint32) \
+ V(IA32Word64AtomicNarrowAndUint8) \
+ V(IA32Word64AtomicNarrowAndUint16) \
+ V(IA32Word64AtomicNarrowAndUint32) \
+ V(IA32Word64AtomicNarrowOrUint8) \
+ V(IA32Word64AtomicNarrowOrUint16) \
+ V(IA32Word64AtomicNarrowOrUint32) \
+ V(IA32Word64AtomicNarrowXorUint8) \
+ V(IA32Word64AtomicNarrowXorUint16) \
+ V(IA32Word64AtomicNarrowXorUint32) \
+ V(IA32Word64AtomicNarrowExchangeUint8) \
+ V(IA32Word64AtomicNarrowExchangeUint16) \
+ V(IA32Word64AtomicNarrowExchangeUint32) \
+ V(IA32Word64AtomicNarrowCompareExchangeUint8) \
+ V(IA32Word64AtomicNarrowCompareExchangeUint16) \
+ V(IA32Word64AtomicNarrowCompareExchangeUint32)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 82d6fb88a3..07d42bc614 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -43,6 +43,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Lzcnt:
case kIA32Tzcnt:
case kIA32Popcnt:
+ case kIA32Bswap:
case kIA32Lea:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
@@ -368,6 +369,40 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kLFence:
return kHasSideEffect;
+ case kIA32Word32AtomicPairLoad:
+ return kIsLoadOperation;
+
+ case kIA32Word32AtomicPairStore:
+ case kIA32Word32AtomicPairAdd:
+ case kIA32Word32AtomicPairSub:
+ case kIA32Word32AtomicPairAnd:
+ case kIA32Word32AtomicPairOr:
+ case kIA32Word32AtomicPairXor:
+ case kIA32Word32AtomicPairExchange:
+ case kIA32Word32AtomicPairCompareExchange:
+ case kIA32Word64AtomicNarrowAddUint8:
+ case kIA32Word64AtomicNarrowAddUint16:
+ case kIA32Word64AtomicNarrowAddUint32:
+ case kIA32Word64AtomicNarrowSubUint8:
+ case kIA32Word64AtomicNarrowSubUint16:
+ case kIA32Word64AtomicNarrowSubUint32:
+ case kIA32Word64AtomicNarrowAndUint8:
+ case kIA32Word64AtomicNarrowAndUint16:
+ case kIA32Word64AtomicNarrowAndUint32:
+ case kIA32Word64AtomicNarrowOrUint8:
+ case kIA32Word64AtomicNarrowOrUint16:
+ case kIA32Word64AtomicNarrowOrUint32:
+ case kIA32Word64AtomicNarrowXorUint8:
+ case kIA32Word64AtomicNarrowXorUint16:
+ case kIA32Word64AtomicNarrowXorUint32:
+ case kIA32Word64AtomicNarrowExchangeUint8:
+ case kIA32Word64AtomicNarrowExchangeUint16:
+ case kIA32Word64AtomicNarrowExchangeUint32:
+ case kIA32Word64AtomicNarrowCompareExchangeUint8:
+ case kIA32Word64AtomicNarrowCompareExchangeUint16:
+ case kIA32Word64AtomicNarrowCompareExchangeUint32:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 4144254285..ce2f14e97f 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -164,6 +164,17 @@ class IA32OperandGenerator final : public OperandGenerator {
}
}
+ InstructionOperand GetEffectiveIndexOperand(Node* index,
+ AddressingMode* mode) {
+ if (CanBeImmediate(index)) {
+ *mode = kMode_MRI;
+ return UseImmediate(index);
+ } else {
+ *mode = kMode_MR1;
+ return UseUniqueRegister(index);
+ }
+ }
+
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
@@ -331,17 +342,10 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ g.UseUniqueRegister(value)};
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -362,7 +366,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
} else {
ArchOpcode opcode = kArchNop;
switch (rep) {
@@ -823,7 +827,10 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
@@ -1293,30 +1300,86 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- if (rep == MachineRepresentation::kWord8) {
- inputs[input_count++] = g.UseFixed(value, edx);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(value);
- }
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand outputs[1];
- if (rep == MachineRepresentation::kWord8) {
- // Using DefineSameAsFirst requires the register to be unallocated.
- outputs[0] = g.DefineAsFixed(node, edx);
- } else {
- outputs[0] = g.DefineSameAsFirst(node);
- }
+ InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ (rep == MachineRepresentation::kWord8)
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ ? g.DefineAsFixed(node, edx)
+ : g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
+}
+
+void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineRepresentation rep) {
+ AddressingMode addressing_mode;
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(node, eax)};
+ InstructionOperand temp[] = {(rep == MachineRepresentation::kWord8)
+ ? g.UseByteRegister(node)
+ : g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temp), temp);
+}
+
+void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ // For Word64 operations, the value input is split into a high node and
+ // a low node in the int64-lowering phase.

+ Node* value_high = node->InputAt(3);
+
+ // Wasm lives in 32-bit address space, so we do not need to worry about
+ // base/index lowering. This will need to be fixed for Wasm64.
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseFixed(value, ebx), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
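VisitPairAtomicBinOp pins the 64-bit operand into ecx:ebx and the 64-bit result into edx:eax, which matches the operand layout of a lock cmpxchg8b retry loop. As an illustration only (plain std::atomic, not the V8 code generator), the same read-modify-write pattern looks like this:

#include <atomic>
#include <cstdint>
#include <cstdio>

// 64-bit atomic add expressed as a compare-and-swap loop. Conceptually this is
// what an ia32 lock cmpxchg8b loop does: edx:eax holds the expected old value,
// ecx:ebx holds the desired new value.
uint64_t AtomicAdd64(std::atomic<uint64_t>* cell, uint64_t delta) {
  uint64_t old_value = cell->load(std::memory_order_relaxed);
  while (!cell->compare_exchange_weak(old_value, old_value + delta,
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
    // old_value was refreshed by the failed CAS; retry with the new sum.
  }
  return old_value;  // the operation yields the previous value, like the IR node
}

int main() {
  std::atomic<uint64_t> cell{41};
  std::printf("%llu\n", static_cast<unsigned long long>(AtomicAdd64(&cell, 1)));
}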
+
+void VisitNarrowAtomicBinOp(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineType type) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // Wasm lives in 32-bit address space, so we do not need to worry about
+ // base/index lowering. This will need to be fixed for Wasm64.
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionOperand temp[] = {(type == MachineType::Uint8())
+ ? g.UseByteRegister(node)
+ : g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, 1, outputs, input_count, inputs);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temp), temp);
}
} // namespace
@@ -1608,7 +1671,7 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
IA32OperandGenerator g(this);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -1634,7 +1697,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -1650,38 +1713,23 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
UNREACHABLE();
return;
}
- InstructionOperand outputs[1];
AddressingMode addressing_mode;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseFixed(old_value, eax);
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- inputs[input_count++] = g.UseByteRegister(new_value);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(new_value);
- }
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- outputs[0] = g.DefineAsFixed(node, eax);
+ InstructionOperand new_val_operand =
+ (type.representation() == MachineRepresentation::kWord8)
+ ? g.UseByteRegister(new_value)
+ : g.UseUniqueRegister(new_value);
+ InstructionOperand inputs[] = {
+ g.UseFixed(old_value, eax), new_val_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineAsFixed(node, eax)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs);
+ Emit(code, 1, outputs, arraysize(inputs), inputs);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- IA32OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -1697,28 +1745,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
return;
}
- InstructionOperand outputs[1];
- AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(value);
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- outputs[0] = g.DefineAsFixed(node, eax);
- InstructionOperand temp[1];
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- temp[0] = g.UseByteRegister(node);
- } else {
- temp[0] = g.TempRegister();
- }
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ VisitAtomicBinOp(this, node, opcode, type.representation());
}
#define VISIT_ATOMIC_BINOP(op) \
@@ -1735,6 +1762,193 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ IA32OperandGenerator g(this);
+ AddressingMode mode;
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ InstructionOperand inputs[] = {g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &mode)};
+ InstructionOperand temps[] = {g.TempDoubleRegister()};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 0)),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionCode code =
+ kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ Node* value_high = node->InputAt(3);
+
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseFixed(value, ebx), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ // Allocate eax and edx as temporaries: the store is performed with an atomic
+ // exchange whose previous value is written to edx:eax, so those registers
+ // must be saved and restored around the instruction.
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ InstructionCode code =
+ kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+}
+
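VisitWord32AtomicPairStore above reserves eax and edx as temporaries because the store is carried out as an atomic exchange whose previous value lands in edx:eax. Expressed with portable std::atomic (illustration only, not part of the patch), a 64-bit atomic store that discards the exchanged-out old value looks like this:

#include <atomic>
#include <cstdint>

// Store a 64-bit value atomically by exchanging it in and discarding the
// previous contents; the discarded old value is what occupies edx:eax on ia32,
// which is why those registers are reserved as temps in the selector.
void AtomicStore64(std::atomic<uint64_t>* cell, uint64_t value) {
  (void)cell->exchange(value, std::memory_order_seq_cst);
}

int main() {
  std::atomic<uint64_t> cell{0};
  AtomicStore64(&cell, 0x1122334455667788ull);
  return cell.load() == 0x1122334455667788ull ? 0 : 1;
}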
+void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairAdd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairSub);
+}
+
+void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairAnd);
+}
+
+void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairOr);
+}
+
+void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairXor);
+}
+
+void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
+ VisitPairAtomicBinOp(this, node, kIA32Word32AtomicPairExchange);
+}
+
+void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* index = node->InputAt(1);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ // Low, High values of old value
+ g.UseFixed(node->InputAt(2), eax), g.UseFixed(node->InputAt(3), edx),
+ // Low, High values of new value
+ g.UseFixed(node->InputAt(4), ebx), g.UseFixed(node->InputAt(5), ecx),
+ // InputAt(0) => base
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionCode code = kIA32Word32AtomicPairCompareExchange |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowBinop(Node* node,
+ ArchOpcode uint8_op,
+ ArchOpcode uint16_op,
+ ArchOpcode uint32_op) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitNarrowAtomicBinOp(this, node, opcode, type);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64AtomicNarrow##op(Node* node) { \
+ VisitWord64AtomicNarrowBinop(node, kIA32Word64AtomicNarrow##op##Uint8, \
+ kIA32Word64AtomicNarrow##op##Uint16, \
+ kIA32Word64AtomicNarrow##op##Uint32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = kIA32Word64AtomicNarrowExchangeUint32;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kIA32Word64AtomicNarrowExchangeUint16;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kIA32Word64AtomicNarrowExchangeUint8;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode;
+ InstructionOperand value_operand =
+ (type.representation() == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[2];
+ if (type.representation() == MachineRepresentation::kWord8) {
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ outputs[0] = g.DefineAsFixed(NodeProperties::FindProjection(node, 0), edx);
+ } else {
+ outputs[0] = g.DefineSameAsFirst(NodeProperties::FindProjection(node, 0));
+ }
+ outputs[1] = g.DefineAsRegister(NodeProperties::FindProjection(node, 1));
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
+ MachineType type = AtomicOpType(node->op());
+ DCHECK(type != MachineType::Uint64());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint32()) {
+ opcode = kIA32Word64AtomicNarrowCompareExchangeUint32;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kIA32Word64AtomicNarrowCompareExchangeUint16;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kIA32Word64AtomicNarrowCompareExchangeUint8;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ AddressingMode addressing_mode;
+ InstructionOperand new_value_operand =
+ (type.representation() == MachineRepresentation::kWord8)
+ ? g.UseByteRegister(new_value)
+ : g.UseUniqueRegister(new_value);
+ InstructionOperand inputs[] = {
+ g.UseFixed(old_value, eax), new_value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 0), eax),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
#define SIMD_INT_TYPES(V) \
V(I32x4) \
V(I16x8) \
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index f1ca52b14d..d15a633257 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -587,7 +587,7 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
return entries;
} else {
- // Crankshaft counts duplicate objects for the running id, so we have
+ // Deoptimizer counts duplicate objects for the running id, so we have
// to push the input again.
deduplicator->InsertObject(input);
values->PushDuplicate(id);
@@ -1705,11 +1705,18 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord32AtomicStore(node);
case IrOpcode::kWord64AtomicStore:
return VisitWord64AtomicStore(node);
-#define ATOMIC_CASE(name, rep) \
- case IrOpcode::k##rep##Atomic##name: { \
- MachineType type = AtomicOpRepresentationOf(node->op()); \
- MarkAsRepresentation(type.representation(), node); \
- return Visit##rep##Atomic##name(node); \
+ case IrOpcode::kWord32AtomicPairStore:
+ return VisitWord32AtomicPairStore(node);
+ case IrOpcode::kWord32AtomicPairLoad: {
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
+ return VisitWord32AtomicPairLoad(node);
+ }
+#define ATOMIC_CASE(name, rep) \
+ case IrOpcode::k##rep##Atomic##name: { \
+ MachineType type = AtomicOpType(node->op()); \
+ MarkAsRepresentation(type.representation(), node); \
+ return Visit##rep##Atomic##name(node); \
}
ATOMIC_CASE(Add, Word32)
ATOMIC_CASE(Add, Word64)
@@ -1726,6 +1733,35 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(CompareExchange, Word32)
ATOMIC_CASE(CompareExchange, Word64)
#undef ATOMIC_CASE
+#define ATOMIC_CASE(name) \
+ case IrOpcode::kWord32AtomicPair##name: { \
+ MarkAsWord32(node); \
+ MarkPairProjectionsAsWord32(node); \
+ return VisitWord32AtomicPair##name(node); \
+ }
+ ATOMIC_CASE(Add)
+ ATOMIC_CASE(Sub)
+ ATOMIC_CASE(And)
+ ATOMIC_CASE(Or)
+ ATOMIC_CASE(Xor)
+ ATOMIC_CASE(Exchange)
+ ATOMIC_CASE(CompareExchange)
+#undef ATOMIC_CASE
+#define ATOMIC_CASE(name) \
+ case IrOpcode::kWord64AtomicNarrow##name: { \
+ MachineType type = AtomicOpType(node->op()); \
+ MarkAsRepresentation(type.representation(), node); \
+ MarkPairProjectionsAsWord32(node); \
+ return VisitWord64AtomicNarrow##name(node); \
+ }
+ ATOMIC_CASE(Add)
+ ATOMIC_CASE(Sub)
+ ATOMIC_CASE(And)
+ ATOMIC_CASE(Or)
+ ATOMIC_CASE(Xor)
+ ATOMIC_CASE(Exchange)
+ ATOMIC_CASE(CompareExchange)
+#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
case IrOpcode::kProtectedLoad: {
@@ -2353,6 +2389,72 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
+#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowAdd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowSub(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowAnd(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowOr(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowXor(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicNarrowCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
+
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 39d0c01ee9..435b7185a6 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -731,6 +731,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
ArchOpcode uint16_op,
ArchOpcode uint32_op,
ArchOpcode uint64_op);
+ void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
+ ArchOpcode uint16_op, ArchOpcode uint32_op);
// ===========================================================================
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 803f3e0c1d..1991e309d3 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -811,7 +811,7 @@ class V8_EXPORT_PRIVATE Instruction final {
return &operands_[i];
}
- bool HasOutput() const { return OutputCount() == 1; }
+ bool HasOutput() const { return OutputCount() > 0; }
const InstructionOperand* Output() const { return OutputAt(0); }
InstructionOperand* Output() { return OutputAt(0); }
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 410e78a30d..8066ce5dca 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -117,6 +117,21 @@ int GetReturnCountAfterLowering(Signature<MachineRepresentation>* signature) {
} // namespace
+void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
+ DCHECK_EQ(5, node->InputCount());
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(2, GetReplacementLow(value));
+ node->InsertInput(zone(), 3, GetReplacementHigh(value));
+ NodeProperties::ChangeOp(node, op);
+ ReplaceNodeWithProjections(node);
+}
+
+void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
+ DefaultLowering(node, true);
+ NodeProperties::ChangeOp(node, op);
+ ReplaceNodeWithProjections(node);
+}
+
// static
int Int64Lowering::GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature) {
@@ -338,11 +353,7 @@ void Int64Lowering::LowerNode(Node* node) {
size_t return_arity = call_descriptor->ReturnCount();
if (return_arity == 1) {
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
} else {
ZoneVector<Node*> projections(return_arity, zone());
NodeProperties::CollectValueProjections(node, projections.data(),
@@ -405,11 +416,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kInt64Sub: {
@@ -425,11 +432,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairSub());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kInt64Mul: {
@@ -445,11 +448,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairMul());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Or: {
@@ -497,11 +496,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairShl());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Shr: {
@@ -521,11 +516,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairShr());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Sar: {
@@ -545,11 +536,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairSar());
// We access the additional return values through projections.
- Node* low_node =
- graph()->NewNode(common()->Projection(0), node, graph()->start());
- Node* high_node =
- graph()->NewNode(common()->Projection(1), node, graph()->start());
- ReplaceNode(node, low_node, high_node);
+ ReplaceNodeWithProjections(node);
break;
}
case IrOpcode::kWord64Equal: {
@@ -855,9 +842,10 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64ReverseBytes: {
Node* input = node->InputAt(0);
- ReplaceNode(node, graph()->NewNode(machine()->Word32ReverseBytes().op(),
- GetReplacementHigh(input)),
- graph()->NewNode(machine()->Word32ReverseBytes().op(),
+ ReplaceNode(node,
+ graph()->NewNode(machine()->Word32ReverseBytes(),
+ GetReplacementHigh(input)),
+ graph()->NewNode(machine()->Word32ReverseBytes(),
GetReplacementLow(input)));
break;
}
@@ -895,6 +883,68 @@ void Int64Lowering::LowerNode(Node* node) {
node->NullAllInputs();
break;
}
+ case IrOpcode::kWord64AtomicLoad: {
+ DCHECK_EQ(4, node->InputCount());
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint64()) {
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
+ ReplaceNodeWithProjections(node);
+ } else {
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
+ ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
+ }
+ break;
+ }
+ case IrOpcode::kWord64AtomicStore: {
+ DCHECK_EQ(5, node->InputCount());
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kWord64) {
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(2, GetReplacementLow(value));
+ node->InsertInput(zone(), 3, GetReplacementHigh(value));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
+ } else {
+ DefaultLowering(node, true);
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
+ }
+ break;
+ }
+#define ATOMIC_CASE(name) \
+ case IrOpcode::kWord64Atomic##name: { \
+ MachineType type = AtomicOpType(node->op()); \
+ if (type == MachineType::Uint64()) { \
+ LowerWord64AtomicBinop(node, machine()->Word32AtomicPair##name()); \
+ } else { \
+ LowerWord64AtomicNarrowOp(node, \
+ machine()->Word64AtomicNarrow##name(type)); \
+ } \
+ break; \
+ }
+ ATOMIC_CASE(Add)
+ ATOMIC_CASE(Sub)
+ ATOMIC_CASE(And)
+ ATOMIC_CASE(Or)
+ ATOMIC_CASE(Xor)
+ ATOMIC_CASE(Exchange)
+#undef ATOMIC_CASE
+ case IrOpcode::kWord64AtomicCompareExchange: {
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint64()) {
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+ node->ReplaceInput(2, GetReplacementLow(old_value));
+ node->ReplaceInput(3, GetReplacementHigh(old_value));
+ node->InsertInput(zone(), 4, GetReplacementLow(new_value));
+ node->InsertInput(zone(), 5, GetReplacementHigh(new_value));
+ NodeProperties::ChangeOp(node,
+ machine()->Word32AtomicPairCompareExchange());
+ ReplaceNodeWithProjections(node);
+ } else {
+ LowerWord64AtomicNarrowOp(
+ node, machine()->Word64AtomicNarrowCompareExchange(type));
+ }
+ break;
+ }
default: { DefaultLowering(node); }
}
@@ -987,6 +1037,16 @@ void Int64Lowering::PreparePhiReplacement(Node* phi) {
value_count + 1, inputs_high, false));
}
}
+
+void Int64Lowering::ReplaceNodeWithProjections(Node* node) {
+ DCHECK(node != nullptr);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
+ ReplaceNode(node, low_node, high_node);
+}
+
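ReplaceNodeWithProjections captures the recurring pattern in this file: a lowered 64-bit node yields two 32-bit results, with Projection(0) taken as the low word and Projection(1) as the high word. The word split itself is ordinary integer arithmetic; a standalone sketch (illustration only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Split a 64-bit value into the low/high 32-bit words used after lowering and
// recombine them; Projection(0) corresponds to 'low', Projection(1) to 'high'.
struct Word32Pair { uint32_t low; uint32_t high; };

Word32Pair Split(uint64_t v) {
  return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
}

uint64_t Join(Word32Pair p) {
  return (static_cast<uint64_t>(p.high) << 32) | p.low;
}

int main() {
  Word32Pair p = Split(0x0123456789abcdefull);
  std::printf("%08x %08x %d\n", p.high, p.low, Join(p) == 0x0123456789abcdefull);
}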
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 6109cd5847..ab403f904a 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -50,6 +50,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
bool DefaultLowering(Node* node, bool low_word_only = false);
void LowerComparison(Node* node, const Operator* signed_op,
const Operator* unsigned_op);
+ void LowerWord64AtomicBinop(Node* node, const Operator* op);
+ void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
@@ -58,6 +60,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
Node* GetReplacementHigh(Node* node);
void PreparePhiReplacement(Node* phi);
void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
+ void ReplaceNodeWithProjections(Node* node);
struct NodeState {
Node* node;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 6b545c2853..a06f4490a6 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4,7 +4,7 @@
#include "src/compiler/js-call-reducer.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
@@ -23,6 +23,8 @@
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/vector-slot-pair.h"
namespace v8 {
@@ -218,11 +220,11 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
// Turn the {node} into a {JSCreateArray} call.
DCHECK_LE(2u, p.arity());
- Handle<AllocationSite> site;
size_t const arity = p.arity() - 2;
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceValueInput(node, target, 1);
- NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ NodeProperties::ChangeOp(
+ node, javascript()->CreateArray(arity, MaybeHandle<AllocationSite>()));
return Changed(node);
}
@@ -937,7 +939,7 @@ Reduction JSCallReducer::ReduceReflectHas(Node* node) {
Node* vtrue;
{
vtrue = etrue = if_true =
- graph()->NewNode(javascript()->HasProperty(), key, target, context,
+ graph()->NewNode(javascript()->HasProperty(), target, key, context,
frame_state, etrue, if_true);
}
@@ -976,7 +978,6 @@ bool CanInlineArrayIteratingBuiltin(Isolate* isolate,
isolate);
return receiver_map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(receiver_map->elements_kind()) &&
- (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
isolate->IsNoElementsProtectorIntact() &&
isolate->IsAnyInitialArrayPrototype(receiver_prototype);
}
@@ -1507,6 +1508,11 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -1531,11 +1537,10 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
- // This array should be HOLEY_SMI_ELEMENTS because of the non-zero length.
// Even though {JSCreateArray} is not marked as {kNoThrow}, we can elide the
// exceptional projections because it cannot throw with the given parameters.
Node* a = control = effect = graph()->NewNode(
- javascript()->CreateArray(1, Handle<AllocationSite>::null()),
+ javascript()->CreateArray(1, MaybeHandle<AllocationSite>()),
array_constructor, array_constructor, original_length, context,
outer_frame_state, effect, control);
@@ -1633,6 +1638,9 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
&check_fail, &control);
}
+ // The array {a} should be HOLEY_SMI_ELEMENTS because we'd only come into this
+ // loop if the input array length is non-zero, and "new Array({x > 0})" always
+ // produces a HOLEY array.
Handle<Map> double_map(Map::cast(native_context()->get(
Context::ArrayMapIndex(HOLEY_DOUBLE_ELEMENTS))),
isolate());
@@ -1715,6 +1723,11 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -1979,11 +1992,6 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
const ElementsKind kind = receiver_maps[0]->elements_kind();
- // TODO(pwong): Handle holey double elements kinds.
- if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
- return NoChange();
- }
-
for (Handle<Map> receiver_map : receiver_maps) {
if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
return NoChange();
@@ -2069,12 +2077,15 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
// Replace holes with undefined.
- if (IsHoleyElementsKind(kind)) {
- element = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant()),
- jsgraph()->UndefinedConstant(), element);
+ if (kind == HOLEY_DOUBLE_ELEMENTS) {
+ // TODO(7409): avoid deopt if not all uses of value are truncated.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
+ element = effect =
+ graph()->NewNode(simplified()->CheckFloat64Hole(mode, p.feedback()),
+ element, effect, control);
+ } else if (IsHoleyElementsKind(kind)) {
+ element =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), element);
}
Node* if_found_return_value =
@@ -2310,6 +2321,11 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -2555,8 +2571,13 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
if (!NodeProperties::GetMapWitness(isolate(), node).ToHandle(&receiver_map))
return NoChange();
- if (receiver_map->instance_type() != JS_ARRAY_TYPE) return NoChange();
- if (!IsFastElementsKind(receiver_map->elements_kind())) return NoChange();
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
+
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
Callable const callable =
search_variant == SearchVariant::kIndexOf
@@ -2651,6 +2672,11 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
+ if (IsHoleyElementsKind(kind)) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
dependencies()->DependOnProtector(
PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
@@ -3419,6 +3445,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceArrayPrototypePop(node);
case Builtins::kArrayPrototypeShift:
return ReduceArrayPrototypeShift(node);
+ case Builtins::kArrayPrototypeSlice:
+ return ReduceArrayPrototypeSlice(node);
case Builtins::kArrayPrototypeEntries:
return ReduceArrayIterator(node, IterationKind::kEntries);
case Builtins::kArrayPrototypeKeys:
@@ -3675,6 +3703,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceDatePrototypeGetTime(node);
case Builtins::kDateNow:
return ReduceDateNow(node);
+ case Builtins::kNumberConstructor:
+ return ReduceNumberConstructor(node);
default:
break;
}
@@ -4287,7 +4317,7 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
namespace {
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+// TODO(turbofan): This was copied from the old compiler and might be too restrictive.
bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
@@ -4297,7 +4327,7 @@ bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
return descriptors->GetDetails(number).IsReadOnly();
}
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+// TODO(turbofan): This was copied from the old compiler and might be too restrictive.
bool CanInlineArrayResizeOperation(Isolate* isolate, Handle<Map> receiver_map) {
if (!receiver_map->prototype()->IsJSArray()) return false;
Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
@@ -4739,6 +4769,85 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
return Replace(value);
}
+// ES6 section 22.1.3.23 Array.prototype.slice ( )
+Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ int arity = static_cast<int>(p.arity() - 2);
+ // Here we only optimize for cloning, that is when slice is called
+ // without arguments, or with a single argument that is the constant 0.
+ if (arity >= 2) return NoChange();
+ if (arity == 1) {
+ NumberMatcher m(NodeProperties::GetValueInput(node, 2));
+ if (!m.HasValue()) return NoChange();
+ if (m.Value() != 0) return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to determine the {receiver} map.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->array_species_protector()));
+
+ bool can_be_holey = false;
+ // Check that the maps are of JSArray (and more)
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(isolate(), receiver_map))
+ return NoChange();
+
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) can_be_holey = true;
+ }
+
+ // Install code dependency on the array protector for holey arrays.
+ if (can_be_holey) {
+ dependencies()->DependOnProtector(
+ PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
+ }
+
+ // If we have unreliable maps, we need a map check.
+ // This is actually redundant due to how JSNativeContextSpecialization
+ // reduces the load of slice, but we do it here nevertheless for consistency
+ // and robustness.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* context = NodeProperties::GetContextInput(node);
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCloneFastJSArray);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kNoThrow | Operator::kNoDeopt);
+
+ // Calls to Builtins::kCloneFastJSArray produce COW arrays
+ // if the original array is COW
+ Node* clone = effect = graph()->NewNode(
+ common()->Call(call_descriptor), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, effect, control);
+
+ ReplaceWithValue(node, clone, effect, control);
+ return Replace(clone);
+}
+
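ReduceArrayPrototypeSlice above only handles the cloning form of slice: a call with no arguments, or with a single argument that is the constant 0. That eligibility test can be sketched in isolation as follows (hypothetical names, illustration only):

#include <optional>

// Returns true when an Array.prototype.slice call can be treated as a clone:
// either slice() or slice(0). 'first_arg' holds the begin argument if it is a
// known number constant, std::nullopt otherwise.
bool CanReduceSliceToClone(int argument_count, std::optional<double> first_arg) {
  if (argument_count >= 2) return false;                   // slice(begin, end): bail out
  if (argument_count == 1) {
    return first_arg.has_value() && *first_arg == 0;       // only the constant 0
  }
  return true;                                             // slice() with no arguments
}

int main() {
  bool no_args = CanReduceSliceToClone(0, std::nullopt);   // slice()        -> true
  bool zero    = CanReduceSliceToClone(1, 0.0);            // slice(0)       -> true
  bool unknown = CanReduceSliceToClone(1, std::nullopt);   // slice(x)       -> false
  return (no_args && zero && !unknown) ? 0 : 1;
}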
// ES6 section 22.1.2.2 Array.isArray ( arg )
Reduction JSCallReducer::ReduceArrayIsArray(Node* node) {
// We certainly know that undefined is not an array.
@@ -4864,17 +4973,13 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
PropertyCellRef(js_heap_broker(), factory()->no_elements_protector()));
}
- // Load the (current) {iterated_object} from the {iterator}; this might be
- // either undefined or the JSReceiver that was passed to the JSArrayIterator
- // creation.
+ // Load the (current) {iterated_object} from the {iterator}.
Node* iterated_object = effect =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayIteratorIteratedObject()),
iterator, effect, control);
- // Ensure that the {iterated_object} map didn't change. This also rules
- // out the undefined that we put as a termination marker into the
- // iterator.[[IteratedObject]] field once we reach the end.
+ // Ensure that the {iterated_object} map didn't change.
effect = graph()->NewNode(
simplified()->CheckMaps(CheckMapsFlag::kNone, iterated_object_maps,
p.feedback()),
@@ -4919,6 +5024,16 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
Node* index = effect = graph()->NewNode(simplified()->LoadField(index_access),
iterator, effect, control);
+ // Load the elements of the {iterated_object}. While it feels
+ // counter-intuitive to place the elements pointer load before
+ // the condition below, as it might not be needed (if the {index}
+ // is out of bounds for the {iterated_object}), it's better this
+ // way as it allows the LoadElimination to eliminate redundant
+ // reloads of the elements pointer.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ iterated_object, effect, control);
+
// Load the length of the {iterated_object}. Due to the map checks we
// already know something about the length here, which we can leverage
// to generate Word32 operations below without additional checking.
@@ -4953,10 +5068,6 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
DCHECK(iteration_kind == IterationKind::kEntries ||
iteration_kind == IterationKind::kValues);
- Node* elements = etrue = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- iterated_object, etrue, if_true);
-
if (IsFixedTypedArrayElementsKind(elements_kind)) {
Node* base_ptr = etrue = graph()->NewNode(
simplified()->LoadField(
@@ -4969,9 +5080,9 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
ExternalArrayType array_type = kExternalInt8Array;
switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- array_type = kExternal##Type##Array; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ array_type = kExternal##Type##Array; \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -5002,7 +5113,8 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// TODO(6587): avoid deopt if not all uses of value are truncated.
CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
value_true = etrue = graph()->NewNode(
- simplified()->CheckFloat64Hole(mode), value_true, etrue, if_true);
+ simplified()->CheckFloat64Hole(mode, p.feedback()), value_true,
+ etrue, if_true);
}
}
@@ -5032,10 +5144,22 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// iterator.[[NextIndex]] >= array.length, stop iterating.
done_false = jsgraph()->TrueConstant();
value_false = jsgraph()->UndefinedConstant();
- efalse =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForJSArrayIteratorIteratedObject()),
- iterator, value_false, efalse, if_false);
+
+ if (!IsFixedTypedArrayElementsKind(elements_kind)) {
+ // Mark the {iterator} as exhausted by setting the [[NextIndex]] to a
+ // value that will never pass the length check again (aka the maximum
+ // value possible for the specific iterated object). Note that this is
+ // different from what the specification says, which is changing the
+ // [[IteratedObject]] field to undefined, but that makes it difficult
+ // to eliminate the map checks and "length" accesses in for..of loops.
+ //
+ // This is not necessary for JSTypedArrays, since the length of those
+ // cannot change later and so if we were ever out of bounds for them
+ // we will stay out-of-bounds forever.
+ Node* end_index = jsgraph()->Constant(index_access.type.Max());
+ efalse = graph()->NewNode(simplified()->StoreField(index_access),
+ iterator, end_index, efalse, if_false);
+ }
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -6034,7 +6158,7 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
Node* const parameters[] = {jsgraph()->TheHoleConstant()};
int const num_parameters = static_cast<int>(arraysize(parameters));
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), shared, Builtins::kTypedArrayConstructorLazyDeoptContinuation,
+ jsgraph(), shared, Builtins::kGenericConstructorLazyDeoptContinuation,
target, context, parameters, num_parameters, frame_state,
ContinuationFrameStateMode::LAZY);
@@ -6082,7 +6206,7 @@ Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
simplified()->NumberSubtract(), receiver_elements_kind,
jsgraph()->Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
do { \
Node* check = graph()->NewNode( \
simplified()->NumberEqual(), receiver_elements_kind, \
@@ -6629,11 +6753,12 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
}
namespace {
-int ExternalArrayElementSize(const ExternalArrayType element_type) {
+uint32_t ExternalArrayElementSize(const ExternalArrayType element_type) {
switch (element_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return size;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ DCHECK_LE(sizeof(ctype), 8); \
+ return sizeof(ctype);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
@@ -6644,14 +6769,15 @@ int ExternalArrayElementSize(const ExternalArrayType element_type) {
Reduction JSCallReducer::ReduceDataViewPrototypeGet(
Node* node, ExternalArrayType element_type) {
+ uint32_t const element_size = ExternalArrayElementSize(element_type);
+ CallParameters const& p = CallParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
-
- CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
Node* offset = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -6664,16 +6790,68 @@ Reduction JSCallReducer::ReduceDataViewPrototypeGet(
// Only do stuff if the {receiver} is really a DataView.
if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
JS_DATA_VIEW_TYPE)) {
- // Check that the {offset} is a positive Smi.
- offset = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- offset, effect, control);
+ // Check that the {offset} is within range for the {receiver}.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ // We only deal with DataViews here whose [[ByteLength]] is at least
+ // {element_size} and less than 2^31-{element_size}.
+ Handle<JSDataView> dataview = Handle<JSDataView>::cast(m.Value());
+ if (dataview->byte_length()->Number() < element_size ||
+ dataview->byte_length()->Number() - element_size > kMaxInt) {
+ return NoChange();
+ }
- Node* is_positive = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->ZeroConstant(), offset);
+ // The {receiver}s [[ByteOffset]] must be within Unsigned31 range.
+ if (dataview->byte_offset()->Number() > kMaxInt) {
+ return NoChange();
+ }
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNotASmi, p.feedback()),
- is_positive, effect, control);
+ // Check that the {offset} is within range of the {byte_length}.
+ Node* byte_length = jsgraph()->Constant(
+ dataview->byte_length()->Number() - (element_size - 1));
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ // Add the [[ByteOffset]] to compute the effective offset.
+ Node* byte_offset =
+ jsgraph()->Constant(dataview->byte_offset()->Number());
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ } else {
+ // We only deal with DataViews here that have Smi [[ByteLength]]s.
+ Node* byte_length = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, effect, control);
+ byte_length = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_length, effect, control);
+
+ // Check that the {offset} is within range of the {byte_length}.
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ if (element_size > 0) {
+ // For non-byte accesses we also need to check that the {offset}
+ // plus the {element_size}-1 fits within the given {byte_length}.
+ Node* end_offset =
+ graph()->NewNode(simplified()->NumberAdd(), offset,
+ jsgraph()->Constant(element_size - 1));
+ effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ end_offset, byte_length, effect, control);
+ }
+
+ // The {receiver}s [[ByteOffset]] also needs to be a (positive) Smi.
+ Node* byte_offset = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteOffset()),
+ receiver, effect, control);
+ byte_offset = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_offset, effect, control);
+
+ // Compute the buffer index at which we'll read.
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ }
// Coerce {is_little_endian} to boolean.
is_little_endian =
@@ -6684,139 +6862,52 @@ Reduction JSCallReducer::ReduceDataViewPrototypeGet(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* check_neutered = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- Node* branch_neutered = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_neutered, control);
-
- // Raise an error if it was neuteured.
- Node* if_true_neutered =
- graph()->NewNode(common()->IfTrue(), branch_neutered);
- Node* etrue_neutered = effect;
- {
- if_true_neutered = etrue_neutered = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kDetachedOperation),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.get")),
- context, frame_state, etrue_neutered, if_true_neutered);
- }
-
- // Otherwise, proceed.
- Node* if_false_neutered =
- graph()->NewNode(common()->IfFalse(), branch_neutered);
- Node* efalse_neutered = effect;
-
- // Get the byte offset and byte length of the {receiver}.
- Node* byte_offset = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteOffset()),
- receiver, efalse_neutered, if_false_neutered);
-
- Node* byte_length = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteLength()),
- receiver, efalse_neutered, if_false_neutered);
-
- // The end offset is the offset plus the element size
- // of the type that we want to load.
- int element_size = ExternalArrayElementSize(element_type);
- Node* end_offset = graph()->NewNode(simplified()->NumberAdd(), offset,
- jsgraph()->Constant(element_size));
-
- // We need to check that {end_offset} <= {byte_length}, ie
- // throw a RangeError if {byte_length} < {end_offset}.
- Node* check_range = graph()->NewNode(simplified()->NumberLessThan(),
- byte_length, end_offset);
- Node* branch_range = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_range, if_false_neutered);
-
- Node* if_true_range = graph()->NewNode(common()->IfTrue(), branch_range);
- Node* etrue_range = efalse_neutered;
- {
- if_true_range = etrue_range = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowRangeError, 2),
- jsgraph()->Constant(MessageTemplate::kInvalidDataViewAccessorOffset),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.get")),
- context, frame_state, etrue_range, if_true_range);
- }
-
- Node* if_false_range = graph()->NewNode(common()->IfFalse(), branch_range);
- Node* efalse_range = efalse_neutered;
- Node* vfalse_range;
- {
- // Get the buffer's backing store.
- Node* backing_store = efalse_range =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferBackingStore()),
- buffer, efalse_range, if_false_range);
-
- // Compute the buffer index at which we'll read.
- Node* buffer_index =
- graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
-
- // Perform the load.
- vfalse_range = efalse_range =
- graph()->NewNode(simplified()->LoadDataViewElement(element_type),
- buffer, backing_store, buffer_index,
- is_little_endian, efalse_range, if_false_range);
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ } else {
+ // If the buffer was neutered, deopt and let the unoptimized code throw.
+ Node* check_neutered = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check_neutered =
+ graph()->NewNode(simplified()->BooleanNot(), check_neutered);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
+ p.feedback()),
+ check_neutered, effect, control);
}
- // Rewire potential exception edges.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- // Create appropriate {IfException} and {IfSuccess} nodes.
- Node* extrue_neutered = graph()->NewNode(
- common()->IfException(), etrue_neutered,
- if_true_neutered); // We threw because the array was neutered.
- if_true_neutered =
- graph()->NewNode(common()->IfSuccess(), if_true_neutered);
+ // Get the buffer's backing store.
+ Node* backing_store = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBackingStore()),
+ buffer, effect, control);
- Node* extrue_range =
- graph()->NewNode(common()->IfException(), etrue_range,
- if_true_range); // We threw because out of bounds.
- if_true_range = graph()->NewNode(common()->IfSuccess(), if_true_range);
-
- // We can't throw in LoadDataViewElement(),
- // so we don't need to handle that path here.
-
- // Join the exception edges.
- Node* merge =
- graph()->NewNode(common()->Merge(2), extrue_neutered, extrue_range);
- Node* ephi = graph()->NewNode(common()->EffectPhi(2), extrue_neutered,
- extrue_range, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- extrue_neutered, extrue_range, merge);
- ReplaceWithValue(on_exception, phi, ephi, merge);
- }
-
- // Connect the throwing paths to end.
- if_true_neutered =
- graph()->NewNode(common()->Throw(), etrue_neutered, if_true_neutered);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_neutered);
- if_true_range =
- graph()->NewNode(common()->Throw(), etrue_range, if_true_range);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_range);
+ // Perform the load.
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadDataViewElement(element_type), buffer, backing_store,
+ offset, is_little_endian, effect, control);
// Continue on the regular path.
- ReplaceWithValue(node, vfalse_range, efalse_range, if_false_range);
- return Changed(vfalse_range);
+ ReplaceWithValue(node, value, effect, control);
+ return Changed(value);
}
return NoChange();
}
+
Reduction JSCallReducer::ReduceDataViewPrototypeSet(
Node* node, ExternalArrayType element_type) {
+ uint32_t const element_size = ExternalArrayElementSize(element_type);
+ CallParameters const& p = CallParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
-
- CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
Node* offset = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -6833,16 +6924,68 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
// Only do stuff if the {receiver} is really a DataView.
if (NodeProperties::HasInstanceTypeWitness(isolate(), receiver, effect,
JS_DATA_VIEW_TYPE)) {
- // Check that the {offset} is a positive Smi.
- offset = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- offset, effect, control);
+ // Check that the {offset} is within range for the {receiver}.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ // We only deal with DataViews here whose [[ByteLength]] is at least
+ // {element_size} and less than 2^31-{element_size}.
+ Handle<JSDataView> dataview = Handle<JSDataView>::cast(m.Value());
+ if (dataview->byte_length()->Number() < element_size ||
+ dataview->byte_length()->Number() - element_size > kMaxInt) {
+ return NoChange();
+ }
- Node* is_positive = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
- jsgraph()->ZeroConstant(), offset);
+ // The {receiver}'s [[ByteOffset]] must be within Unsigned31 range.
+ if (dataview->byte_offset()->Number() > kMaxInt) {
+ return NoChange();
+ }
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNotASmi, p.feedback()),
- is_positive, effect, control);
+ // Check that the {offset} is within range of the {byte_length}.
+ Node* byte_length = jsgraph()->Constant(
+ dataview->byte_length()->Number() - (element_size - 1));
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ // Add the [[ByteOffset]] to compute the effective offset.
+ Node* byte_offset =
+ jsgraph()->Constant(dataview->byte_offset()->Number());
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ } else {
+ // We only deal with DataViews here that have Smi [[ByteLength]]s.
+ Node* byte_length = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, effect, control);
+ byte_length = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_length, effect, control);
+
+ // Check that the {offset} is within range of the {byte_length}.
+ offset = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
+ byte_length, effect, control);
+
+ if (element_size > 0) {
+ // For non-byte accesses we also need to check that the {offset}
+ // plus the {element_size}-1 fits within the given {byte_length}.
+ Node* end_offset =
+ graph()->NewNode(simplified()->NumberAdd(), offset,
+ jsgraph()->Constant(element_size - 1));
+ effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ end_offset, byte_length, effect, control);
+ }
+
+ // The {receiver}'s [[ByteOffset]] also needs to be a (positive) Smi.
+ Node* byte_offset = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteOffset()),
+ receiver, effect, control);
+ byte_offset = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), byte_offset, effect, control);
+
+ // Compute the buffer index at which we'll write.
+ offset = graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
+ }
// Coerce {is_little_endian} to boolean.
is_little_endian =
@@ -6859,125 +7002,38 @@ Reduction JSCallReducer::ReduceDataViewPrototypeSet(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- Node* check_neutered = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- Node* branch_neutered = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_neutered, control);
-
- // Raise an error if it was neutered.
- Node* if_true_neutered =
- graph()->NewNode(common()->IfTrue(), branch_neutered);
- Node* etrue_neutered = effect;
- {
- if_true_neutered = etrue_neutered = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
- jsgraph()->Constant(MessageTemplate::kDetachedOperation),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.set")),
- context, frame_state, etrue_neutered, if_true_neutered);
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->DependOnProtector(PropertyCellRef(
+ js_heap_broker(), factory()->array_buffer_neutering_protector()));
+ } else {
+ // If the buffer was neutered, deopt and let the unoptimized code throw.
+ Node* check_neutered = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check_neutered =
+ graph()->NewNode(simplified()->BooleanNot(), check_neutered);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered,
+ p.feedback()),
+ check_neutered, effect, control);
}
- // Otherwise, proceed.
- Node* if_false_neutered =
- graph()->NewNode(common()->IfFalse(), branch_neutered);
- Node* efalse_neutered = effect;
-
- // Get the byte offset and byte length of the {receiver}.
- Node* byte_offset = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteOffset()),
- receiver, efalse_neutered, if_false_neutered);
-
- Node* byte_length = efalse_neutered =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteLength()),
- receiver, efalse_neutered, if_false_neutered);
-
- // The end offset is the offset plus the element size
- // of the type that we want to store.
- int element_size = ExternalArrayElementSize(element_type);
- Node* end_offset = graph()->NewNode(simplified()->NumberAdd(), offset,
- jsgraph()->Constant(element_size));
-
- // We need to check that {end_offset} <= {byte_length}, i.e.
- // throw a RangeError if {byte_length} < {end_offset}.
- Node* check_range = graph()->NewNode(simplified()->NumberLessThan(),
- byte_length, end_offset);
- Node* branch_range = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_range, if_false_neutered);
-
- Node* if_true_range = graph()->NewNode(common()->IfTrue(), branch_range);
- Node* etrue_range = efalse_neutered;
- {
- if_true_range = etrue_range = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowRangeError, 2),
- jsgraph()->Constant(MessageTemplate::kInvalidDataViewAccessorOffset),
- jsgraph()->HeapConstant(
- factory()->NewStringFromAsciiChecked("DataView.prototype.set")),
- context, frame_state, etrue_range, if_true_range);
- }
+ // Get the buffer's backing store.
+ Node* backing_store = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBackingStore()),
+ buffer, effect, control);
- Node* if_false_range = graph()->NewNode(common()->IfFalse(), branch_range);
- Node* efalse_range = efalse_neutered;
- Node* vfalse_range = jsgraph()->UndefinedConstant(); // Return value.
- {
- // Get the buffer's backing store.
- Node* backing_store = efalse_range =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferBackingStore()),
- buffer, efalse_range, if_false_range);
-
- // Compute the buffer index at which we'll write.
- Node* buffer_index =
- graph()->NewNode(simplified()->NumberAdd(), offset, byte_offset);
-
- // Perform the store.
- efalse_range =
- graph()->NewNode(simplified()->StoreDataViewElement(element_type),
- buffer, backing_store, buffer_index, value,
- is_little_endian, efalse_range, if_false_range);
- }
+ // Perform the store.
+ effect = graph()->NewNode(simplified()->StoreDataViewElement(element_type),
+ buffer, backing_store, offset, value,
+ is_little_endian, effect, control);
- // Rewire potential exception edges.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- // Create appropriate {IfException} and {IfSuccess} nodes.
- Node* extrue_neutered = graph()->NewNode(
- common()->IfException(), etrue_neutered,
- if_true_neutered); // We threw because the array was neutered.
- if_true_neutered =
- graph()->NewNode(common()->IfSuccess(), if_true_neutered);
-
- Node* extrue_range =
- graph()->NewNode(common()->IfException(), etrue_range,
- if_true_range); // We threw because out of bounds.
- if_true_range = graph()->NewNode(common()->IfSuccess(), if_true_range);
-
- // We can't throw in StoreDataViewElement(),
- // so we don't need to handle that path here.
-
- // Join the exception edges.
- Node* merge =
- graph()->NewNode(common()->Merge(2), extrue_neutered, extrue_range);
- Node* ephi = graph()->NewNode(common()->EffectPhi(2), extrue_neutered,
- extrue_range, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- extrue_neutered, extrue_range, merge);
- ReplaceWithValue(on_exception, phi, ephi, merge);
- }
-
- // Connect the throwing paths to end.
- if_true_neutered =
- graph()->NewNode(common()->Throw(), etrue_neutered, if_true_neutered);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_neutered);
- if_true_range =
- graph()->NewNode(common()->Throw(), etrue_range, if_true_range);
- NodeProperties::MergeControlToEnd(graph(), common(), if_true_range);
+ Node* value = jsgraph()->UndefinedConstant();
// Continue on the regular path.
- ReplaceWithValue(node, vfalse_range, efalse_range, if_false_range);
- return Changed(vfalse_range);
+ ReplaceWithValue(node, value, effect, control);
+ return Changed(value);
}
return NoChange();
@@ -7182,6 +7238,45 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
return Changed(node);
}
+// ES section #sec-number-constructor
+Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+
+ if (p.arity() <= 2) {
+ ReplaceWithValue(node, jsgraph()->ZeroConstant());
+ }
+
+ // We don't have a new.target argument, so we can convert to number,
+ // but must also convert BigInts.
+ if (p.arity() == 3) {
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Handle<SharedFunctionInfo> number_constructor(
+ handle(native_context()->number_function()->shared(), isolate()));
+
+ const std::vector<Node*> checkpoint_parameters({
+ jsgraph()->UndefinedConstant(), /* receiver */
+ });
+ int checkpoint_parameters_size =
+ static_cast<int>(checkpoint_parameters.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), number_constructor,
+ Builtins::kGenericConstructorLazyDeoptContinuation, target, context,
+ checkpoint_parameters.data(), checkpoint_parameters_size,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToNumberConvertBigInt());
+ NodeProperties::ReplaceFrameStateInput(node, frame_state);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 6e3f531647..e04870ed2f 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -94,6 +94,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayPrototypePush(Node* node);
Reduction ReduceArrayPrototypePop(Node* node);
Reduction ReduceArrayPrototypeShift(Node* node);
+ Reduction ReduceArrayPrototypeSlice(Node* node);
Reduction ReduceArrayIsArray(Node* node);
enum class ArrayIteratorKind { kArray, kTypedArray };
Reduction ReduceArrayIterator(Node* node, IterationKind kind);
@@ -190,6 +191,8 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceDateNow(Node* node);
Reduction ReduceNumberParseInt(Node* node);
+ Reduction ReduceNumberConstructor(Node* node);
+
// Returns the updated {to} node, and updates control and effect along the
// way.
Node* DoFilterPostCallbackWork(ElementsKind kind, Node** control,
@@ -231,7 +234,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
Factory* factory() const;
Handle<Context> native_context() const { return native_context_; }
@@ -243,7 +246,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
Flags const flags_;
Handle<Context> const native_context_;
CompilationDependencies* const dependencies_;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 85a80a2b2f..ef2297c9d6 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -17,7 +17,6 @@ namespace internal {
namespace compiler {
Reduction JSContextSpecialization::Reduce(Node* node) {
- DisallowHeapAccess no_heap_access;
switch (node->opcode()) {
case IrOpcode::kParameter:
return ReduceParameter(node);
@@ -101,7 +100,7 @@ bool IsContextParameter(Node* node) {
// specialization context. If successful, update {distance} to whatever
// distance remains from the specialization context.
base::Optional<ContextRef> GetSpecializationContext(
- const JSHeapBroker* broker, Node* node, size_t* distance,
+ JSHeapBroker* broker, Node* node, size_t* distance,
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index d2f56d50f1..7324c5aaf0 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
#include "src/compiler/graph-reducer.h"
+#include "src/maybe-handles.h"
namespace v8 {
namespace internal {
@@ -62,12 +63,12 @@ class JSContextSpecialization final : public AdvancedReducer {
JSGraph* jsgraph() const { return jsgraph_; }
Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
};
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index a9ce42e1e2..6484e05061 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -21,6 +21,7 @@
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-inl.h"
@@ -43,8 +44,9 @@ Node* GetArgumentsFrameState(Node* frame_state) {
// inlined.
bool IsAllocationInlineable(const JSFunctionRef& target,
const JSFunctionRef& new_target) {
+ CHECK_IMPLIES(new_target.has_initial_map(),
+ !new_target.initial_map().is_dictionary_map());
return new_target.has_initial_map() &&
- !new_target.initial_map().is_dictionary_map() &&
new_target.initial_map().constructor_or_backpointer().equals(target);
}
@@ -59,6 +61,7 @@ const int kBlockContextAllocationLimit = 16;
} // namespace
Reduction JSCreateLowering::Reduce(Node* node) {
+ DisallowHeapAccess disallow_heap_access;
switch (node->opcode()) {
case IrOpcode::kJSCreate:
return ReduceJSCreate(node);
@@ -110,7 +113,6 @@ Reduction JSCreateLowering::Reduce(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
Node* const target = NodeProperties::GetValueInput(node, 0);
Type const target_type = NodeProperties::GetType(target);
@@ -137,25 +139,22 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
return NoChange();
}
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} changes.
- MapRef initial_map = dependencies()->DependOnInitialMap(original_constructor);
-
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- SlackTrackingResult slack_tracking_result =
- original_constructor.FinishSlackTracking();
+ SlackTrackingPrediction slack_tracking_prediction =
+ dependencies()->DependOnInitialMapInstanceSizePrediction(
+ original_constructor);
+ MapRef initial_map = original_constructor.initial_map();
// Emit code to allocate the JSObject instance for the
// {original_constructor}.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(slack_tracking_result.instance_size);
+ a.Allocate(slack_tracking_prediction.instance_size());
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
jsgraph()->EmptyFixedArrayConstant());
a.Store(AccessBuilder::ForJSObjectElements(),
jsgraph()->EmptyFixedArrayConstant());
- for (int i = 0; i < slack_tracking_result.inobject_property_count; ++i) {
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
@@ -166,7 +165,6 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
Node* const frame_state = NodeProperties::GetFrameStateInput(node);
@@ -260,7 +258,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
arguments_frame, rest_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
- native_context_ref().js_array_fast_elements_map_index());
+ native_context_ref().js_array_packed_elements_map());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -379,7 +377,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
- native_context_ref().js_array_fast_elements_map_index());
+ native_context_ref().js_array_packed_elements_map());
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -404,7 +402,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateGeneratorObject, node->opcode());
Node* const closure = NodeProperties::GetValueInput(node, 0);
Node* const receiver = NodeProperties::GetValueInput(node, 1);
@@ -416,16 +413,12 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
DCHECK(closure_type.AsHeapConstant()->Ref().IsJSFunction());
JSFunctionRef js_function =
closure_type.AsHeapConstant()->Ref().AsJSFunction();
- js_function.EnsureHasInitialMap();
+ if (!js_function.has_initial_map()) return NoChange();
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- SlackTrackingResult slack_tracking_result =
- js_function.FinishSlackTracking();
+ SlackTrackingPrediction slack_tracking_prediction =
+ dependencies()->DependOnInitialMapInstanceSizePrediction(js_function);
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} changes.
- MapRef initial_map = dependencies()->DependOnInitialMap(js_function);
+ MapRef initial_map = js_function.initial_map();
DCHECK(initial_map.instance_type() == JS_GENERATOR_OBJECT_TYPE ||
initial_map.instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
@@ -433,8 +426,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
int parameter_count_no_receiver = shared.internal_formal_parameter_count();
- int size =
- parameter_count_no_receiver + shared.GetBytecodeArrayRegisterCount();
+ int size = parameter_count_no_receiver +
+ shared.GetBytecodeArray().register_count();
AllocationBuilder ab(jsgraph(), effect, control);
ab.AllocateArray(size, factory()->fixed_array_map());
for (int i = 0; i < size; ++i) {
@@ -445,7 +438,7 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Emit code to allocate the JS[Async]GeneratorObject instance.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(slack_tracking_result.instance_size);
+ a.Allocate(slack_tracking_prediction.instance_size());
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
Node* undefined = jsgraph()->UndefinedConstant();
a.Store(AccessBuilder::ForMap(), initial_map);
@@ -469,7 +462,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
}
// Handle in-object properties, too.
- for (int i = 0; i < slack_tracking_result.inobject_property_count; ++i) {
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
undefined);
}
@@ -481,20 +475,18 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Constructs an array with a variable {length} when no upper bound
// is known for the capacity.
-Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
- Handle<Map> initial_map,
- PretenureFlag pretenure) {
+Reduction JSCreateLowering::ReduceNewArray(
+ Node* node, Node* length, MapRef initial_map, PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
- if (!IsHoleyElementsKind(initial_map->elements_kind())) {
- initial_map =
- Map::AsElementsKind(isolate(), initial_map,
- GetHoleyElementsKind(initial_map->elements_kind()));
- }
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(
+ GetHoleyElementsKind(initial_map.elements_kind())));
// Check that the {limit} is an unsigned integer in the valid range.
// This has to be kept in sync with src/runtime/runtime-array.cc,
@@ -506,7 +498,7 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Construct elements and properties for the resulting JSArray.
Node* elements = effect =
- graph()->NewNode(IsDoubleElementsKind(initial_map->elements_kind())
+ graph()->NewNode(IsDoubleElementsKind(initial_map.elements_kind())
? simplified()->NewDoubleElements(pretenure)
: simplified()->NewSmiOrObjectElements(pretenure),
length, effect, control);
@@ -514,15 +506,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(initial_map->instance_size(), pretenure);
+ a.Allocate(slack_tracking_prediction.instance_size(), pretenure);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(initial_map->elements_kind()),
- length);
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), initial_map), i),
+ a.Store(AccessBuilder::ForJSArrayLength(initial_map.elements_kind()), length);
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -532,20 +523,21 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Constructs an array with a variable {length} when an actual
// upper bound is known for the {capacity}.
-Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
- int capacity,
- Handle<Map> initial_map,
- PretenureFlag pretenure) {
+Reduction JSCreateLowering::ReduceNewArray(
+ Node* node, Node* length, int capacity, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK(node->opcode() == IrOpcode::kJSCreateArray ||
node->opcode() == IrOpcode::kJSCreateEmptyLiteralArray);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Determine the appropriate elements kind.
- ElementsKind elements_kind = initial_map->elements_kind();
+ ElementsKind elements_kind = initial_map.elements_kind();
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
- initial_map = Map::AsElementsKind(isolate(), initial_map, elements_kind);
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
}
DCHECK(IsFastElementsKind(elements_kind));
@@ -561,14 +553,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(initial_map->instance_size(), pretenure);
+ a.Allocate(slack_tracking_prediction.instance_size(), pretenure);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), initial_map), i),
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -576,16 +568,16 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
return Changed(node);
}
-Reduction JSCreateLowering::ReduceNewArray(Node* node,
- std::vector<Node*> values,
- Handle<Map> initial_map,
- PretenureFlag pretenure) {
+Reduction JSCreateLowering::ReduceNewArray(
+ Node* node, std::vector<Node*> values, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Determine the appropriate elements kind.
- ElementsKind elements_kind = initial_map->elements_kind();
+ ElementsKind elements_kind = initial_map.elements_kind();
DCHECK(IsFastElementsKind(elements_kind));
// Check {values} based on the {elements_kind}. These checks are guarded
@@ -618,14 +610,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(initial_map->instance_size(), pretenure);
+ a.Allocate(slack_tracking_prediction.instance_size(), pretenure);
a.Store(AccessBuilder::ForMap(), initial_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), initial_map), i),
+ for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
+ ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
}
RelaxControls(node);
@@ -634,19 +626,19 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
}
Reduction JSCreateLowering::ReduceNewArrayToStubCall(
- Node* node, Handle<AllocationSite> site) {
+ Node* node, base::Optional<AllocationSiteRef> site) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
Type new_target_type = NodeProperties::GetType(new_target);
- Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
- : jsgraph()->HeapConstant(site);
+ Node* type_info =
+ site ? jsgraph()->Constant(*site) : jsgraph()->UndefinedConstant();
ElementsKind elements_kind =
- site.is_null() ? GetInitialFastElementsKind() : site->GetElementsKind();
+ site ? site->GetElementsKind() : GetInitialFastElementsKind();
AllocationSiteOverrideMode override_mode =
- (site.is_null() || AllocationSite::ShouldTrack(elements_kind))
+ (!site || AllocationSite::ShouldTrack(elements_kind))
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
@@ -699,54 +691,48 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- Handle<AllocationSite> const site = p.site();
+ base::Optional<AllocationSiteRef> site_ref;
+ {
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) {
+ site_ref = AllocationSiteRef(js_heap_broker(), site);
+ }
+ }
PretenureFlag pretenure = NOT_TENURED;
- Handle<JSFunction> constructor(native_context()->array_function(), isolate());
+ JSFunctionRef constructor = native_context_ref().array_function();
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
- Type new_target_type =
- (target == new_target)
- ? Type::HeapConstant(js_heap_broker(), constructor, zone())
- : NodeProperties::GetType(new_target);
+ Type new_target_type = (target == new_target)
+ ? Type::HeapConstant(constructor, zone())
+ : NodeProperties::GetType(new_target);
// Extract original constructor function.
if (new_target_type.IsHeapConstant() &&
- new_target_type.AsHeapConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> original_constructor =
- Handle<JSFunction>::cast(new_target_type.AsHeapConstant()->Value());
- DCHECK(constructor->IsConstructor());
- DCHECK(original_constructor->IsConstructor());
+ new_target_type.AsHeapConstant()->Ref().IsJSFunction()) {
+ JSFunctionRef original_constructor =
+ new_target_type.AsHeapConstant()->Ref().AsJSFunction();
+ DCHECK(constructor.IsConstructor());
+ DCHECK(original_constructor.IsConstructor());
// Check if we can inline the allocation.
- if (IsAllocationInlineable(
- JSFunctionRef(js_heap_broker(), constructor),
- JSFunctionRef(js_heap_broker(), original_constructor))) {
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- original_constructor->CompleteInobjectSlackTrackingIfActive();
-
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} changes.
- MapRef initial_map = dependencies()->DependOnInitialMap(
- JSFunctionRef(js_heap_broker(), original_constructor));
+ if (IsAllocationInlineable(constructor, original_constructor)) {
+ SlackTrackingPrediction slack_tracking_prediction =
+ dependencies()->DependOnInitialMapInstanceSizePrediction(
+ original_constructor);
+ MapRef initial_map = original_constructor.initial_map();
// Tells whether we are protected by either the {site} or a
// protector cell to do certain speculative optimizations.
bool can_inline_call = false;
// Check if we have a feedback {site} on the {node}.
- if (!site.is_null()) {
- ElementsKind elements_kind = site->GetElementsKind();
- if (initial_map.elements_kind() != elements_kind) {
- initial_map =
- MapRef(js_heap_broker(),
- Map::AsElementsKind(isolate(), initial_map.object<Map>(),
- elements_kind));
- }
- can_inline_call = site->CanInlineCall();
- auto site_ref = AllocationSiteRef(js_heap_broker(), site);
- pretenure = dependencies()->DependOnPretenureMode(site_ref);
- dependencies()->DependOnElementsKind(site_ref);
+ if (site_ref) {
+ ElementsKind elements_kind = site_ref->GetElementsKind();
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
+ can_inline_call = site_ref->CanInlineCall();
+ pretenure = dependencies()->DependOnPretenureMode(*site_ref);
+ dependencies()->DependOnElementsKind(*site_ref);
} else {
can_inline_call = isolate()->IsArrayConstructorIntact();
}
@@ -754,8 +740,8 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
if (arity == 0) {
Node* length = jsgraph()->ZeroConstant();
int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, initial_map.object<Map>(),
- pretenure);
+ return ReduceNewArray(node, length, capacity, initial_map, pretenure,
+ slack_tracking_prediction);
} else if (arity == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
Type length_type = NodeProperties::GetType(length);
@@ -767,23 +753,21 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
elements_kind, IsHoleyElementsKind(elements_kind)
? HOLEY_ELEMENTS
: PACKED_ELEMENTS);
- initial_map =
- MapRef(js_heap_broker(),
- Map::AsElementsKind(isolate(), initial_map.object<Map>(),
- elements_kind));
- return ReduceNewArray(node, std::vector<Node*>{length},
- initial_map.object<Map>(), pretenure);
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
+ return ReduceNewArray(node, std::vector<Node*>{length}, initial_map,
+ pretenure, slack_tracking_prediction);
}
if (length_type.Is(Type::SignedSmall()) && length_type.Min() >= 0 &&
length_type.Max() <= kElementLoopUnrollLimit &&
length_type.Min() == length_type.Max()) {
int capacity = static_cast<int>(length_type.Max());
- return ReduceNewArray(node, length, capacity,
- initial_map.object<Map>(), pretenure);
+ return ReduceNewArray(node, length, capacity, initial_map, pretenure,
+ slack_tracking_prediction);
}
if (length_type.Maybe(Type::UnsignedSmall()) && can_inline_call) {
- return ReduceNewArray(node, length, initial_map.object<Map>(),
- pretenure);
+ return ReduceNewArray(node, length, initial_map, pretenure,
+ slack_tracking_prediction);
}
} else if (arity <= JSArray::kInitialMaxFastElementArray) {
// Gather the values to store into the newly created array.
@@ -828,13 +812,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
// we cannot inline this invocation of the Array constructor here.
return NoChange();
}
- initial_map =
- MapRef(js_heap_broker(),
- Map::AsElementsKind(isolate(), initial_map.object<Map>(),
- elements_kind));
-
- return ReduceNewArray(node, values, initial_map.object<Map>(),
- pretenure);
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
+ initial_map, initial_map.AsElementsKind(elements_kind));
+ return ReduceNewArray(node, values, initial_map, pretenure,
+ slack_tracking_prediction);
}
}
}
@@ -842,11 +823,10 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
// TODO(bmeurer): Optimize the subclassing case.
if (target != new_target) return NoChange();
- return ReduceNewArrayToStubCall(node, site);
+ return ReduceNewArrayToStubCall(node, site_ref);
}
Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, node->opcode());
CreateArrayIteratorParameters const& p =
CreateArrayIteratorParametersOf(node->op());
@@ -906,7 +886,6 @@ MapRef MapForCollectionIterationKind(const NativeContextRef& native_context,
} // namespace
Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateCollectionIterator, node->opcode());
CreateCollectionIteratorParameters const& p =
CreateCollectionIteratorParametersOf(node->op());
@@ -938,7 +917,6 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateBoundFunction, node->opcode());
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
@@ -979,7 +957,6 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
SharedFunctionInfoRef shared(js_heap_broker(), p.shared_info());
@@ -1042,7 +1019,6 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
Node* done = NodeProperties::GetValueInput(node, 1);
@@ -1067,7 +1043,6 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateStringIterator, node->opcode());
Node* string = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1089,14 +1064,13 @@ Reduction JSCreateLowering::ReduceJSCreateStringIterator(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateKeyValueArray, node->opcode());
Node* key = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* array_map = jsgraph()->Constant(
- native_context_ref().js_array_fast_elements_map_index());
+ Node* array_map =
+ jsgraph()->Constant(native_context_ref().js_array_packed_elements_map());
Node* properties = jsgraph()->EmptyFixedArrayConstant();
Node* length = jsgraph()->Constant(2);
@@ -1120,11 +1094,10 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreatePromise(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreatePromise, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
- MapRef promise_map = native_context_ref().promise_function_initial_map();
+ MapRef promise_map = native_context_ref().promise_function().initial_map();
AllocationBuilder a(jsgraph(), effect, graph()->start());
a.Allocate(promise_map.instance_size());
@@ -1165,7 +1138,7 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
pretenure = dependencies()->DependOnPretenureMode(site);
}
dependencies()->DependOnElementsKinds(site);
- JSObjectRef boilerplate = site.boilerplate();
+ JSObjectRef boilerplate = site.boilerplate().value();
Node* value = effect =
AllocateFastLiteral(effect, control, boilerplate, pretenure);
ReplaceWithValue(node, value, effect, control);
@@ -1178,20 +1151,21 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
- Handle<Object> feedback(
- p.feedback().vector()->Get(p.feedback().slot())->ToObject(), isolate());
- if (feedback->IsAllocationSite()) {
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
- DCHECK(!site->PointsToLiteral());
- Handle<Map> const initial_map(
- native_context()->GetInitialJSArrayMap(site->GetElementsKind()),
- isolate());
- auto site_ref = AllocationSiteRef(js_heap_broker(), site);
- PretenureFlag const pretenure =
- dependencies()->DependOnPretenureMode(site_ref);
- dependencies()->DependOnElementsKind(site_ref);
+ FeedbackVectorRef fv(js_heap_broker(), p.feedback().vector());
+ ObjectRef feedback = fv.get(p.feedback().slot());
+ if (feedback.IsAllocationSite()) {
+ AllocationSiteRef site = feedback.AsAllocationSite();
+ DCHECK(!site.PointsToLiteral());
+ MapRef initial_map =
+ native_context_ref().GetInitialJSArrayMap(site.GetElementsKind());
+ PretenureFlag const pretenure = dependencies()->DependOnPretenureMode(site);
+ dependencies()->DependOnElementsKind(site);
Node* length = jsgraph()->ZeroConstant();
- return ReduceNewArray(node, length, 0, initial_map, pretenure);
+ DCHECK(!initial_map.IsInobjectSlackTrackingInProgress());
+ SlackTrackingPrediction slack_tracking_prediction(
+ initial_map, initial_map.instance_size());
+ return ReduceNewArray(node, length, 0, initial_map, pretenure,
+ slack_tracking_prediction);
}
return NoChange();
}
@@ -1202,10 +1176,10 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Retrieve the initial map for the object.
- Handle<Map> map = factory()->ObjectLiteralMapFromCache(native_context(), 0);
- DCHECK(!map->is_dictionary_map());
- DCHECK(!map->IsInobjectSlackTrackingInProgress());
- Node* js_object_map = jsgraph()->HeapConstant(map);
+ MapRef map = native_context_ref().object_function().initial_map();
+ DCHECK(!map.is_dictionary_map());
+ DCHECK(!map.IsInobjectSlackTrackingInProgress());
+ Node* js_object_map = jsgraph()->Constant(map);
// Setup elements and properties.
Node* elements = jsgraph()->EmptyFixedArrayConstant();
@@ -1213,13 +1187,12 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
// Perform the allocation of the actual JSArray object.
AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(map->instance_size());
+ a.Allocate(map.instance_size());
a.Store(AccessBuilder::ForMap(), js_object_map);
a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- for (int i = 0; i < map->GetInObjectProperties(); i++) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(
- MapRef(js_heap_broker(), map), i),
+ for (int i = 0; i < map.GetInObjectProperties(); i++) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(map, i),
jsgraph()->UndefinedConstant());
}
@@ -1229,7 +1202,6 @@ Reduction JSCreateLowering::ReduceJSCreateEmptyLiteralObject(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateLiteralRegExp, node->opcode());
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1247,7 +1219,6 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralRegExp(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
@@ -1295,7 +1266,6 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
Node* extension = NodeProperties::GetValueInput(node, 0);
@@ -1317,7 +1287,6 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
Node* exception = NodeProperties::GetValueInput(node, 0);
@@ -1343,7 +1312,6 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
ScopeInfoRef scope_info(js_heap_broker(), ScopeInfoOf(node->op()));
int const context_length = scope_info.ContextLength();
@@ -1377,7 +1345,6 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
- DisallowHeapAccess no_heap_access;
DCHECK_EQ(IrOpcode::kJSCreateObject, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 667298c238..151be1b35c 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -26,14 +26,14 @@ class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
-
+class SlackTrackingPrediction;
// Lowers JSCreate-level operators to fast (inline) allocations.
class V8_EXPORT_PRIVATE JSCreateLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
Handle<Context> native_context, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
@@ -68,12 +68,17 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCreateGeneratorObject(Node* node);
- Reduction ReduceNewArray(Node* node, Node* length, Handle<Map> initial_map,
- PretenureFlag pretenure);
- Reduction ReduceNewArray(Node* node, Node* length, int capacity,
- Handle<Map> initial_map, PretenureFlag pretenure);
- Reduction ReduceNewArray(Node* node, std::vector<Node*> values,
- Handle<Map> initial_map, PretenureFlag pretenure);
+ Reduction ReduceNewArray(
+ Node* node, Node* length, MapRef initial_map, PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction);
+ Reduction ReduceNewArray(
+ Node* node, Node* length, int capacity, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction);
+ Reduction ReduceNewArray(
+ Node* node, std::vector<Node*> values, MapRef initial_map,
+ PretenureFlag pretenure,
+ const SlackTrackingPrediction& slack_tracking_prediction);
Reduction ReduceJSCreateObject(Node* node);
Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
@@ -104,7 +109,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* AllocateLiteralRegExp(Node* effect, Node* control,
JSRegExpRef boilerplate);
- Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
+ Reduction ReduceNewArrayToStubCall(Node* node,
+ base::Optional<AllocationSiteRef> site);
Factory* factory() const;
Graph* graph() const;
@@ -115,12 +121,12 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
CompilationDependencies* dependencies() const { return dependencies_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Zone* zone() const { return zone_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
Handle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 5e134307f4..0903f181b9 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -82,6 +82,7 @@ REPLACE_STUB_CALL(Equal)
REPLACE_STUB_CALL(ToInteger)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
+REPLACE_STUB_CALL(ToNumberConvertBigInt)
REPLACE_STUB_CALL(ToNumeric)
REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
@@ -358,14 +359,15 @@ void JSGenericLowering::LowerJSCreateArguments(Node* node) {
void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- Handle<AllocationSite> const site = p.site();
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), ArrayConstructorDescriptor{}, arity + 1,
CallDescriptor::kNeedsFrameState, node->op()->properties());
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
Node* stub_arity = jsgraph()->Int32Constant(arity);
- Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
- : jsgraph()->HeapConstant(site);
+ MaybeHandle<AllocationSite> const maybe_site = p.site();
+ Handle<AllocationSite> site;
+ Node* type_info = maybe_site.ToHandle(&site) ? jsgraph()->HeapConstant(site)
+ : jsgraph()->UndefinedConstant();
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
@@ -532,6 +534,17 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
}
}
+void JSGenericLowering::LowerJSCloneObject(Node* node) {
+ CloneObjectParameters const& p = CloneObjectParametersOf(node->op());
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC);
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector()));
+ ReplaceWithStubCall(node, callable, flags);
+}
+
void JSGenericLowering::LowerJSCreateEmptyLiteralObject(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 2624387165..949dca377d 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -4,41 +4,528 @@
#include "src/compiler/js-heap-broker.h"
-#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/graph-reducer.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
namespace compiler {
-MapRef HeapObjectRef::map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<HeapObject>()->map(), broker()->isolate()));
+#define FORWARD_DECL(Name) class Name##Data;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
+// TODO(neis): It would be nice to share the serialized data for read-only
+// objects.
+
+class ObjectData : public ZoneObject {
+ public:
+ static ObjectData* Serialize(JSHeapBroker* broker, Handle<Object> object);
+
+ ObjectData(JSHeapBroker* broker_, Handle<Object> object_, bool is_smi_)
+ : broker(broker_), object(object_), is_smi(is_smi_) {
+ broker->AddData(object, this);
+ }
+
+#define DECLARE_IS_AND_AS(Name) \
+ bool Is##Name() const; \
+ Name##Data* As##Name();
+ HEAP_BROKER_OBJECT_LIST(DECLARE_IS_AND_AS)
+#undef DECLARE_IS_AND_AS
+
+ JSHeapBroker* const broker;
+ Handle<Object> const object;
+ bool const is_smi;
+};
+
+// TODO(neis): Perhaps add a boolean that indicates whether serialization of an
+// object has completed. That could be used to add safety checks.
+
+#define GET_OR_CREATE(name) \
+ broker->GetOrCreateData(handle(object_->name(), broker->isolate()))
+
+class HeapObjectData : public ObjectData {
+ public:
+ static HeapObjectData* Serialize(JSHeapBroker* broker,
+ Handle<HeapObject> object);
+
+ HeapObjectType const type;
+ MapData* const map;
+
+ HeapObjectData(JSHeapBroker* broker_, Handle<HeapObject> object_,
+ HeapObjectType type_)
+ : ObjectData(broker_, object_, false),
+ type(type_),
+ map(GET_OR_CREATE(map)->AsMap()) {
+ CHECK(broker_->SerializingAllowed());
+ }
+};
+
+class PropertyCellData : public HeapObjectData {
+ public:
+ PropertyCellData(JSHeapBroker* broker_, Handle<PropertyCell> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class JSObjectData : public HeapObjectData {
+ public:
+ JSObjectData(JSHeapBroker* broker_, Handle<JSObject> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class JSFunctionData : public JSObjectData {
+ public:
+ JSGlobalProxyData* const global_proxy;
+ MapData* const initial_map; // Can be nullptr.
+ bool const has_prototype;
+ ObjectData* const prototype; // Can be nullptr.
+ bool const PrototypeRequiresRuntimeLookup;
+ SharedFunctionInfoData* const shared;
+
+ JSFunctionData(JSHeapBroker* broker_, Handle<JSFunction> object_,
+ HeapObjectType type_);
+};
+
+class JSRegExpData : public JSObjectData {
+ public:
+ JSRegExpData(JSHeapBroker* broker_, Handle<JSRegExp> object_,
+ HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_) {}
+};
+
+class HeapNumberData : public HeapObjectData {
+ public:
+ HeapNumberData(JSHeapBroker* broker_, Handle<HeapNumber> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class MutableHeapNumberData : public HeapObjectData {
+ public:
+ MutableHeapNumberData(JSHeapBroker* broker_,
+ Handle<MutableHeapNumber> object_, HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class ContextData : public HeapObjectData {
+ public:
+ ContextData(JSHeapBroker* broker_, Handle<Context> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class NativeContextData : public ContextData {
+ public:
+#define DECL_MEMBER(type, name) type##Data* const name;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
+#undef DECL_MEMBER
+
+ NativeContextData(JSHeapBroker* broker_, Handle<NativeContext> object_,
+ HeapObjectType type_)
+ : ContextData(broker_, object_, type_)
+#define INIT_MEMBER(type, name) , name(GET_OR_CREATE(name)->As##type())
+ BROKER_NATIVE_CONTEXT_FIELDS(INIT_MEMBER)
+#undef INIT_MEMBER
+ {
+ }
+};
+
+class NameData : public HeapObjectData {
+ public:
+ NameData(JSHeapBroker* broker, Handle<Name> object, HeapObjectType type)
+ : HeapObjectData(broker, object, type) {}
+};
+
+class StringData : public NameData {
+ public:
+ StringData(JSHeapBroker* broker, Handle<String> object, HeapObjectType type)
+ : NameData(broker, object, type),
+ length(object->length()),
+ first_char(length > 0 ? object->Get(0) : 0) {
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ if (length <= kMaxLengthForDoubleConversion) {
+ to_number = StringToDouble(
+ broker->isolate(), broker->isolate()->unicode_cache(), object, flags);
+ }
+ }
+
+ int const length;
+ uint16_t const first_char;
+ base::Optional<double> to_number;
+
+ private:
+ static constexpr int kMaxLengthForDoubleConversion = 23;
+};
+
+class InternalizedStringData : public StringData {
+ public:
+ InternalizedStringData(JSHeapBroker* broker,
+ Handle<InternalizedString> object, HeapObjectType type)
+ : StringData(broker, object, type) {}
+};
+
+namespace {
+
+bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
+ int* max_properties) {
+ DCHECK_GE(max_depth, 0);
+ DCHECK_GE(*max_properties, 0);
+
+ // Make sure the boilerplate map is not deprecated.
+ if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+
+ // Check for too deep nesting.
+ if (max_depth == 0) return false;
+
+ // Check the elements.
+ Isolate* const isolate = boilerplate->GetIsolate();
+ Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
+ if (elements->length() > 0 &&
+ elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
+ if (boilerplate->HasSmiOrObjectElements()) {
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ int length = elements->length();
+ for (int i = 0; i < length; i++) {
+ if ((*max_properties)-- == 0) return false;
+ Handle<Object> value(fast_elements->get(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1,
+ max_properties)) {
+ return false;
+ }
+ }
+ }
+ } else if (boilerplate->HasDoubleElements()) {
+ if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+ } else {
+ return false;
+ }
+ }
+
+ // TODO(turbofan): Do we want to support out-of-object properties?
+ if (!(boilerplate->HasFastProperties() &&
+ boilerplate->property_array()->length() == 0)) {
+ return false;
+ }
+
+ // Check the in-object properties.
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors(), isolate);
+ int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+ if ((*max_properties)-- == 0) return false;
+ FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+ if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
+ return false;
+ }
+ }
+ }
+ return true;
}
-double HeapNumberRef::value() const {
- AllowHandleDereference allow_handle_dereference;
- return object<HeapNumber>()->value();
+// Maximum depth and total number of elements and properties for literal
+// graphs to be considered for fast deep-copying. The limit is chosen to
+// match the maximum number of inobject properties, to ensure that the
+// performance of using object literals is not worse than using constructor
+// functions, see crbug.com/v8/6211 for details.
+const int kMaxFastLiteralDepth = 3;
+const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) {
+ int max_properties = kMaxFastLiteralProperties;
+ return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
+ &max_properties);
}
-double MutableHeapNumberRef::value() const {
- AllowHandleDereference allow_handle_dereference;
- return object<MutableHeapNumber>()->value();
+} // namespace
+
+class AllocationSiteData : public HeapObjectData {
+ public:
+ AllocationSiteData(JSHeapBroker* broker, Handle<AllocationSite> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker, object_, type_),
+ PointsToLiteral(object_->PointsToLiteral()),
+ GetPretenureMode(object_->GetPretenureMode()),
+ nested_site(GET_OR_CREATE(nested_site)) {
+ if (PointsToLiteral) {
+ if (IsInlinableFastLiteral(
+ handle(object_->boilerplate(), broker->isolate()))) {
+ boilerplate = GET_OR_CREATE(boilerplate)->AsJSObject();
+ }
+ } else {
+ GetElementsKind = object_->GetElementsKind();
+ CanInlineCall = object_->CanInlineCall();
+ }
+ }
+
+ bool const PointsToLiteral;
+ PretenureFlag const GetPretenureMode;
+ ObjectData* const nested_site;
+ JSObjectData* boilerplate = nullptr;
+
+ // These are only valid if PointsToLiteral is false.
+ ElementsKind GetElementsKind = NO_ELEMENTS;
+ bool CanInlineCall = false;
+};
+
+// Only used in JSNativeContextSpecialization.
+class ScriptContextTableData : public HeapObjectData {
+ public:
+ ScriptContextTableData(JSHeapBroker* broker_,
+ Handle<ScriptContextTable> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class MapData : public HeapObjectData {
+ public:
+ InstanceType const instance_type;
+ int const instance_size;
+ byte const bit_field;
+ byte const bit_field2;
+ uint32_t const bit_field3;
+
+ MapData(JSHeapBroker* broker_, Handle<Map> object_, HeapObjectType type_);
+
+ // Extra information.
+ void SerializeElementsKindGeneralizations();
+ const ZoneVector<MapData*>& elements_kind_generalizations() {
+ return elements_kind_generalizations_;
+ }
+
+ private:
+ ZoneVector<MapData*> elements_kind_generalizations_;
+};
+
+MapData::MapData(JSHeapBroker* broker_, Handle<Map> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_),
+ instance_type(object_->instance_type()),
+ instance_size(object_->instance_size()),
+ bit_field(object_->bit_field()),
+ bit_field2(object_->bit_field2()),
+ bit_field3(object_->bit_field3()),
+ elements_kind_generalizations_(broker->zone()) {}
+
+JSFunctionData::JSFunctionData(JSHeapBroker* broker_,
+ Handle<JSFunction> object_, HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_),
+ global_proxy(GET_OR_CREATE(global_proxy)->AsJSGlobalProxy()),
+ initial_map(object_->has_prototype_slot() && object_->has_initial_map()
+ ? GET_OR_CREATE(initial_map)->AsMap()
+ : nullptr),
+ has_prototype(object_->has_prototype_slot() && object_->has_prototype()),
+ prototype(has_prototype ? GET_OR_CREATE(prototype) : nullptr),
+ PrototypeRequiresRuntimeLookup(object_->PrototypeRequiresRuntimeLookup()),
+ shared(GET_OR_CREATE(shared)->AsSharedFunctionInfo()) {
+ if (initial_map != nullptr && initial_map->instance_type == JS_ARRAY_TYPE) {
+ initial_map->SerializeElementsKindGeneralizations();
+ }
}
-bool ObjectRef::IsSmi() const {
- AllowHandleDereference allow_handle_dereference;
- return object_->IsSmi();
+void MapData::SerializeElementsKindGeneralizations() {
+ broker->Trace("Computing ElementsKind generalizations of %p.\n", *object);
+ DCHECK_EQ(instance_type, JS_ARRAY_TYPE);
+ MapRef self(this);
+ ElementsKind from_kind = self.elements_kind();
+ for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
+ ElementsKind to_kind = static_cast<ElementsKind>(i);
+ if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
+ Handle<Map> target =
+ Map::AsElementsKind(broker->isolate(), self.object<Map>(), to_kind);
+ elements_kind_generalizations_.push_back(
+ broker->GetOrCreateData(target)->AsMap());
+ }
+ }
}
-int ObjectRef::AsSmi() const { return object<Smi>()->value(); }
+class FeedbackVectorData : public HeapObjectData {
+ public:
+ const ZoneVector<ObjectData*>& feedback() { return feedback_; }
+
+ FeedbackVectorData(JSHeapBroker* broker_, Handle<FeedbackVector> object_,
+ HeapObjectType type_);
+
+ private:
+ ZoneVector<ObjectData*> feedback_;
+};
+
+FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker_,
+ Handle<FeedbackVector> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_), feedback_(broker_->zone()) {
+ feedback_.reserve(object_->length());
+ for (int i = 0; i < object_->length(); ++i) {
+ MaybeObject* value = object_->get(i);
+ feedback_.push_back(value->IsObject()
+ ? broker->GetOrCreateData(
+ handle(value->ToObject(), broker->isolate()))
+ : nullptr);
+ }
+ DCHECK_EQ(object_->length(), feedback_.size());
+}
+
+class FixedArrayBaseData : public HeapObjectData {
+ public:
+ int const length;
+
+ FixedArrayBaseData(JSHeapBroker* broker_, Handle<FixedArrayBase> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_), length(object_->length()) {}
+};
+
+class FixedArrayData : public FixedArrayBaseData {
+ public:
+ FixedArrayData(JSHeapBroker* broker_, Handle<FixedArray> object_,
+ HeapObjectType type_)
+ : FixedArrayBaseData(broker_, object_, type_) {}
+};
+
+class FixedDoubleArrayData : public FixedArrayBaseData {
+ public:
+ FixedDoubleArrayData(JSHeapBroker* broker_, Handle<FixedDoubleArray> object_,
+ HeapObjectType type_)
+ : FixedArrayBaseData(broker_, object_, type_) {}
+};
+
+class BytecodeArrayData : public FixedArrayBaseData {
+ public:
+ int const register_count;
+
+ BytecodeArrayData(JSHeapBroker* broker_, Handle<BytecodeArray> object_,
+ HeapObjectType type_)
+ : FixedArrayBaseData(broker_, object_, type_),
+ register_count(object_->register_count()) {}
+};
+
+class JSArrayData : public JSObjectData {
+ public:
+ JSArrayData(JSHeapBroker* broker_, Handle<JSArray> object_,
+ HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_) {}
+};
+
+class ScopeInfoData : public HeapObjectData {
+ public:
+ ScopeInfoData(JSHeapBroker* broker_, Handle<ScopeInfo> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class SharedFunctionInfoData : public HeapObjectData {
+ public:
+ int const builtin_id;
+ BytecodeArrayData* const GetBytecodeArray; // Can be nullptr.
+#define DECL_MEMBER(type, name) type const name;
+ BROKER_SFI_FIELDS(DECL_MEMBER)
+#undef DECL_MEMBER
+
+ SharedFunctionInfoData(JSHeapBroker* broker_,
+ Handle<SharedFunctionInfo> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_),
+ builtin_id(object_->HasBuiltinId() ? object_->builtin_id()
+ : Builtins::kNoBuiltinId),
+ GetBytecodeArray(
+ object_->HasBytecodeArray()
+ ? GET_OR_CREATE(GetBytecodeArray)->AsBytecodeArray()
+ : nullptr)
+#define INIT_MEMBER(type, name) , name(object_->name())
+ BROKER_SFI_FIELDS(INIT_MEMBER)
+#undef INIT_MEMBER
+ {
+ DCHECK_EQ(HasBuiltinId, builtin_id != Builtins::kNoBuiltinId);
+ DCHECK_EQ(HasBytecodeArray, GetBytecodeArray != nullptr);
+ }
+};
+
+class ModuleData : public HeapObjectData {
+ public:
+ ModuleData(JSHeapBroker* broker_, Handle<Module> object_,
+ HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class CellData : public HeapObjectData {
+ public:
+ CellData(JSHeapBroker* broker_, Handle<Cell> object_, HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+class JSGlobalProxyData : public JSObjectData {
+ public:
+ JSGlobalProxyData(JSHeapBroker* broker_, Handle<JSGlobalProxy> object_,
+ HeapObjectType type_)
+ : JSObjectData(broker_, object_, type_) {}
+};
+
+class CodeData : public HeapObjectData {
+ public:
+ CodeData(JSHeapBroker* broker_, Handle<Code> object_, HeapObjectType type_)
+ : HeapObjectData(broker_, object_, type_) {}
+};
+
+#define DEFINE_IS_AND_AS(Name) \
+ bool ObjectData::Is##Name() const { \
+ if (broker->mode() == JSHeapBroker::kDisabled) { \
+ AllowHandleDereference allow_handle_dereference; \
+ return object->Is##Name(); \
+ } \
+ if (is_smi) return false; \
+ InstanceType instance_type = \
+ static_cast<const HeapObjectData*>(this)->type.instance_type(); \
+ return InstanceTypeChecker::Is##Name(instance_type); \
+ } \
+ Name##Data* ObjectData::As##Name() { \
+ CHECK_NE(broker->mode(), JSHeapBroker::kDisabled); \
+ CHECK(Is##Name()); \
+ return static_cast<Name##Data*>(this); \
+ }
+HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
+#undef DEFINE_IS_AND_AS
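For illustration, instantiating the macro above for the Map entry of HEAP_BROKER_OBJECT_LIST produces approximately

    bool ObjectData::IsMap() const {
      if (broker->mode() == JSHeapBroker::kDisabled) {
        AllowHandleDereference allow_handle_dereference;
        return object->IsMap();
      }
      if (is_smi) return false;
      InstanceType instance_type =
          static_cast<const HeapObjectData*>(this)->type.instance_type();
      return InstanceTypeChecker::IsMap(instance_type);
    }
    MapData* ObjectData::AsMap() {
      CHECK_NE(broker->mode(), JSHeapBroker::kDisabled);
      CHECK(IsMap());
      return static_cast<MapData*>(this);
    }

i.e. type checks fall back to the handle only while the broker is disabled, and downcasts are guarded by the same check.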
+
+ObjectData* ObjectData::Serialize(JSHeapBroker* broker, Handle<Object> object) {
+ CHECK(broker->SerializingAllowed());
+ return object->IsSmi() ? new (broker->zone()) ObjectData(broker, object, true)
+ : HeapObjectData::Serialize(
+ broker, Handle<HeapObject>::cast(object));
+}
+
+HeapObjectData* HeapObjectData::Serialize(JSHeapBroker* broker,
+ Handle<HeapObject> object) {
+ CHECK(broker->SerializingAllowed());
+ Handle<Map> map(object->map(), broker->isolate());
+ HeapObjectType type = broker->HeapObjectTypeFromMap(map);
+
+#define RETURN_CREATE_DATA_IF_MATCH(name) \
+ if (object->Is##name()) { \
+ return new (broker->zone()) \
+ name##Data(broker, Handle<name>::cast(object), type); \
+ }
+ HEAP_BROKER_OBJECT_LIST(RETURN_CREATE_DATA_IF_MATCH)
+#undef RETURN_CREATE_DATA_IF_MATCH
+ UNREACHABLE();
+}
bool ObjectRef::equals(const ObjectRef& other) const {
- return object<Object>().equals(other.object<Object>());
+ return data_ == other.data_;
}
StringRef ObjectRef::TypeOf() const {
@@ -48,6 +535,8 @@ StringRef ObjectRef::TypeOf() const {
Object::TypeOf(broker()->isolate(), object<Object>()));
}
+Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
+
base::Optional<ContextRef> ContextRef::previous() const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
@@ -63,7 +552,84 @@ ObjectRef ContextRef::get(int index) const {
return ObjectRef(broker(), value);
}
-JSHeapBroker::JSHeapBroker(Isolate* isolate) : isolate_(isolate) {}
+JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* zone)
+ : isolate_(isolate),
+ zone_(zone),
+ refs_(zone),
+ mode_(FLAG_concurrent_compiler_frontend ? kSerializing : kDisabled) {
+ Trace("%s", "Constructing heap broker.\n");
+}
+
+void JSHeapBroker::Trace(const char* format, ...) const {
+ if (FLAG_trace_heap_broker) {
+ PrintF("[%p] ", this);
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VPrint(format, arguments);
+ va_end(arguments);
+ }
+}
+
+bool JSHeapBroker::SerializingAllowed() const {
+ return mode() == kSerializing ||
+ (!FLAG_strict_heap_broker && mode() == kSerialized);
+}
+
+void JSHeapBroker::SerializeStandardObjects() {
+ Trace("Serializing standard objects.\n");
+
+ Builtins* const b = isolate()->builtins();
+ Factory* const f = isolate()->factory();
+
+ // Stuff used by JSGraph:
+ GetOrCreateData(f->empty_fixed_array());
+
+ // Stuff used by JSCreateLowering:
+ GetOrCreateData(f->block_context_map());
+ GetOrCreateData(f->catch_context_map());
+ GetOrCreateData(f->eval_context_map());
+ GetOrCreateData(f->fixed_array_map());
+ GetOrCreateData(f->fixed_double_array_map());
+ GetOrCreateData(f->function_context_map());
+ GetOrCreateData(f->many_closures_cell_map());
+ GetOrCreateData(f->mutable_heap_number_map());
+ GetOrCreateData(f->name_dictionary_map());
+ GetOrCreateData(f->one_pointer_filler_map());
+ GetOrCreateData(f->sloppy_arguments_elements_map());
+ GetOrCreateData(f->with_context_map());
+
+ // Stuff used by TypedOptimization:
+ // Strings produced by typeof:
+ GetOrCreateData(f->boolean_string());
+ GetOrCreateData(f->number_string());
+ GetOrCreateData(f->string_string());
+ GetOrCreateData(f->bigint_string());
+ GetOrCreateData(f->symbol_string());
+ GetOrCreateData(f->undefined_string());
+ GetOrCreateData(f->object_string());
+ GetOrCreateData(f->function_string());
+
+ // Stuff used by JSTypedLowering:
+ GetOrCreateData(f->length_string());
+ Builtins::Name builtins[] = {
+ Builtins::kArgumentsAdaptorTrampoline,
+ Builtins::kCallFunctionForwardVarargs,
+ Builtins::kStringAdd_CheckNone_NotTenured,
+ Builtins::kStringAdd_CheckNone_Tenured,
+ Builtins::kStringAdd_ConvertLeft_NotTenured,
+ Builtins::kStringAdd_ConvertRight_NotTenured,
+ };
+ for (auto id : builtins) {
+ GetOrCreateData(b->builtin_handle(id));
+ }
+ for (int32_t id = 0; id < Builtins::builtin_count; ++id) {
+ if (Builtins::KindOf(id) == Builtins::TFJ) {
+ GetOrCreateData(b->builtin_handle(id));
+ }
+ }
+
+ Trace("Finished serializing standard objects.\n");
+}
HeapObjectType JSHeapBroker::HeapObjectTypeFromMap(Map* map) const {
AllowHandleDereference allow_handle_dereference;
@@ -95,28 +661,58 @@ HeapObjectType JSHeapBroker::HeapObjectTypeFromMap(Map* map) const {
return HeapObjectType(map->instance_type(), flags, oddball_type);
}
-// static
-base::Optional<int> JSHeapBroker::TryGetSmi(Handle<Object> object) {
- AllowHandleDereference allow_handle_dereference;
- if (!object->IsSmi()) return base::Optional<int>();
- return Smi::cast(*object)->value();
+ObjectData* JSHeapBroker::GetData(Handle<Object> object) const {
+ auto it = refs_.find(object.address());
+ return it != refs_.end() ? it->second : nullptr;
+}
+
+ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
+ CHECK(SerializingAllowed());
+ ObjectData* data = GetData(object);
+ if (data == nullptr) {
+ // TODO(neis): Remove these Allow* once we serialize everything upfront.
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ data = ObjectData::Serialize(this, object);
+ }
+ CHECK_NOT_NULL(data);
+ return data;
}
-#define DEFINE_IS_AND_AS(Name) \
- bool ObjectRef::Is##Name() const { \
- AllowHandleDereference allow_handle_dereference; \
- return object<Object>()->Is##Name(); \
- } \
- Name##Ref ObjectRef::As##Name() const { \
- DCHECK(Is##Name()); \
- return Name##Ref(broker(), object<HeapObject>()); \
+void JSHeapBroker::AddData(Handle<Object> object, ObjectData* data) {
+ Trace("Creating data %p for handle %" V8PRIuPTR " (", data, object.address());
+ if (FLAG_trace_heap_broker) {
+ object->ShortPrint();
+ PrintF(")\n");
+ }
+ CHECK_NOT_NULL(isolate()->handle_scope_data()->canonical_scope);
+ CHECK(refs_.insert({object.address(), data}).second);
+}
+
+#define DEFINE_IS_AND_AS(Name) \
+ bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
+ Name##Ref ObjectRef::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return Name##Ref(data()); \
}
HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
+bool ObjectRef::IsSmi() const { return data()->is_smi; }
+
+int ObjectRef::AsSmi() const {
+ DCHECK(IsSmi());
+ // Handle-dereference is always allowed for Handle<Smi>.
+ return object<Smi>()->value();
+}
+
HeapObjectType HeapObjectRef::type() const {
- AllowHandleDereference allow_handle_dereference;
- return broker()->HeapObjectTypeFromMap(object<HeapObject>()->map());
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return broker()->HeapObjectTypeFromMap(object<HeapObject>()->map());
+ } else {
+ return data()->AsHeapObject()->type;
+ }
}
base::Optional<MapRef> HeapObjectRef::TryGetObjectCreateMap() const {
@@ -131,75 +727,31 @@ base::Optional<MapRef> HeapObjectRef::TryGetObjectCreateMap() const {
}
}
-bool HeapObjectRef::IsSeqString() const {
- AllowHandleDereference allow_handle_dereference;
- return object<HeapObject>()->IsSeqString();
-}
-
-bool HeapObjectRef::IsExternalString() const {
- AllowHandleDereference allow_handle_dereference;
- return object<HeapObject>()->IsExternalString();
-}
-
-bool JSFunctionRef::HasBuiltinFunctionId() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->shared()->HasBuiltinFunctionId();
-}
-
-BuiltinFunctionId JSFunctionRef::GetBuiltinFunctionId() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->shared()->builtin_function_id();
-}
-
-bool JSFunctionRef::IsConstructor() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->IsConstructor();
-}
-
-void JSFunctionRef::EnsureHasInitialMap() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- AllowHeapAllocation heap_allocation;
- // TODO(jarin) Eventually, we will prepare initial maps for resumable
- // functions (i.e., generators).
- DCHECK(IsResumableFunction(object<JSFunction>()->shared()->kind()));
- JSFunction::EnsureHasInitialMap(object<JSFunction>());
-}
-
-SlackTrackingResult JSFunctionRef::FinishSlackTracking() const {
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation handle_allocation;
- object<JSFunction>()->CompleteInobjectSlackTrackingIfActive();
- int instance_size = object<JSFunction>()->initial_map()->instance_size();
- int inobject_property_count =
- object<JSFunction>()->initial_map()->GetInObjectProperties();
- return SlackTrackingResult(instance_size, inobject_property_count);
-}
-
-bool JSFunctionRef::has_initial_map() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSFunction>()->has_initial_map();
+base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHeapAllocation heap_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return MapRef(broker(), Map::AsElementsKind(broker()->isolate(),
+ object<Map>(), kind));
+ } else {
+ if (kind == elements_kind()) return *this;
+ const ZoneVector<MapData*>& elements_kind_generalizations =
+ data()->AsMap()->elements_kind_generalizations();
+ for (auto data : elements_kind_generalizations) {
+ MapRef map(data);
+ if (map.elements_kind() == kind) return map;
+ }
+ return base::Optional<MapRef>();
+ }
}
-MapRef JSFunctionRef::initial_map() const {
- AllowHandleAllocation handle_allocation;
+int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<JSFunction>()->initial_map(),
- broker()->isolate()));
-}
-
-SharedFunctionInfoRef JSFunctionRef::shared() const {
AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return SharedFunctionInfoRef(
- broker(), handle(object<JSFunction>()->shared(), broker()->isolate()));
-}
-JSGlobalProxyRef JSFunctionRef::global_proxy() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return JSGlobalProxyRef(broker(), handle(object<JSFunction>()->global_proxy(),
- broker()->isolate()));
+ return object<JSFunction>()->ComputeInstanceSizeWithMinSlack(
+ broker()->isolate());
}
base::Optional<ScriptContextTableRef::LookupResult>
@@ -221,50 +773,20 @@ ScriptContextTableRef::lookup(const NameRef& name) const {
return result;
}
-ScriptContextTableRef NativeContextRef::script_context_table() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- return ScriptContextTableRef(
- broker(),
- handle(object<Context>()->script_context_table(), broker()->isolate()));
-}
-
OddballType ObjectRef::oddball_type() const {
return IsSmi() ? OddballType::kNone : AsHeapObject().type().oddball_type();
}
ObjectRef FeedbackVectorRef::get(FeedbackSlot slot) const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<Object> value(object<FeedbackVector>()->Get(slot)->ToObject(),
- broker()->isolate());
- return ObjectRef(broker(), value);
-}
-
-JSObjectRef AllocationSiteRef::boilerplate() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<JSObject> value(object<AllocationSite>()->boilerplate(),
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ Handle<Object> value(object<FeedbackVector>()->Get(slot)->ToObject(),
broker()->isolate());
- return JSObjectRef(broker(), value);
-}
-
-ObjectRef AllocationSiteRef::nested_site() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- Handle<Object> obj(object<AllocationSite>()->nested_site(),
- broker()->isolate());
- return ObjectRef(broker(), obj);
-}
-
-bool AllocationSiteRef::PointsToLiteral() const {
- AllowHandleDereference handle_dereference;
- return object<AllocationSite>()->PointsToLiteral();
-}
-
-ElementsKind AllocationSiteRef::GetElementsKind() const {
- AllowHandleDereference handle_dereference;
- return object<AllocationSite>()->GetElementsKind();
+ return ObjectRef(broker(), value);
+ }
+ int i = FeedbackVector::GetIndex(slot);
+ return ObjectRef(data()->AsFeedbackVector()->feedback().at(i));
}
bool JSObjectRef::IsUnboxedDoubleField(FieldIndex index) const {
@@ -285,114 +807,18 @@ ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
broker()->isolate()));
}
-ElementsKind JSObjectRef::GetElementsKind() {
- AllowHandleDereference handle_dereference;
- return object<JSObject>()->GetElementsKind();
-}
-
-FixedArrayBaseRef JSObjectRef::elements() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference handle_dereference;
- return FixedArrayBaseRef(
- broker(), handle(object<JSObject>()->elements(), broker()->isolate()));
-}
-
-namespace {
-
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
- int* max_properties) {
- DCHECK_GE(max_depth, 0);
- DCHECK_GE(*max_properties, 0);
-
- // Make sure the boilerplate map is not deprecated.
- if (!JSObject::TryMigrateInstance(boilerplate)) return false;
-
- // Check for too deep nesting.
- if (max_depth == 0) return false;
-
- // Check the elements.
- Isolate* const isolate = boilerplate->GetIsolate();
- Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
- if (elements->length() > 0 &&
- elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
- if (boilerplate->HasSmiOrObjectElements()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteralHelper(value_object, max_depth - 1,
- max_properties)) {
- return false;
- }
- }
- }
- } else if (boilerplate->HasDoubleElements()) {
- if (elements->Size() > kMaxRegularHeapObjectSize) return false;
- } else {
- return false;
- }
- }
-
- // TODO(turbofan): Do we want to support out-of-object properties?
- if (!(boilerplate->HasFastProperties() &&
- boilerplate->property_array()->length() == 0)) {
- return false;
- }
-
- // Check the in-object properties.
- Handle<DescriptorArray> descriptors(
- boilerplate->map()->instance_descriptors(), isolate);
- int limit = boilerplate->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- if ((*max_properties)-- == 0) return false;
- FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
- return false;
- }
- }
- }
- return true;
-}
-
-} // namespace
-
-// Maximum depth and total number of elements and properties for literal
-// graphs to be considered for fast deep-copying. The limit is chosen to
-// match the maximum number of inobject properties, to ensure that the
-// performance of using object literals is not worse than using constructor
-// functions, see crbug.com/v8/6211 for details.
-const int kMaxFastLiteralDepth = 3;
-const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
bool AllocationSiteRef::IsFastLiteral() const {
- AllowHandleAllocation allow_handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- int max_properties = kMaxFastLiteralProperties;
- Handle<JSObject> boilerplate(object<AllocationSite>()->boilerplate(),
- broker()->isolate());
- return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
- &max_properties);
-}
-
-PretenureFlag AllocationSiteRef::GetPretenureMode() const {
- AllowHandleDereference allow_handle_dereference;
- return object<AllocationSite>()->GetPretenureMode();
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHeapAllocation
+ allow_heap_allocation; // This is needed for TryMigrateInstance.
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return IsInlinableFastLiteral(
+ handle(object<AllocationSite>()->boilerplate(), broker()->isolate()));
+ } else {
+ return data()->AsAllocationSite()->boilerplate != nullptr;
+ }
}
void JSObjectRef::EnsureElementsTenured() {
@@ -400,43 +826,20 @@ void JSObjectRef::EnsureElementsTenured() {
// the compilation job starts.
AllowHandleAllocation allow_handle_allocation;
AllowHandleDereference allow_handle_dereference;
+ AllowHeapAllocation allow_heap_allocation;
+
Handle<FixedArrayBase> object_elements = elements().object<FixedArrayBase>();
if (Heap::InNewSpace(*object_elements)) {
// If we would like to pretenure a fixed cow array, we must ensure that
// the array is already in old space, otherwise we'll create too many
// old-to-new-space pointers (overflowing the store buffer).
- object_elements = Handle<FixedArrayBase>(
+ object_elements =
broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
- Handle<FixedArray>::cast(object_elements)));
+ Handle<FixedArray>::cast(object_elements));
object<JSObject>()->set_elements(*object_elements);
}
}
-ElementsKind MapRef::elements_kind() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->elements_kind();
-}
-
-bool MapRef::is_deprecated() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->is_deprecated();
-}
-
-bool MapRef::CanBeDeprecated() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->CanBeDeprecated();
-}
-
-int MapRef::GetInObjectProperties() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->GetInObjectProperties();
-}
-
-int MapRef::NumberOfOwnDescriptors() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->NumberOfOwnDescriptors();
-}
-
FieldIndex MapRef::GetFieldIndexFor(int i) const {
AllowHandleDereference allow_handle_dereference;
return FieldIndex::ForDescriptor(*object<Map>(), i);
@@ -447,28 +850,6 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
return object<Map>()->GetInObjectPropertyOffset(i);
}
-bool MapRef::is_dictionary_map() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->is_dictionary_map();
-}
-
-ObjectRef MapRef::constructor_or_backpointer() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(), handle(object<Map>()->constructor_or_backpointer(),
- broker()->isolate()));
-}
-
-int MapRef::instance_size() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->instance_size();
-}
-
-InstanceType MapRef::instance_type() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->instance_type();
-}
-
PropertyDetails MapRef::GetPropertyDetails(int i) const {
AllowHandleDereference allow_handle_dereference;
return object<Map>()->instance_descriptors()->GetDetails(i);
@@ -482,37 +863,12 @@ NameRef MapRef::GetPropertyKey(int i) const {
broker()->isolate()));
}
-bool MapRef::IsJSArrayMap() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->IsJSArrayMap();
-}
-
-bool MapRef::IsInobjectSlackTrackingInProgress() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->IsInobjectSlackTrackingInProgress();
-}
-
bool MapRef::IsFixedCowArrayMap() const {
AllowHandleDereference allow_handle_dereference;
return *object<Map>() ==
ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map();
}
-bool MapRef::has_prototype_slot() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->has_prototype_slot();
-}
-
-bool MapRef::is_stable() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->is_stable();
-}
-
-bool MapRef::CanTransition() const {
- AllowHandleDereference allow_handle_dereference;
- return object<Map>()->CanTransition();
-}
-
MapRef MapRef::FindFieldOwner(int descriptor) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
@@ -522,86 +878,36 @@ MapRef MapRef::FindFieldOwner(int descriptor) const {
return MapRef(broker(), owner);
}
-FieldTypeRef MapRef::GetFieldType(int descriptor) const {
+ObjectRef MapRef::GetFieldType(int descriptor) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
Handle<FieldType> field_type(
object<Map>()->instance_descriptors()->GetFieldType(descriptor),
broker()->isolate());
- return FieldTypeRef(broker(), field_type);
-}
-
-ElementsKind JSArrayRef::GetElementsKind() const {
- AllowHandleDereference allow_handle_dereference;
- return object<JSArray>()->GetElementsKind();
-}
-
-ObjectRef JSArrayRef::length() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSArray>()->length(), broker()->isolate()));
-}
-
-int StringRef::length() const {
- AllowHandleDereference allow_handle_dereference;
- return object<String>()->length();
+ return ObjectRef(broker(), field_type);
}
uint16_t StringRef::GetFirstChar() {
- AllowHandleDereference allow_handle_dereference;
- return object<String>()->Get(0);
-}
-
-double StringRef::ToNumber() {
- AllowHandleDereference allow_handle_dereference;
- AllowHandleAllocation allow_handle_allocation;
- AllowHeapAllocation allow_heap_allocation;
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return StringToDouble(broker()->isolate(),
- broker()->isolate()->unicode_cache(), object<String>(),
- flags);
-}
-
-ObjectRef JSRegExpRef::raw_properties_or_hash() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->raw_properties_or_hash(),
- broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::data() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->data(), broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::source() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->source(), broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::flags() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(broker(),
- handle(object<JSRegExp>()->flags(), broker()->isolate()));
-}
-
-ObjectRef JSRegExpRef::last_index() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(
- broker(), handle(object<JSRegExp>()->last_index(), broker()->isolate()));
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object<String>()->Get(0);
+ } else {
+ return data()->AsString()->first_char;
+ }
}
-int FixedArrayBaseRef::length() const {
- AllowHandleDereference allow_handle_dereference;
- return object<FixedArrayBase>()->length();
+base::Optional<double> StringRef::ToNumber() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHeapAllocation allow_heap_allocation;
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ return StringToDouble(broker()->isolate(),
+ broker()->isolate()->unicode_cache(),
+ object<String>(), flags);
+ } else {
+ return data()->AsString()->to_number;
+ }
}
bool FixedArrayRef::is_the_hole(int i) const {
@@ -626,166 +932,139 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
return object<FixedDoubleArray>()->get_scalar(i);
}
-int ScopeInfoRef::ContextLength() const {
- AllowHandleDereference allow_handle_dereference;
- return object<ScopeInfo>()->ContextLength();
-}
+#define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \
+ if (broker()->mode() == JSHeapBroker::kDisabled) { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return object<holder>()->name(); \
+ }
-int SharedFunctionInfoRef::internal_formal_parameter_count() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->internal_formal_parameter_count();
-}
+// Macros for definining a const getter that, depending on the broker mode,
+// either looks into the handle or into the serialized data. The first one is
+// used for the rare case of a XYZRef class that does not have a corresponding
+// XYZ class in objects.h. The second one is used otherwise.
+#define BIMODAL_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ if (broker()->mode() == JSHeapBroker::kDisabled) { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return result##Ref( \
+ broker(), handle(object<holder>()->name(), broker()->isolate())); \
+ } else { \
+ return result##Ref(data()->As##holder()->name); \
+ } \
+ }
-int SharedFunctionInfoRef::function_map_index() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->function_map_index();
-}
+// Like BIMODAL_ACCESSOR except that the result type is not an XYZRef.
+#define BIMODAL_ACCESSOR_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
+ return data()->As##holder()->name; \
+ }
-bool SharedFunctionInfoRef::has_duplicate_parameters() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->has_duplicate_parameters();
-}
+// Like BIMODAL_ACCESSOR_C but for BitFields.
+#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
+ typename BitField::FieldType holder##Ref::name() const { \
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name); \
+ return BitField::decode(data()->As##holder()->field); \
+ }
-FunctionKind SharedFunctionInfoRef::kind() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->kind();
-}
+// Macros for defining a const getter that always looks into the handle.
+// (These will go away once we serialize everything.) The first one is used for
+// the rare case of an XYZRef class that does not have a corresponding XYZ
+// class in objects.h. The second one is used otherwise.
+#define HANDLE_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return result##Ref(broker(), \
+ handle(object<holder>()->name(), broker()->isolate())); \
+ }
-LanguageMode SharedFunctionInfoRef::language_mode() {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->language_mode();
-}
+// Like HANDLE_ACCESSOR except that the result type is not an XYZRef.
+#define HANDLE_ACCESSOR_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ AllowHandleAllocation handle_allocation; \
+ AllowHandleDereference allow_handle_dereference; \
+ return object<holder>()->name(); \
+ }
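As a sketch of the bimodal pattern, BIMODAL_ACCESSOR_C(Map, int, instance_size) from the list below expands to roughly

    int MapRef::instance_size() const {
      if (broker()->mode() == JSHeapBroker::kDisabled) {
        AllowHandleAllocation handle_allocation;
        AllowHandleDereference allow_handle_dereference;
        return object<Map>()->instance_size();
      }
      return data()->AsMap()->instance_size;
    }

reading the handle directly when the broker is disabled and the serialized MapData field of the same name otherwise.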
-bool SharedFunctionInfoRef::native() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->native();
-}
+BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
+BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
+BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
+BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
+BIMODAL_ACCESSOR_C(AllocationSite, PretenureFlag, GetPretenureMode)
-bool SharedFunctionInfoRef::HasBreakInfo() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->HasBreakInfo();
-}
+BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
-bool SharedFunctionInfoRef::HasBuiltinId() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->HasBuiltinId();
-}
+BIMODAL_ACCESSOR_C(FixedArrayBase, int, length)
-int SharedFunctionInfoRef::builtin_id() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->builtin_id();
-}
+BIMODAL_ACCESSOR(HeapObject, Map, map)
+HANDLE_ACCESSOR_C(HeapObject, bool, IsExternalString)
+HANDLE_ACCESSOR_C(HeapObject, bool, IsSeqString)
-bool SharedFunctionInfoRef::construct_as_builtin() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->construct_as_builtin();
-}
+HANDLE_ACCESSOR_C(HeapNumber, double, value)
-bool SharedFunctionInfoRef::HasBytecodeArray() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->HasBytecodeArray();
-}
+HANDLE_ACCESSOR(JSArray, Object, length)
-int SharedFunctionInfoRef::GetBytecodeArrayRegisterCount() const {
- AllowHandleDereference allow_handle_dereference;
- return object<SharedFunctionInfo>()->GetBytecodeArray()->register_count();
-}
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
+BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
+BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
+BIMODAL_ACCESSOR(JSFunction, Object, prototype)
+HANDLE_ACCESSOR_C(JSFunction, bool, IsConstructor)
+HANDLE_ACCESSOR(JSFunction, JSGlobalProxy, global_proxy)
+HANDLE_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
-MapRef NativeContextRef::fast_aliased_arguments_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->fast_aliased_arguments_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR(JSObject, FixedArrayBase, elements)
-MapRef NativeContextRef::sloppy_arguments_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->sloppy_arguments_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR(JSRegExp, Object, data)
+HANDLE_ACCESSOR(JSRegExp, Object, flags)
+HANDLE_ACCESSOR(JSRegExp, Object, last_index)
+HANDLE_ACCESSOR(JSRegExp, Object, raw_properties_or_hash)
+HANDLE_ACCESSOR(JSRegExp, Object, source)
-MapRef NativeContextRef::strict_arguments_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->strict_arguments_map(),
- broker()->isolate()));
-}
+BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
+BIMODAL_ACCESSOR_C(Map, int, instance_size)
+HANDLE_ACCESSOR_C(Map, bool, CanBeDeprecated)
+HANDLE_ACCESSOR_C(Map, bool, CanTransition)
+HANDLE_ACCESSOR_C(Map, bool, IsInobjectSlackTrackingInProgress)
+HANDLE_ACCESSOR_C(Map, bool, IsJSArrayMap)
+HANDLE_ACCESSOR_C(Map, bool, is_stable)
+HANDLE_ACCESSOR_C(Map, InstanceType, instance_type)
+HANDLE_ACCESSOR_C(Map, int, GetInObjectProperties)
+HANDLE_ACCESSOR_C(Map, int, GetInObjectPropertiesStartInWords)
+HANDLE_ACCESSOR_C(Map, int, NumberOfOwnDescriptors)
+HANDLE_ACCESSOR(Map, Object, constructor_or_backpointer)
-MapRef NativeContextRef::js_array_fast_elements_map_index() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->js_array_fast_elements_map_index(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR_C(MutableHeapNumber, double, value)
-MapRef NativeContextRef::initial_array_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->initial_array_iterator_map(),
- broker()->isolate()));
-}
+#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
+ BIMODAL_ACCESSOR(NativeContext, type, name)
+BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
+#undef DEF_NATIVE_CONTEXT_ACCESSOR
-MapRef NativeContextRef::set_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->set_value_iterator_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR(PropertyCell, Object, value)
+HANDLE_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-MapRef NativeContextRef::set_key_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->set_key_value_iterator_map(),
- broker()->isolate()));
-}
+HANDLE_ACCESSOR_C(ScopeInfo, int, ContextLength)
-MapRef NativeContextRef::map_key_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->map_key_iterator_map(),
- broker()->isolate()));
-}
+BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
+BIMODAL_ACCESSOR(SharedFunctionInfo, BytecodeArray, GetBytecodeArray)
+#define DEF_SFI_ACCESSOR(type, name) \
+ BIMODAL_ACCESSOR_C(SharedFunctionInfo, type, name)
+BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
+#undef DEF_SFI_ACCESSOR
-MapRef NativeContextRef::map_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->map_value_iterator_map(),
- broker()->isolate()));
-}
+BIMODAL_ACCESSOR_C(String, int, length)
-MapRef NativeContextRef::map_key_value_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->map_key_value_iterator_map(),
- broker()->isolate()));
-}
+// TODO(neis): Provide StringShape() on StringRef.
-MapRef NativeContextRef::iterator_result_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->iterator_result_map(),
- broker()->isolate()));
-}
-
-MapRef NativeContextRef::string_iterator_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(), handle(object<Context>()->string_iterator_map(),
- broker()->isolate()));
-}
-
-MapRef NativeContextRef::promise_function_initial_map() const {
- AllowHandleAllocation handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return MapRef(broker(),
- handle(object<Context>()->promise_function()->initial_map(),
- broker()->isolate()));
+bool JSFunctionRef::has_initial_map() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(JSFunction, has_initial_map);
+ return data()->AsJSFunction()->initial_map != nullptr;
}
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
@@ -794,6 +1073,25 @@ MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
return get(index).AsMap();
}
+MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
+ switch (kind) {
+ case PACKED_SMI_ELEMENTS:
+ return js_array_packed_smi_elements_map();
+ case HOLEY_SMI_ELEMENTS:
+ return js_array_holey_smi_elements_map();
+ case PACKED_DOUBLE_ELEMENTS:
+ return js_array_packed_double_elements_map();
+ case HOLEY_DOUBLE_ELEMENTS:
+ return js_array_holey_double_elements_map();
+ case PACKED_ELEMENTS:
+ return js_array_packed_elements_map();
+ case HOLEY_ELEMENTS:
+ return js_array_holey_elements_map();
+ default:
+ UNREACHABLE();
+ }
+}
+
bool ObjectRef::BooleanValue() {
AllowHandleDereference allow_handle_dereference;
return object<Object>()->BooleanValue(broker()->isolate());
@@ -831,18 +1129,70 @@ CellRef ModuleRef::GetCell(int cell_index) {
broker()->isolate()));
}
-ObjectRef PropertyCellRef::value() const {
- AllowHandleAllocation allow_handle_allocation;
- AllowHandleDereference allow_handle_dereference;
- return ObjectRef(
- broker(), handle(object<PropertyCell>()->value(), broker()->isolate()));
+ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object) {
+ switch (broker->mode()) {
+ case JSHeapBroker::kSerialized:
+ data_ = FLAG_strict_heap_broker ? broker->GetData(object)
+ : broker->GetOrCreateData(object);
+ break;
+ case JSHeapBroker::kSerializing:
+ data_ = broker->GetOrCreateData(object);
+ break;
+ case JSHeapBroker::kDisabled:
+ data_ = broker->GetData(object);
+ if (data_ == nullptr) {
+ AllowHandleDereference handle_dereference;
+ data_ =
+ new (broker->zone()) ObjectData(broker, object, object->IsSmi());
+ }
+ break;
+ }
+ CHECK_NOT_NULL(data_);
}
-PropertyDetails PropertyCellRef::property_details() const {
- AllowHandleDereference allow_handle_dereference;
- return object<PropertyCell>()->property_details();
+base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return JSObjectRef(broker(), handle(object<AllocationSite>()->boilerplate(),
+ broker()->isolate()));
+ } else {
+ JSObjectData* boilerplate = data()->AsAllocationSite()->boilerplate;
+ if (boilerplate) {
+ return JSObjectRef(boilerplate);
+ } else {
+ return base::nullopt;
+ }
+ }
+}
+
+ElementsKind JSObjectRef::GetElementsKind() const {
+ return map().elements_kind();
}
+Handle<Object> ObjectRef::object() const { return data_->object; }
+
+JSHeapBroker* ObjectRef::broker() const { return data_->broker; }
+
+ObjectData* ObjectRef::data() const { return data_; }
+
+Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
+ const char* function, int line) {
+ if (FLAG_trace_heap_broker) {
+ PrintF("[%p] Skipping optimization in %s at line %d due to missing data\n",
+ broker, function, line);
+ }
+ return AdvancedReducer::NoChange();
+}
+
+#undef BIMODAL_ACCESSOR
+#undef BIMODAL_ACCESSOR_B
+#undef BIMODAL_ACCESSOR_C
+#undef GET_OR_CREATE
+#undef HANDLE_ACCESSOR
+#undef HANDLE_ACCESSOR_C
+#undef IF_BROKER_DISABLED_ACCESS_HANDLE_C
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 8503e82d12..7ea12ee733 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -9,18 +9,12 @@
#include "src/base/optional.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
-class DisallowHeapAccess {
- DisallowHeapAllocation no_heap_allocation_;
- DisallowHandleAllocation no_handle_allocation_;
- DisallowHandleDereference no_handle_dereference_;
- DisallowCodeDependencyChange no_dependency_change_;
-};
-
enum class OddballType : uint8_t {
kNone, // Not an Oddball.
kBoolean, // True or False.
@@ -31,6 +25,7 @@ enum class OddballType : uint8_t {
kOther // Oddball, but none of the above.
};
+// TODO(neis): Get rid of the HeapObjectType class.
class HeapObjectType {
public:
enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
@@ -59,49 +54,63 @@ class HeapObjectType {
Flags const flags_;
};
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
#define HEAP_BROKER_OBJECT_LIST(V) \
+ /* Subtypes of JSObject */ \
+ V(JSArray) \
+ V(JSFunction) \
+ V(JSGlobalProxy) \
+ V(JSRegExp) \
+ /* Subtypes of Context */ \
+ V(NativeContext) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(InternalizedString) \
+ V(String) \
+ /* Subtypes of HeapObject */ \
V(AllocationSite) \
V(Cell) \
V(Code) \
- V(Context) \
V(FeedbackVector) \
- V(FixedArray) \
+ V(Map) \
+ V(Module) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ V(SharedFunctionInfo) \
+ V(Context) \
V(FixedArrayBase) \
- V(FixedDoubleArray) \
V(HeapNumber) \
- V(HeapObject) \
- V(InternalizedString) \
- V(JSArray) \
- V(JSFunction) \
- V(JSGlobalProxy) \
V(JSObject) \
- V(JSRegExp) \
- V(Map) \
- V(Module) \
V(MutableHeapNumber) \
V(Name) \
- V(NativeContext) \
V(PropertyCell) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- V(SharedFunctionInfo) \
- V(String)
+ /* Subtypes of Object */ \
+ V(HeapObject)
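The ordering constraint matters because HeapObjectData::Serialize (in the .cc hunk above) walks this list and instantiates the first entry whose Is##name() check succeeds. For example, the JSArray and JSObject entries expand to roughly

    if (object->IsJSArray()) {
      return new (broker->zone())
          JSArrayData(broker, Handle<JSArray>::cast(object), type);
    }
    if (object->IsJSObject()) {
      return new (broker->zone())
          JSObjectData(broker, Handle<JSObject>::cast(object), type);
    }

so listing JSObject before JSArray would silently serialize arrays as plain JSObjectData.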
class CompilationDependencies;
class JSHeapBroker;
+class ObjectData;
#define FORWARD_DECL(Name) class Name##Ref;
HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
class ObjectRef {
public:
- explicit ObjectRef(const JSHeapBroker* broker, Handle<Object> object)
- : broker_(broker), object_(object) {}
+ ObjectRef(JSHeapBroker* broker, Handle<Object> object);
+ explicit ObjectRef(ObjectData* data) : data_(data) { CHECK_NOT_NULL(data_); }
+
+ bool equals(const ObjectRef& other) const;
+ Handle<Object> object() const;
+ // TODO(neis): Remove eventually.
template <typename T>
Handle<T> object() const {
AllowHandleDereference handle_dereference;
- return Handle<T>::cast(object_);
+ return Handle<T>::cast(object());
}
OddballType oddball_type() const;
@@ -109,8 +118,6 @@ class ObjectRef {
bool IsSmi() const;
int AsSmi() const;
- bool equals(const ObjectRef& other) const;
-
#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
#undef HEAP_IS_METHOD_DECL
@@ -123,17 +130,14 @@ class ObjectRef {
bool BooleanValue();
double OddballToNumber() const;
+ Isolate* isolate() const;
+
protected:
- const JSHeapBroker* broker() const { return broker_; }
+ JSHeapBroker* broker() const;
+ ObjectData* data() const;
private:
- const JSHeapBroker* broker_;
- Handle<Object> object_;
-};
-
-class FieldTypeRef : public ObjectRef {
- public:
- using ObjectRef::ObjectRef;
+ ObjectData* data_;
};
class HeapObjectRef : public ObjectRef {
@@ -165,30 +169,22 @@ class JSObjectRef : public HeapObjectRef {
FixedArrayBaseRef elements() const;
void EnsureElementsTenured();
- ElementsKind GetElementsKind();
-};
-
-struct SlackTrackingResult {
- SlackTrackingResult(int instance_sizex, int inobject_property_countx)
- : instance_size(instance_sizex),
- inobject_property_count(inobject_property_countx) {}
- int instance_size;
- int inobject_property_count;
+ ElementsKind GetElementsKind() const;
};
class JSFunctionRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
- bool HasBuiltinFunctionId() const;
- BuiltinFunctionId GetBuiltinFunctionId() const;
bool IsConstructor() const;
bool has_initial_map() const;
MapRef initial_map() const;
+ bool has_prototype() const;
+ ObjectRef prototype() const;
+ bool PrototypeRequiresRuntimeLookup() const;
JSGlobalProxyRef global_proxy() const;
- SlackTrackingResult FinishSlackTracking() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
SharedFunctionInfoRef shared() const;
- void EnsureHasInitialMap() const;
};
class JSRegExpRef : public JSObjectRef {
@@ -224,27 +220,39 @@ class ContextRef : public HeapObjectRef {
ObjectRef get(int index) const;
};
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(Map, set_key_value_iterator_map) \
+ V(Map, set_value_iterator_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, strict_arguments_map) \
+ V(Map, string_iterator_map) \
+ V(ScriptContextTable, script_context_table)
+
class NativeContextRef : public ContextRef {
public:
using ContextRef::ContextRef;
- ScriptContextTableRef script_context_table() const;
-
- MapRef fast_aliased_arguments_map() const;
- MapRef sloppy_arguments_map() const;
- MapRef strict_arguments_map() const;
- MapRef js_array_fast_elements_map_index() const;
- MapRef initial_array_iterator_map() const;
- MapRef set_value_iterator_map() const;
- MapRef set_key_value_iterator_map() const;
- MapRef map_key_iterator_map() const;
- MapRef map_value_iterator_map() const;
- MapRef map_key_value_iterator_map() const;
- MapRef iterator_result_map() const;
- MapRef string_iterator_map() const;
- MapRef promise_function_initial_map() const;
+#define DECL_ACCESSOR(type, name) type##Ref name() const;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
MapRef GetFunctionMapFromIndex(int index) const;
+ MapRef GetInitialJSArrayMap(ElementsKind kind) const;
};
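Each (type, name) pair in BROKER_NATIVE_CONTEXT_FIELDS expands, via DECL_ACCESSOR, into a typed accessor on NativeContextRef. A minimal sketch of the generated declarations, for illustration only (these are not additional patch lines):

    JSFunctionRef array_function() const;
    MapRef iterator_result_map() const;
    ScriptContextTableRef script_context_table() const;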
class NameRef : public HeapObjectRef {
@@ -276,12 +284,21 @@ class AllocationSiteRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- JSObjectRef boilerplate() const;
+ bool PointsToLiteral() const;
PretenureFlag GetPretenureMode() const;
- bool IsFastLiteral() const;
ObjectRef nested_site() const;
- bool PointsToLiteral() const;
+
+ // {IsFastLiteral} determines whether the given array or object literal
+ // boilerplate satisfies all limits to be considered for fast deep-copying
+ // and computes the total size of all objects that are part of the graph.
+ //
+ // If PointsToLiteral() is false, then IsFastLiteral() is also false.
+ bool IsFastLiteral() const;
+ // We only serialize boilerplate if IsFastLiteral is true.
+ base::Optional<JSObjectRef> boilerplate() const;
+
ElementsKind GetElementsKind() const;
+ bool CanInlineCall() const;
};
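Since boilerplate() now returns a base::Optional, callers are expected to check IsFastLiteral() (the only case in which the boilerplate gets serialized) before dereferencing it. A minimal usage sketch, with assumed caller code that is not part of the patch:

    AllocationSiteRef site(broker, allocation_site);
    if (site.IsFastLiteral()) {
      JSObjectRef boilerplate = site.boilerplate().value();
      // ... fast deep-copy path over the serialized boilerplate ...
    }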
class MapRef : public HeapObjectRef {
@@ -291,26 +308,30 @@ class MapRef : public HeapObjectRef {
int instance_size() const;
InstanceType instance_type() const;
int GetInObjectProperties() const;
+ int GetInObjectPropertiesStartInWords() const;
int NumberOfOwnDescriptors() const;
- PropertyDetails GetPropertyDetails(int i) const;
- NameRef GetPropertyKey(int i) const;
- FieldIndex GetFieldIndexFor(int i) const;
int GetInObjectPropertyOffset(int index) const;
ElementsKind elements_kind() const;
- ObjectRef constructor_or_backpointer() const;
bool is_stable() const;
bool has_prototype_slot() const;
bool is_deprecated() const;
bool CanBeDeprecated() const;
bool CanTransition() const;
bool IsInobjectSlackTrackingInProgress() const;
- MapRef FindFieldOwner(int descriptor) const;
bool is_dictionary_map() const;
bool IsJSArrayMap() const;
bool IsFixedCowArrayMap() const;
+ ObjectRef constructor_or_backpointer() const;
+
+ base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
+
// Concerning the underlying instance_descriptors:
- FieldTypeRef GetFieldType(int descriptor) const;
+ MapRef FindFieldOwner(int descriptor) const;
+ PropertyDetails GetPropertyDetails(int i) const;
+ NameRef GetPropertyKey(int i) const;
+ FieldIndex GetFieldIndexFor(int i) const;
+ ObjectRef GetFieldType(int descriptor) const;
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -336,11 +357,17 @@ class FixedDoubleArrayRef : public FixedArrayBaseRef {
bool is_the_hole(int i) const;
};
+class BytecodeArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+
+ int register_count() const;
+};
+
class JSArrayRef : public JSObjectRef {
public:
using JSObjectRef::JSObjectRef;
- ElementsKind GetElementsKind() const;
ObjectRef length() const;
};
@@ -351,22 +378,29 @@ class ScopeInfoRef : public HeapObjectRef {
int ContextLength() const;
};
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinFunctionId) \
+ V(bool, HasBuiltinId) \
+ V(BuiltinFunctionId, builtin_function_id) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray)
+
class SharedFunctionInfoRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
- int internal_formal_parameter_count() const;
- bool has_duplicate_parameters() const;
- int function_map_index() const;
- FunctionKind kind() const;
- LanguageMode language_mode();
- bool native() const;
- bool HasBreakInfo() const;
- bool HasBuiltinId() const;
int builtin_id() const;
- bool construct_as_builtin() const;
- bool HasBytecodeArray() const;
- int GetBytecodeArrayRegisterCount() const;
+ BytecodeArrayRef GetBytecodeArray() const;
+#define DECL_ACCESSOR(type, name) type name() const;
+ BROKER_SFI_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
};
class StringRef : public NameRef {
@@ -375,7 +409,7 @@ class StringRef : public NameRef {
int length() const;
uint16_t GetFirstChar();
- double ToNumber();
+ base::Optional<double> ToNumber();
};
class ModuleRef : public HeapObjectRef {
@@ -407,23 +441,59 @@ class InternalizedStringRef : public StringRef {
class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
public:
- JSHeapBroker(Isolate* isolate);
+ JSHeapBroker(Isolate* isolate, Zone* zone);
+ void SerializeStandardObjects();
HeapObjectType HeapObjectTypeFromMap(Handle<Map> map) const {
AllowHandleDereference handle_dereference;
return HeapObjectTypeFromMap(*map);
}
- static base::Optional<int> TryGetSmi(Handle<Object> object);
-
Isolate* isolate() const { return isolate_; }
+ Zone* zone() const { return zone_; }
+
+ enum BrokerMode { kDisabled, kSerializing, kSerialized };
+ BrokerMode mode() const { return mode_; }
+ void StopSerializing() {
+ CHECK_EQ(mode_, kSerializing);
+ mode_ = kSerialized;
+ }
+ bool SerializingAllowed() const;
+
+ // Returns nullptr iff handle unknown.
+ ObjectData* GetData(Handle<Object>) const;
+ // Never returns nullptr.
+ ObjectData* GetOrCreateData(Handle<Object>);
+
+ void Trace(const char* format, ...) const;
private:
friend class HeapObjectRef;
+ friend class ObjectRef;
+ friend class ObjectData;
+
+ // TODO(neis): Remove eventually.
HeapObjectType HeapObjectTypeFromMap(Map* map) const;
+ void AddData(Handle<Object> object, ObjectData* data);
+
Isolate* const isolate_;
-};
+ Zone* const zone_;
+ ZoneUnorderedMap<Address, ObjectData*> refs_;
+ BrokerMode mode_;
+};
+
+#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
+ optionally_something) \
+ auto optionally_something_ = optionally_something; \
+ if (!optionally_something_) \
+ return NoChangeBecauseOfMissingData(js_heap_broker(), __FUNCTION__, \
+ __LINE__); \
+ something_var = *optionally_something_;
+
+class Reduction;
+Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
+ const char* function, int line);
} // namespace compiler
} // namespace internal
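ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING is meant for reducers that ask a Ref for data the broker may not have serialized; when the optional is empty, the reduction bails out via NoChangeBecauseOfMissingData. The js-typed-lowering.cc change later in this patch uses it exactly this way when folding a constant string to a number:

    double number;
    ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
    return Replace(jsgraph()->Constant(number));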
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
new file mode 100644
index 0000000000..0bcc662771
--- /dev/null
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -0,0 +1,85 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-heap-copy-reducer.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/js-operator.h"
+#include "src/heap/factory-inl.h"
+#include "src/objects/map.h"
+#include "src/objects/scope-info.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// In the functions below, we call the ObjectRef (or subclass) constructor in
+// order to trigger serialization if not yet done.
+
+JSHeapCopyReducer::JSHeapCopyReducer(JSHeapBroker* broker) : broker_(broker) {}
+
+JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
+
+Reduction JSHeapCopyReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ ObjectRef(broker(), HeapConstantOf(node->op()));
+ break;
+ }
+ case IrOpcode::kJSCreateArray: {
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
+ break;
+ }
+ case IrOpcode::kJSCreateCatchContext: {
+ ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+ break;
+ }
+ case IrOpcode::kJSCreateClosure: {
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ SharedFunctionInfoRef(broker(), p.shared_info());
+ HeapObjectRef(broker(), p.feedback_cell());
+ HeapObjectRef(broker(), p.code());
+ break;
+ }
+ case IrOpcode::kJSCreateEmptyLiteralArray: {
+ // TODO(neis, jarin) Force serialization of the entire feedback vector
+ // rather than just the one element.
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ FeedbackVectorRef(broker(), p.feedback().vector());
+ Handle<Object> feedback(
+ p.feedback().vector()->Get(p.feedback().slot())->ToObject(),
+ broker()->isolate());
+ ObjectRef(broker(), feedback);
+ break;
+ }
+ case IrOpcode::kJSCreateFunctionContext: {
+ CreateFunctionContextParameters const& p =
+ CreateFunctionContextParametersOf(node->op());
+ ScopeInfoRef(broker(), p.scope_info());
+ break;
+ }
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject: {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ ObjectRef(broker(), p.feedback().vector());
+ break;
+ }
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSStoreNamed: {
+ NamedAccess const& p = NamedAccessOf(node->op());
+ NameRef(broker(), p.name());
+ break;
+ }
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.h b/deps/v8/src/compiler/js-heap-copy-reducer.h
new file mode 100644
index 0000000000..b94b930d78
--- /dev/null
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.h
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
+#define V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSHeapBroker;
+
+// The heap copy reducer makes sure that the relevant heap data referenced
+// by handles embedded in the graph is copied to the heap broker.
+// TODO(jarin) This is just a temporary solution until the graph uses only
+// ObjectRef-derived references to refer to the heap data.
+class JSHeapCopyReducer : public Reducer {
+ public:
+ explicit JSHeapCopyReducer(JSHeapBroker* broker);
+
+ const char* reducer_name() const override { return "JSHeapCopyReducer"; }
+
+ Reduction Reduce(Node* node) override;
+
+ private:
+ JSHeapBroker* broker();
+
+ JSHeapBroker* broker_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
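A minimal sketch of how a pipeline phase might drive this reducer together with the broker's serializing/serialized modes. The GraphReducer wiring and the exact points at which the mode changes are assumptions for illustration, not something this patch prescribes:

    JSHeapBroker broker(isolate, temp_zone);
    broker.SerializeStandardObjects();

    GraphReducer graph_reducer(temp_zone, graph);  // assumed constructor shape
    JSHeapCopyReducer heap_copy_reducer(&broker);
    graph_reducer.AddReducer(&heap_copy_reducer);
    graph_reducer.ReduceGraph();  // constructing ObjectRefs serializes their data

    broker.StopSerializing();  // later phases only read; mode() == kSerialized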
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index dc8d70f6ac..68919c9aec 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -34,7 +34,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
void Finalize() final;
private:
- // This limit currently matches what Crankshaft does. We may want to
+ // This limit currently matches what the old compiler did. We may want to
// re-evaluate and come up with a proper limit for TurboFan.
static const int kMaxCallPolymorphism = 4;
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index fcb9e87adb..194e876849 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -15,6 +15,7 @@
#include "src/compiler/operator-properties.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
namespace v8 {
namespace internal {
@@ -55,10 +56,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsJSProxy:
return ReduceIsInstanceType(node, JS_PROXY_TYPE);
- case Runtime::kInlineIsJSWeakMap:
- return ReduceIsInstanceType(node, JS_WEAK_MAP_TYPE);
- case Runtime::kInlineIsJSWeakSet:
- return ReduceIsInstanceType(node, JS_WEAK_SET_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -79,19 +76,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
- case Runtime::kInlineGetSuperConstructor:
- return ReduceGetSuperConstructor(node);
- case Runtime::kInlineArrayBufferViewWasNeutered:
- return ReduceArrayBufferViewWasNeutered(node);
- case Runtime::kInlineMaxSmi:
- return ReduceMaxSmi(node);
- case Runtime::kInlineTypedArrayGetLength:
- return ReduceArrayBufferViewField(node,
- AccessBuilder::ForJSTypedArrayLength());
- case Runtime::kInlineTheHole:
- return ReduceTheHole(node);
- case Runtime::kInlineStringMaxLength:
- return ReduceStringMaxLength(node);
default:
break;
}
@@ -321,66 +305,6 @@ Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
- Node* node, FieldAccess const& access) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Load the {receiver}s field.
- Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
- receiver, effect, control);
-
- // Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
-
- // Default to zero if the {receiver}s buffer was neutered.
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), value);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceArrayBufferViewWasNeutered(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* value = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
- Node* value = jsgraph()->Constant(Smi::kMaxValue);
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
- Node* value = jsgraph()->TheHoleConstant();
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
-Reduction JSIntrinsicLowering::ReduceStringMaxLength(Node* node) {
- Node* value = jsgraph()->Constant(String::kMaxLength);
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 18fe1248c7..e0a55d7b06 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -63,18 +63,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceCall(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
- // TODO(turbofan): typedarray.js support; drop once TypedArrays are
- // converted to proper CodeStubAssembler based builtins.
- Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
- Reduction ReduceArrayBufferViewWasNeutered(Node* node);
- Reduction ReduceMaxSmi(Node* node);
-
- // TODO(turbofan): collection.js support; drop once Maps and Sets are
- // converted to proper CodeStubAssembler based builtins.
- Reduction ReduceTheHole(Node* node);
-
- Reduction ReduceStringMaxLength(Node* node);
-
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 4c6ea30bae..e35a860be0 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -5,7 +5,7 @@
#include "src/compiler/js-native-context-specialization.h"
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
@@ -20,6 +20,8 @@
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/templates.h"
#include "src/vector-slot-pair.h"
@@ -58,9 +60,9 @@ struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
};
JSNativeContextSpecialization::JSNativeContextSpecialization(
- Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
- Flags flags, Handle<Context> native_context,
- CompilationDependencies* dependencies, Zone* zone)
+ Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Flags flags,
+ Handle<Context> native_context, CompilationDependencies* dependencies,
+ Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
@@ -109,6 +111,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSStoreDataPropertyInLiteral(node);
case IrOpcode::kJSStoreInArrayLiteral:
return ReduceJSStoreInArrayLiteral(node);
+ case IrOpcode::kJSToObject:
+ return ReduceJSToObject(node);
default:
break;
}
@@ -408,10 +412,9 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
if (function->IsConstructor() && function->has_prototype_slot() &&
function->has_instance_prototype() &&
function->prototype()->IsJSReceiver()) {
- // Ensure that the {function} has a valid initial map, so we can
- // depend on that for the prototype constant-folding below.
- JSFunction::EnsureHasInitialMap(function);
-
+ // We need {function}'s initial map so that we can depend on it for the
+ // prototype constant-folding below.
+ if (!function->has_initial_map()) return NoChange();
MapRef initial_map = dependencies()->DependOnInitialMap(
JSFunctionRef(js_heap_broker(), function));
Node* prototype = jsgraph()->Constant(
@@ -1100,19 +1103,17 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
if (m.Value()->IsJSFunction() &&
p.name().is_identical_to(factory()->prototype_string())) {
// Optimize "prototype" property of functions.
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- if (function->IsConstructor()) {
- // We need to add a code dependency on the initial map of the
- // {function} in order to be notified about changes to the
- // "prototype" of {function}.
- JSFunction::EnsureHasInitialMap(function);
- dependencies()->DependOnInitialMap(
- JSFunctionRef(js_heap_broker(), function));
- Handle<Object> prototype(function->prototype(), isolate());
- Node* value = jsgraph()->Constant(prototype);
- ReplaceWithValue(node, value);
- return Replace(value);
+ JSFunctionRef function = m.Ref(js_heap_broker()).AsJSFunction();
+ // TODO(neis): Remove the has_prototype_slot condition once the broker is
+ // always enabled.
+ if (!function.map().has_prototype_slot() || !function.has_prototype() ||
+ function.PrototypeRequiresRuntimeLookup()) {
+ return NoChange();
}
+ ObjectRef prototype = dependencies()->DependOnPrototypeProperty(function);
+ Node* value = jsgraph()->Constant(prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
} else if (m.Value()->IsString() &&
p.name().is_identical_to(factory()->length_string())) {
// Constant-fold "length" property on constant strings.
@@ -2180,12 +2181,31 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
store_mode);
}
+Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(isolate(), receiver, effect,
+ &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (!receiver_maps[i]->IsJSReceiverMap()) return NoChange();
+ }
+
+ ReplaceWithValue(node, receiver, effect);
+ return Replace(receiver);
+}
+
namespace {
ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
switch (kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return kExternal##Type##Array;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -2221,7 +2241,8 @@ JSNativeContextSpecialization::BuildElementAccess(
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
// Determine the {receiver}s (known) length.
- length = jsgraph()->Constant(typed_array->length_value());
+ length =
+ jsgraph()->Constant(static_cast<double>(typed_array->length_value()));
// Check if the {receiver}s buffer was neutered.
buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
@@ -2474,7 +2495,7 @@ JSNativeContextSpecialization::BuildElementAccess(
if (access_mode == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
- if (IsHoleyOrDictionaryElementsKind(elements_kind)) {
+ if (IsHoleyElementsKind(elements_kind)) {
element_access.type =
Type::Union(element_type, Type::Hole(), graph()->zone());
}
@@ -2513,10 +2534,10 @@ JSNativeContextSpecialization::BuildElementAccess(
} else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
// Return the signaling NaN hole directly if all uses are
// truncating.
- vtrue = etrue =
- graph()->NewNode(simplified()->CheckFloat64Hole(
- CheckFloat64HoleMode::kAllowReturnHole),
- vtrue, etrue, if_true);
+ vtrue = etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(
+ CheckFloat64HoleMode::kAllowReturnHole, VectorSlotPair()),
+ vtrue, etrue, if_true);
}
}
@@ -2564,7 +2585,8 @@ JSNativeContextSpecialization::BuildElementAccess(
mode = CheckFloat64HoleMode::kAllowReturnHole;
}
value = effect = graph()->NewNode(
- simplified()->CheckFloat64Hole(mode), value, effect, control);
+ simplified()->CheckFloat64Hole(mode, VectorSlotPair()), value,
+ effect, control);
}
}
} else {
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 53fe9e2c11..413e3c191f 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -47,7 +47,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker, Flags flags,
+ JSHeapBroker* js_heap_broker, Flags flags,
Handle<Context> native_context,
CompilationDependencies* dependencies,
Zone* zone);
@@ -76,6 +76,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSStoreNamedOwn(Node* node);
Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
Reduction ReduceJSStoreInArrayLiteral(Node* node);
+ Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandles const& receiver_maps,
@@ -217,7 +218,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
Factory* factory() const;
CommonOperatorBuilder* common() const;
@@ -231,7 +232,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Zone* zone() const { return zone_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
Flags const flags_;
Handle<JSGlobalObject> global_object_;
Handle<JSGlobalProxy> global_proxy_;
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 5d45bb7f95..57f9950d55 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -363,7 +363,7 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op) {
bool operator==(CreateArrayParameters const& lhs,
CreateArrayParameters const& rhs) {
return lhs.arity() == rhs.arity() &&
- lhs.site().location() == rhs.site().location();
+ lhs.site().address() == rhs.site().address();
}
@@ -374,13 +374,14 @@ bool operator!=(CreateArrayParameters const& lhs,
size_t hash_value(CreateArrayParameters const& p) {
- return base::hash_combine(p.arity(), p.site().location());
+ return base::hash_combine(p.arity(), p.site().address());
}
std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) {
os << p.arity();
- if (!p.site().is_null()) os << ", " << Brief(*p.site());
+ Handle<AllocationSite> site;
+ if (p.site().ToHandle(&site)) os << ", " << Brief(*site);
return os;
}
@@ -534,6 +535,29 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
+bool operator==(CloneObjectParameters const& lhs,
+ CloneObjectParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback() && lhs.flags() == rhs.flags();
+}
+
+bool operator!=(CloneObjectParameters const& lhs,
+ CloneObjectParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CloneObjectParameters const& p) {
+ return base::hash_combine(p.feedback(), p.flags());
+}
+
+std::ostream& operator<<(std::ostream& os, CloneObjectParameters const& p) {
+ return os << p.flags();
+}
+
+const CloneObjectParameters& CloneObjectParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSCloneObject);
+ return OpParameter<CloneObjectParameters>(op);
+}
+
size_t hash_value(ForInMode mode) { return static_cast<uint8_t>(mode); }
std::ostream& operator<<(std::ostream& os, ForInMode mode) {
@@ -589,6 +613,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToNumberConvertBigInt, Operator::kNoProperties, 1, 1) \
V(ToNumeric, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
@@ -1087,8 +1112,8 @@ const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
type); // parameter
}
-const Operator* JSOperatorBuilder::CreateArray(size_t arity,
- Handle<AllocationSite> site) {
+const Operator* JSOperatorBuilder::CreateArray(
+ size_t arity, MaybeHandle<AllocationSite> site) {
// constructor, new_target, arg1, ..., argN
int const value_input_count = static_cast<int>(arity) + 2;
CreateArrayParameters parameters(arity, site);
@@ -1179,6 +1204,17 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CloneObject(VectorSlotPair const& feedback,
+ int literal_flags) {
+ CloneObjectParameters parameters(feedback, literal_flags);
+ return new (zone()) Operator1<CloneObjectParameters>( // --
+ IrOpcode::kJSCloneObject, // opcode
+ Operator::kNoProperties, // properties
+ "JSCloneObject", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
return new (zone()) Operator( // --
IrOpcode::kJSCreateEmptyLiteralObject, // opcode
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index f73aca819f..b10d89cdb9 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
-#include "src/handles.h"
+#include "src/maybe-handles.h"
#include "src/runtime/runtime.h"
#include "src/type-hints.h"
#include "src/vector-slot-pair.h"
@@ -457,15 +457,15 @@ CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op);
// used as parameter by JSCreateArray operators.
class CreateArrayParameters final {
public:
- explicit CreateArrayParameters(size_t arity, Handle<AllocationSite> site)
+ explicit CreateArrayParameters(size_t arity, MaybeHandle<AllocationSite> site)
: arity_(arity), site_(site) {}
size_t arity() const { return arity_; }
- Handle<AllocationSite> site() const { return site_; }
+ MaybeHandle<AllocationSite> site() const { return site_; }
private:
size_t const arity_;
- Handle<AllocationSite> const site_;
+ MaybeHandle<AllocationSite> const site_;
};
bool operator==(CreateArrayParameters const&, CreateArrayParameters const&);
@@ -626,6 +626,28 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
+class CloneObjectParameters final {
+ public:
+ CloneObjectParameters(VectorSlotPair const& feedback, int flags)
+ : feedback_(feedback), flags_(flags) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+ int flags() const { return flags_; }
+
+ private:
+ VectorSlotPair const feedback_;
+ int const flags_;
+};
+
+bool operator==(CloneObjectParameters const&, CloneObjectParameters const&);
+bool operator!=(CloneObjectParameters const&, CloneObjectParameters const&);
+
+size_t hash_value(CloneObjectParameters const&);
+
+std::ostream& operator<<(std::ostream&, CloneObjectParameters const&);
+
+const CloneObjectParameters& CloneObjectParametersOf(const Operator* op);
+
// Descriptor used by the JSForInPrepare and JSForInNext opcodes.
enum class ForInMode : uint8_t {
kUseEnumCacheKeysAndIndices,
@@ -685,13 +707,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ToLength();
const Operator* ToName();
const Operator* ToNumber();
+ const Operator* ToNumberConvertBigInt();
const Operator* ToNumeric();
const Operator* ToObject();
const Operator* ToString();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
- const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
+ const Operator* CreateArray(size_t arity, MaybeHandle<AllocationSite> site);
const Operator* CreateArrayIterator(IterationKind);
const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
@@ -716,6 +739,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
Handle<ObjectBoilerplateDescription> constant,
VectorSlotPair const& feedback, int literal_flags,
int number_of_properties);
+ const Operator* CloneObject(VectorSlotPair const& feedback,
+ int literal_flags);
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
VectorSlotPair const& feedback,
int literal_flags);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 4fc1f84538..56b1f224c7 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -17,6 +17,7 @@
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/module-inl.h"
namespace v8 {
@@ -93,7 +94,7 @@ class JSBinopReduction final {
if (BothInputsAre(Type::String()) ||
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString) {
HeapObjectBinopMatcher m(node_);
- const JSHeapBroker* broker = lowering_->js_heap_broker();
+ JSHeapBroker* broker = lowering_->js_heap_broker();
if (m.right().HasValue() && m.right().Ref(broker).IsString()) {
StringRef right_string = m.right().Ref(broker).AsString();
if (right_string.length() >= ConsString::kMinLength) return true;
@@ -408,7 +409,7 @@ class JSBinopReduction final {
// - relax effects from generic but not-side-effecting operations
JSTypedLowering::JSTypedLowering(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker, Zone* zone)
+ JSHeapBroker* js_heap_broker, Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
@@ -529,6 +530,33 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
NodeProperties::ReplaceValueInput(node, reduction.replacement(), 0);
}
}
+ // We might be able to constant-fold the String concatenation now.
+ if (r.BothInputsAre(Type::String())) {
+ HeapObjectBinopMatcher m(node);
+ if (m.IsFoldable()) {
+ StringRef left = m.left().Ref(js_heap_broker()).AsString();
+ StringRef right = m.right().Ref(js_heap_broker()).AsString();
+ if (left.length() + right.length() > String::kMaxLength) {
+ // No point in trying to optimize this, as it will just throw.
+ return NoChange();
+ }
+ // TODO(mslekova): get rid of these allows by doing either one of:
+ // 1. remove the optimization and check if it ruins the performance
+ // 2. leave a placeholder and do the actual allocations once back on the
+ // MT
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHeapAllocation allow_heap_allocation;
+ ObjectRef cons(
+ js_heap_broker(),
+ factory()
+ ->NewConsString(left.object<String>(), right.object<String>())
+ .ToHandleChecked());
+ Node* value = jsgraph()->Constant(cons);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
// We might know for sure that we're creating a ConsString here.
if (r.ShouldCreateConsString()) {
return ReduceCreateConsString(node);
@@ -962,7 +990,9 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumericInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasValue() && m.Ref(js_heap_broker()).IsString()) {
StringRef input_value = m.Ref(js_heap_broker()).AsString();
- return Replace(jsgraph()->Constant(input_value.ToNumber()));
+ double number;
+ ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
+ return Replace(jsgraph()->Constant(number));
}
}
if (input_type.IsHeapConstant()) {
@@ -1035,6 +1065,20 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input_type.Is(Type::NaN())) {
return Replace(jsgraph()->HeapConstant(factory()->NaN_string()));
}
+ if (input_type.Is(Type::OrderedNumber()) &&
+ input_type.Min() == input_type.Max()) {
+ // TODO(mslekova): get rid of these allows by doing either one of:
+ // 1. remove the optimization and check if it ruins the performance
+ // 2. allocate all the ToString's from numbers before the compilation
+ // 3. leave a placeholder and do the actual allocations once back on the MT
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+ AllowHeapAllocation allow_heap_allocation;
+ // Note that we can use Type::OrderedNumber(), since
+ // both 0 and -0 map to the String "0" in JavaScript.
+ return Replace(jsgraph()->HeapConstant(
+ factory()->NumberToString(factory()->NewNumber(input_type.Min()))));
+ }
if (input_type.Is(Type::Number())) {
return Replace(graph()->NewNode(simplified()->NumberToString(), input));
}
@@ -1356,7 +1400,7 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
if (module_type.IsHeapConstant()) {
ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule();
- CellRef cell_constant(module_constant.GetCell(cell_index));
+ CellRef cell_constant = module_constant.GetCell(cell_index);
return jsgraph()->Constant(cell_constant);
}
@@ -1415,8 +1459,8 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
namespace {
-void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
- int builtin_index, int arity, CallDescriptor::Flags flags) {
+void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
+ CallDescriptor::Flags flags) {
// Patch {node} to a direct CEntry call.
//
// ----------- A r g u m e n t s -----------
@@ -1678,8 +1722,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
} else if (shared.HasBuiltinId() &&
Builtins::HasCppImplementation(shared.builtin_id())) {
// Patch {node} to a direct CEntry call.
- ReduceBuiltin(isolate(), jsgraph(), node, shared.builtin_id(), arity,
- flags);
+ ReduceBuiltin(jsgraph(), node, shared.builtin_id(), arity, flags);
} else if (shared.HasBuiltinId() &&
Builtins::KindOf(shared.builtin_id()) == Builtins::TFJ) {
// Patch {node} to a direct code object call.
@@ -2278,6 +2321,7 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSToName:
return ReduceJSToName(node);
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric:
return ReduceJSToNumberOrNumeric(node);
case IrOpcode::kJSToString:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index c8fcac5ff6..c3bef9aeed 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -32,7 +32,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSTypedLowering(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker, Zone* zone);
+ JSHeapBroker* js_heap_broker, Zone* zone);
~JSTypedLowering() final {}
const char* reducer_name() const override { return "JSTypedLowering"; }
@@ -98,14 +98,14 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
Type empty_string_type_;
Type pointer_comparable_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 99f192acdf..99c52b1ade 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -184,8 +184,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineGeneratorGetResumeMode:
case Runtime::kInlineCreateJSGeneratorObject:
case Runtime::kInlineIsArray:
- case Runtime::kInlineIsJSWeakMap:
- case Runtime::kInlineIsJSWeakSet:
case Runtime::kInlineIsJSReceiver:
case Runtime::kInlineIsRegExp:
case Runtime::kInlineIsSmi:
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index c1d8570353..6d6cfafdbf 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -19,6 +19,7 @@ namespace {
bool IsRename(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kCheckHeapObject:
case IrOpcode::kFinishRegion:
case IrOpcode::kTypeGuard:
return true;
@@ -35,12 +36,14 @@ Node* ResolveRenames(Node* node) {
}
bool MayAlias(Node* a, Node* b) {
- if (a == b) return true;
- if (!NodeProperties::GetType(a).Maybe(NodeProperties::GetType(b))) {
- return false;
- }
- switch (b->opcode()) {
- case IrOpcode::kAllocate: {
+ if (a != b) {
+ if (!NodeProperties::GetType(a).Maybe(NodeProperties::GetType(b))) {
+ return false;
+ } else if (IsRename(b)) {
+ return MayAlias(a, b->InputAt(0));
+ } else if (IsRename(a)) {
+ return MayAlias(a->InputAt(0), b);
+ } else if (b->opcode() == IrOpcode::kAllocate) {
switch (a->opcode()) {
case IrOpcode::kAllocate:
case IrOpcode::kHeapConstant:
@@ -49,16 +52,7 @@ bool MayAlias(Node* a, Node* b) {
default:
break;
}
- break;
- }
- case IrOpcode::kFinishRegion:
- case IrOpcode::kTypeGuard:
- return MayAlias(a, b->InputAt(0));
- default:
- break;
- }
- switch (a->opcode()) {
- case IrOpcode::kAllocate: {
+ } else if (a->opcode() == IrOpcode::kAllocate) {
switch (b->opcode()) {
case IrOpcode::kHeapConstant:
case IrOpcode::kParameter:
@@ -66,13 +60,7 @@ bool MayAlias(Node* a, Node* b) {
default:
break;
}
- break;
}
- case IrOpcode::kFinishRegion:
- case IrOpcode::kTypeGuard:
- return MayAlias(a->InputAt(0), b);
- default:
- break;
}
return true;
}
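With this rewrite, MayAlias recurses through rename nodes (CheckHeapObject, FinishRegion, TypeGuard) on either input, so aliasing is decided on the underlying allocation. For example, with illustrative node names:

    // alloc    = Allocate(...)
    // guard    = TypeGuard(alloc)
    // constant = HeapConstant(...)
    //
    // MayAlias(guard, alloc)    -> true   (guard merely renames alloc)
    // MayAlias(guard, constant) -> false  (a fresh allocation cannot alias a constant)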
@@ -445,6 +433,7 @@ LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Extend(
}
void LoadElimination::AbstractMaps::Print() const {
+ AllowHandleDereference allow_handle_dereference;
StdoutStream os;
for (auto pair : info_for_node_) {
os << " #" << pair.first->id() << ":" << pair.first->op()->mnemonic()
@@ -676,6 +665,12 @@ Node* LoadElimination::AbstractState::LookupField(Node* object,
}
bool LoadElimination::AliasStateInfo::MayAlias(Node* other) const {
+ // If {object} is being initialized right here (indicated by {object} being
+ // an Allocate node instead of a FinishRegion node), we know that {other}
+ // can only alias with {object} if they refer to exactly the same node.
+ if (object_->opcode() == IrOpcode::kAllocate) {
+ return object_ == other;
+ }
// Decide aliasing based on the node kinds.
if (!compiler::MayAlias(object_, other)) {
return false;
@@ -905,8 +900,9 @@ Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) {
Reduction LoadElimination::ReduceLoadField(Node* node) {
FieldAccess const& access = FieldAccessOf(node->op());
- Node* const object = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
if (access.offset == HeapObject::kMapOffset &&
@@ -924,12 +920,19 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
if (field_index >= 0) {
if (Node* replacement = state->LookupField(object, field_index)) {
// Make sure we don't resurrect dead {replacement} nodes.
- // Skip lowering if the type of the {replacement} node is not a subtype
- // of the original {node}'s type.
- // TODO(tebbi): We should insert a {TypeGuard} for the intersection of
- // these two types here once we properly handle {Type::None} everywhere.
- if (!replacement->IsDead() && NodeProperties::GetType(replacement)
- .Is(NodeProperties::GetType(node))) {
+ if (!replacement->IsDead()) {
+ // Introduce a TypeGuard if the type of the {replacement} node is not
+ // a subtype of the original {node}'s type.
+ if (!NodeProperties::GetType(replacement)
+ .Is(NodeProperties::GetType(node))) {
+ Type replacement_type = Type::Intersect(
+ NodeProperties::GetType(node),
+ NodeProperties::GetType(replacement), graph()->zone());
+ replacement = effect =
+ graph()->NewNode(common()->TypeGuard(replacement_type),
+ replacement, effect, control);
+ NodeProperties::SetType(replacement, replacement_type);
+ }
ReplaceWithValue(node, replacement, effect);
return Replace(replacement);
}
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index d3b1b5c14a..2ce5a04397 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -9,6 +9,7 @@
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/maybe-handles.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index fe5b8c7889..5a0fc9dbfb 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -240,7 +240,8 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
// TODO(jarin) Support both sides.
Node* input = arith->InputAt(0);
if (input->opcode() == IrOpcode::kSpeculativeToNumber ||
- input->opcode() == IrOpcode::kJSToNumber) {
+ input->opcode() == IrOpcode::kJSToNumber ||
+ input->opcode() == IrOpcode::kJSToNumberConvertBigInt) {
input = input->InputAt(0);
}
if (input != phi) return nullptr;
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 55ef35d231..f3a5fb9023 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -155,7 +155,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kWord32AtomicOr:
case IrOpcode::kWord32AtomicXor:
representation_vector_[node->id()] = PromoteRepresentation(
- AtomicOpRepresentationOf(node->op()).representation());
+ AtomicOpType(node->op()).representation());
break;
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
@@ -782,7 +782,8 @@ class MachineRepresentationChecker {
str << std::endl;
}
str << " * input " << i << " (" << input->id() << ":" << *input->op()
- << ") doesn't have a " << expected_input_type << " representation.";
+ << ") has a " << input_type
+ << " representation (expected: " << expected_input_type << ").";
}
}
if (should_log_error) {
diff --git a/deps/v8/src/compiler/machine-graph.cc b/deps/v8/src/compiler/machine-graph.cc
index 0fcd97ff90..b81ad03d83 100644
--- a/deps/v8/src/compiler/machine-graph.cc
+++ b/deps/v8/src/compiler/machine-graph.cc
@@ -5,6 +5,7 @@
#include "src/compiler/machine-graph.h"
#include "src/compiler/node-properties.h"
+#include "src/external-reference.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-graph.h b/deps/v8/src/compiler/machine-graph.h
index bd20d8c50d..83d27e03b9 100644
--- a/deps/v8/src/compiler/machine-graph.h
+++ b/deps/v8/src/compiler/machine-graph.h
@@ -5,13 +5,13 @@
#ifndef V8_COMPILER_MACHINE_GRAPH_H_
#define V8_COMPILER_MACHINE_GRAPH_H_
-#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/compiler/common-node-cache.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/globals.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 8eac3ed18c..241651254b 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -39,6 +39,7 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
IrOpcode::kProtectedLoad == op->opcode() ||
IrOpcode::kWord32AtomicLoad == op->opcode() ||
IrOpcode::kWord64AtomicLoad == op->opcode() ||
+ IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
@@ -80,11 +81,12 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
- IrOpcode::kWord64AtomicStore == op->opcode());
+ IrOpcode::kWord64AtomicStore == op->opcode() ||
+ IrOpcode::kWord32AtomicPairStore == op->opcode());
return OpParameter<MachineRepresentation>(op);
}
-MachineType AtomicOpRepresentationOf(Operator const* op) {
+MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
@@ -137,6 +139,8 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
PURE_BINARY_OP_LIST_64(V) \
V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -338,8 +342,6 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
- V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
- V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Int32AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
V(Int64AbsWithOverflow, Operator::kNoProperties, 1, 0, 1) \
V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
@@ -390,18 +392,19 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(kTaggedPointer) \
V(kTagged)
+#define ATOMIC_U32_TYPE_LIST(V) \
+ V(Uint8) \
+ V(Uint16) \
+ V(Uint32)
+
#define ATOMIC_TYPE_LIST(V) \
+ ATOMIC_U32_TYPE_LIST(V) \
V(Int8) \
- V(Uint8) \
V(Int16) \
- V(Uint16) \
- V(Int32) \
- V(Uint32)
+ V(Int32)
-#define ATOMIC64_TYPE_LIST(V) \
- V(Uint8) \
- V(Uint16) \
- V(Uint32) \
+#define ATOMIC_U64_TYPE_LIST(V) \
+ ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
#define ATOMIC_REPRESENTATION_LIST(V) \
@@ -413,6 +416,14 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
ATOMIC_REPRESENTATION_LIST(V) \
V(kWord64)
+#define ATOMIC_PAIR_BINOP_LIST(V) \
+ V(Add) \
+ V(Sub) \
+ V(And) \
+ V(Or) \
+ V(Xor) \
+ V(Exchange)
+
#define SIMD_LANE_OP_LIST(V) \
V(F32x4, 4) \
V(I32x4, 4) \
@@ -592,7 +603,7 @@ struct MachineOperatorGlobalCache {
"Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
- ATOMIC64_TYPE_LIST(ATOMIC_LOAD)
+ ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
@@ -647,7 +658,7 @@ struct MachineOperatorGlobalCache {
ATOMIC_OP(Word64AtomicOr, type) \
ATOMIC_OP(Word64AtomicXor, type) \
ATOMIC_OP(Word64AtomicExchange, type)
- ATOMIC64_TYPE_LIST(ATOMIC64_OP_LIST)
+ ATOMIC_U64_TYPE_LIST(ATOMIC64_OP_LIST)
#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
@@ -676,7 +687,76 @@ struct MachineOperatorGlobalCache {
}; \
Word64AtomicCompareExchange##Type##Operator \
kWord64AtomicCompareExchange##Type;
- ATOMIC64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+ ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
+ struct Word32AtomicPairLoadOperator : public Operator {
+ Word32AtomicPairLoadOperator()
+ : Operator(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
+
+ struct Word32AtomicPairStoreOperator : public Operator {
+ Word32AtomicPairStoreOperator()
+ : Operator(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ };
+ Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+
+#define ATOMIC_PAIR_OP(op) \
+ struct Word32AtomicPair##op##Operator : public Operator { \
+ Word32AtomicPair##op##Operator() \
+ : Operator(IrOpcode::kWord32AtomicPair##op, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+                   "Word32AtomicPair" #op, 4, 1, 1, 2, 1, 0) {}               \
+ }; \
+ Word32AtomicPair##op##Operator kWord32AtomicPair##op;
+ ATOMIC_PAIR_BINOP_LIST(ATOMIC_PAIR_OP)
+#undef ATOMIC_PAIR_OP
+#undef ATOMIC_PAIR_BINOP_LIST
+
+#define ATOMIC64_NARROW_OP(op, type) \
+ struct op##type##Operator : public Operator1<MachineType> { \
+ op##type##Operator() \
+ : Operator1<MachineType>( \
+            IrOpcode::k##op, Operator::kNoDeopt | Operator::kNoThrow, #op,    \
+ 3, 1, 1, 2, 1, 0, MachineType::type()) {} \
+ }; \
+ op##type##Operator k##op##type;
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowAdd, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowSub, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowAnd, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowOr, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowXor, type) \
+ ATOMIC64_NARROW_OP(Word64AtomicNarrowExchange, type)
+ ATOMIC_U32_TYPE_LIST(ATOMIC_OP_LIST)
+#undef ATOMIC_OP_LIST
+#undef ATOMIC64_NARROW_OP
+
+ struct Word32AtomicPairCompareExchangeOperator : public Operator {
+ Word32AtomicPairCompareExchangeOperator()
+ : Operator(IrOpcode::kWord32AtomicPairCompareExchange,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairCompareExchange", 6, 1, 1, 2, 1, 0) {}
+ };
+ Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
+
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word64AtomicNarrowCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word64AtomicNarrowCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord64AtomicNarrowCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word64AtomicNarrowCompareExchange", 4, 1, 1, \
+ 2, 1, 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicNarrowCompareExchange##Type##Operator \
+ kWord64AtomicNarrowCompareExchange##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
// The {BitcastWordToTagged} operator must not be marked as pure (especially
@@ -965,10 +1045,10 @@ const Operator* MachineOperatorBuilder::Word32AtomicStore(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType rep) {
-#define EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType type) {
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicExchange##kType; \
}
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
@@ -976,85 +1056,73 @@ const Operator* MachineOperatorBuilder::Word32AtomicExchange(MachineType rep) {
}
const Operator* MachineOperatorBuilder::Word32AtomicCompareExchange(
- MachineType rep) {
-#define COMPARE_EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicCompareExchange##kRep; \
+ MachineType type) {
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicCompareExchange##kType; \
}
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType rep) {
-#define ADD(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicAdd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAdd(MachineType type) {
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAdd##kType; \
}
ATOMIC_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType rep) {
-#define SUB(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicSub##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicSub(MachineType type) {
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicSub##kType; \
}
ATOMIC_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType rep) {
-#define AND(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicAnd##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicAnd(MachineType type) {
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicAnd##kType; \
}
ATOMIC_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType rep) {
-#define OR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicOr##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicOr(MachineType type) {
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicOr##kType; \
}
ATOMIC_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType rep) {
-#define XOR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord32AtomicXor##kRep; \
+const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord32AtomicXor##kType; \
}
ATOMIC_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return &cache_.kTaggedPoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return &cache_.kWord32PoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return &cache_.kWord64PoisonOnSpeculation;
-}
-
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kWord64AtomicLoad##Type; \
}
- ATOMIC64_TYPE_LIST(LOAD)
+ ATOMIC_U64_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
}
@@ -1070,77 +1138,201 @@ const Operator* MachineOperatorBuilder::Word64AtomicStore(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType rep) {
-#define ADD(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicAdd##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType type) {
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAdd##kType; \
}
- ATOMIC64_TYPE_LIST(ADD)
+ ATOMIC_U64_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType rep) {
-#define SUB(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicSub##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType type) {
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicSub##kType; \
}
- ATOMIC64_TYPE_LIST(SUB)
+ ATOMIC_U64_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType rep) {
-#define AND(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicAnd##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType type) {
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicAnd##kType; \
}
- ATOMIC64_TYPE_LIST(AND)
+ ATOMIC_U64_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType rep) {
-#define OR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicOr##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType type) {
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicOr##kType; \
}
- ATOMIC64_TYPE_LIST(OR)
+ ATOMIC_U64_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType rep) {
-#define XOR(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicXor##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType type) {
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicXor##kType; \
}
- ATOMIC64_TYPE_LIST(XOR)
+ ATOMIC_U64_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType rep) {
-#define EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicExchange##kRep; \
+const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType type) {
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicExchange##kType; \
}
- ATOMIC64_TYPE_LIST(EXCHANGE)
+ ATOMIC_U64_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
- MachineType rep) {
-#define COMPARE_EXCHANGE(kRep) \
- if (rep == MachineType::kRep()) { \
- return &cache_.kWord64AtomicCompareExchange##kRep; \
+ MachineType type) {
+#define COMPARE_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicCompareExchange##kType; \
}
- ATOMIC64_TYPE_LIST(COMPARE_EXCHANGE)
+ ATOMIC_U64_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
+ return &cache_.kWord32AtomicPairLoad;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
+ return &cache_.kWord32AtomicPairStore;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
+ return &cache_.kWord32AtomicPairAdd;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairSub() {
+ return &cache_.kWord32AtomicPairSub;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairAnd() {
+ return &cache_.kWord32AtomicPairAnd;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairOr() {
+ return &cache_.kWord32AtomicPairOr;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairXor() {
+ return &cache_.kWord32AtomicPairXor;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairExchange() {
+ return &cache_.kWord32AtomicPairExchange;
+}
+
+const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
+ return &cache_.kWord32AtomicPairCompareExchange;
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowAdd(
+ MachineType type) {
+#define ADD(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowAdd##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(ADD)
+#undef ADD
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowSub(
+ MachineType type) {
+#define SUB(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowSub##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(SUB)
+#undef SUB
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowAnd(
+ MachineType type) {
+#define AND(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowAnd##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(AND)
+#undef AND
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowOr(MachineType type) {
+#define OR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowOr##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(OR)
+#undef OR
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowXor(
+ MachineType type) {
+#define XOR(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowXor##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(XOR)
+#undef XOR
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowExchange(
+ MachineType type) {
+#define EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowExchange##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(EXCHANGE)
+#undef EXCHANGE
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicNarrowCompareExchange(
+ MachineType type) {
+#define CMP_EXCHANGE(kType) \
+ if (type == MachineType::kType()) { \
+ return &cache_.kWord64AtomicNarrowCompareExchange##kType; \
+ }
+ ATOMIC_U32_TYPE_LIST(CMP_EXCHANGE)
+#undef CMP_EXCHANGE
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
+ return &cache_.kTaggedPoisonOnSpeculation;
+}
+
+const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
+ return &cache_.kWord32PoisonOnSpeculation;
+}
+
+const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
+ return &cache_.kWord64PoisonOnSpeculation;
+}
+
const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
return OptionalOperator(flags_ & kSpeculationFence,
&cache_.kSpeculationFence);
@@ -1203,7 +1395,8 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
#undef MACHINE_TYPE_LIST
#undef MACHINE_REPRESENTATION_LIST
#undef ATOMIC_TYPE_LIST
-#undef ATOMIC64_TYPE_LIST
+#undef ATOMIC_U64_TYPE_LIST
+#undef ATOMIC_U32_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 65217cf2a6..261891dcdc 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -110,7 +110,7 @@ V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
V8_WARN_UNUSED_RESULT;
-MachineType AtomicOpRepresentationOf(Operator const* op) V8_WARN_UNUSED_RESULT;
+MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
@@ -140,8 +140,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64Popcnt = 1u << 15,
kWord32ReverseBits = 1u << 16,
kWord64ReverseBits = 1u << 17,
- kWord32ReverseBytes = 1u << 18,
- kWord64ReverseBytes = 1u << 19,
kInt32AbsWithOverflow = 1u << 20,
kInt64AbsWithOverflow = 1u << 21,
kSpeculationFence = 1u << 22,
@@ -150,9 +148,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
- kWord32ReverseBits | kWord64ReverseBits | kWord32ReverseBytes |
- kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow |
- kSpeculationFence
+ kWord32ReverseBits | kWord64ReverseBits | kInt32AbsWithOverflow |
+ kInt64AbsWithOverflow | kSpeculationFence
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -238,8 +235,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Word64Popcnt();
const OptionalOperator Word32ReverseBits();
const OptionalOperator Word64ReverseBits();
- const OptionalOperator Word32ReverseBytes();
- const OptionalOperator Word64ReverseBytes();
+ const Operator* Word32ReverseBytes();
+ const Operator* Word64ReverseBytes();
const OptionalOperator Int32AbsWithOverflow();
const OptionalOperator Int64AbsWithOverflow();
@@ -624,33 +621,66 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-store [base + index], value
const Operator* Word64AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
- const Operator* Word32AtomicExchange(MachineType rep);
+ const Operator* Word32AtomicExchange(MachineType type);
// atomic-exchange [base + index], value
- const Operator* Word64AtomicExchange(MachineType rep);
+ const Operator* Word64AtomicExchange(MachineType type);
// atomic-compare-exchange [base + index], old_value, new_value
- const Operator* Word32AtomicCompareExchange(MachineType rep);
+ const Operator* Word32AtomicCompareExchange(MachineType type);
// atomic-compare-exchange [base + index], old_value, new_value
- const Operator* Word64AtomicCompareExchange(MachineType rep);
+ const Operator* Word64AtomicCompareExchange(MachineType type);
// atomic-add [base + index], value
- const Operator* Word32AtomicAdd(MachineType rep);
+ const Operator* Word32AtomicAdd(MachineType type);
// atomic-sub [base + index], value
- const Operator* Word32AtomicSub(MachineType rep);
+ const Operator* Word32AtomicSub(MachineType type);
// atomic-and [base + index], value
- const Operator* Word32AtomicAnd(MachineType rep);
+ const Operator* Word32AtomicAnd(MachineType type);
// atomic-or [base + index], value
- const Operator* Word32AtomicOr(MachineType rep);
+ const Operator* Word32AtomicOr(MachineType type);
// atomic-xor [base + index], value
const Operator* Word32AtomicXor(MachineType rep);
- // atomic-load [base + index]
+ // atomic-add [base + index], value
const Operator* Word64AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
- const Operator* Word64AtomicSub(MachineType rep);
+ const Operator* Word64AtomicSub(MachineType type);
// atomic-and [base + index], value
- const Operator* Word64AtomicAnd(MachineType rep);
+ const Operator* Word64AtomicAnd(MachineType type);
// atomic-or [base + index], value
- const Operator* Word64AtomicOr(MachineType rep);
+ const Operator* Word64AtomicOr(MachineType type);
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType rep);
+ // atomic-narrow-add [base + index], value
+ const Operator* Word64AtomicNarrowAdd(MachineType type);
+ // atomic-narrow-sub [base + index], value
+ const Operator* Word64AtomicNarrowSub(MachineType type);
+ // atomic-narrow-and [base + index], value
+ const Operator* Word64AtomicNarrowAnd(MachineType type);
+ // atomic-narrow-or [base + index], value
+ const Operator* Word64AtomicNarrowOr(MachineType type);
+ // atomic-narrow-xor [base + index], value
+ const Operator* Word64AtomicNarrowXor(MachineType type);
+ // atomic-narrow-exchange [base + index], value
+ const Operator* Word64AtomicNarrowExchange(MachineType type);
+ // atomic-narrow-compare-exchange [base + index], old_value, new_value
+ const Operator* Word64AtomicNarrowCompareExchange(MachineType type);
+ // atomic-pair-load [base + index]
+ const Operator* Word32AtomicPairLoad();
+ // atomic-pair-store [base + index], value_high, value_low
+ const Operator* Word32AtomicPairStore();
+ // atomic-pair-add [base + index], value_high, value_low
+ const Operator* Word32AtomicPairAdd();
+ // atomic-pair-sub [base + index], value_high, value_low
+ const Operator* Word32AtomicPairSub();
+ // atomic-pair-and [base + index], value_high, value_low
+ const Operator* Word32AtomicPairAnd();
+ // atomic-pair-or [base + index], value_high, value_low
+ const Operator* Word32AtomicPairOr();
+ // atomic-pair-xor [base + index], value_high, value_low
+ const Operator* Word32AtomicPairXor();
+ // atomic-pair-exchange [base + index], value_high, value_low
+ const Operator* Word32AtomicPairExchange();
+ // atomic-pair-compare-exchange [base + index], old_value_high, old_value_low,
+ // new_value_high, new_value_low
+ const Operator* Word32AtomicPairCompareExchange();
const OptionalOperator SpeculationFence();
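Illustrative sketch (not part of the patch): the new 64-bit "narrow" and 32-bit "pair" atomic operators above are requested from the MachineOperatorBuilder just like the existing atomics. The narrow variants still take a MachineType for the memory width they touch, while the pair variants take no type because their inputs and outputs are explicit low/high 32-bit halves. Assuming a MachineOperatorBuilder* named machine:

    // 64-bit atomic add narrowed to a 32-bit memory access width.
    const Operator* narrow_add = machine->Word64AtomicNarrowAdd(MachineType::Uint32());
    // Pair operators carry no MachineType; values are passed as two 32-bit halves.
    const Operator* pair_add = machine->Word32AtomicPairAdd();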
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index e7ec150985..3ba3dcc6b8 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -101,8 +101,10 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kIfException:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kUnalignedLoad:
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
+ case IrOpcode::kUnalignedStore:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
case IrOpcode::kDebugBreak:
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index cfe132338c..66f38dc283 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -534,7 +534,8 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
- if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
if (m.right().HasValue() && mleft.right().HasValue()) {
MipsOperandGenerator g(this);
@@ -1778,7 +1779,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1817,7 +1818,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -1858,7 +1859,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2169,8 +2170,8 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
uint8_t offset;
MipsOperandGenerator g(this);
if (TryMatchConcat(shuffle, &offset)) {
- Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(offset));
+ Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
return;
}
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
@@ -2215,9 +2216,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
- MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kWord32ReverseBytes |
- MachineOperatorBuilder::kWord64ReverseBytes;
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
// static
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index ee02d30244..9f9aebc145 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -1927,10 +1927,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad()) {
- LoadRepresentation load_rep = LoadRepresentationOf(n->op());
- return load_rep.IsUnsigned();
- } else if (m.IsUnalignedLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
+ m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
} else {
@@ -2443,7 +2441,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2482,7 +2480,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2523,7 +2521,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = int8_op;
} else if (type == MachineType::Uint8()) {
@@ -2839,8 +2837,8 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
uint8_t offset;
Mips64OperandGenerator g(this);
if (TryMatchConcat(shuffle, &offset)) {
- Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(offset));
+ Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
return;
}
if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
@@ -2899,9 +2897,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven |
- MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kWord32ReverseBytes |
- MachineOperatorBuilder::kWord64ReverseBytes;
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
// static
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 4b02cad9b9..cbefb0ac35 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -195,7 +195,7 @@ struct HeapObjectMatcher final
return this->HasValue() && this->Value().address() == value.address();
}
- ObjectRef Ref(const JSHeapBroker* broker) const {
+ ObjectRef Ref(JSHeapBroker* broker) const {
return ObjectRef(broker, this->Value());
}
};
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 0d0e4f3c97..22cdd0b091 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -12,6 +12,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -602,6 +603,7 @@ bool NodeProperties::CanBeNullOrUndefined(Isolate* isolate, Node* receiver,
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToString:
case IrOpcode::kToBoolean:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 7a6b19cb35..d6ea247fbc 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -121,6 +121,7 @@
V(JSToLength) \
V(JSToName) \
V(JSToNumber) \
+ V(JSToNumberConvertBigInt) \
V(JSToNumeric) \
V(JSToObject) \
V(JSToString) \
@@ -152,6 +153,7 @@
V(JSCreateEmptyLiteralArray) \
V(JSCreateLiteralObject) \
V(JSCreateEmptyLiteralObject) \
+ V(JSCloneObject) \
V(JSCreateLiteralRegExp)
#define JS_OBJECT_OP_LIST(V) \
@@ -560,106 +562,125 @@
V(Float64Mod) \
V(Float64Pow)
-#define MACHINE_OP_LIST(V) \
- MACHINE_UNOP_32_LIST(V) \
- MACHINE_BINOP_32_LIST(V) \
- MACHINE_BINOP_64_LIST(V) \
- MACHINE_COMPARE_BINOP_LIST(V) \
- MACHINE_FLOAT32_BINOP_LIST(V) \
- MACHINE_FLOAT32_UNOP_LIST(V) \
- MACHINE_FLOAT64_BINOP_LIST(V) \
- MACHINE_FLOAT64_UNOP_LIST(V) \
- V(DebugAbort) \
- V(DebugBreak) \
- V(Comment) \
- V(Load) \
- V(PoisonedLoad) \
- V(Store) \
- V(StackSlot) \
- V(Word32Popcnt) \
- V(Word64Popcnt) \
- V(Word64Clz) \
- V(Word64Ctz) \
- V(Word64ReverseBits) \
- V(Word64ReverseBytes) \
- V(Int64AbsWithOverflow) \
- V(BitcastTaggedToWord) \
- V(BitcastWordToTagged) \
- V(BitcastWordToTaggedSigned) \
- V(TruncateFloat64ToWord32) \
- V(ChangeFloat32ToFloat64) \
- V(ChangeFloat64ToInt32) \
- V(ChangeFloat64ToUint32) \
- V(ChangeFloat64ToUint64) \
- V(Float64SilenceNaN) \
- V(TruncateFloat64ToUint32) \
- V(TruncateFloat32ToInt32) \
- V(TruncateFloat32ToUint32) \
- V(TryTruncateFloat32ToInt64) \
- V(TryTruncateFloat64ToInt64) \
- V(TryTruncateFloat32ToUint64) \
- V(TryTruncateFloat64ToUint64) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(TruncateFloat64ToFloat32) \
- V(TruncateInt64ToInt32) \
- V(RoundFloat64ToInt32) \
- V(RoundInt32ToFloat32) \
- V(RoundInt64ToFloat32) \
- V(RoundInt64ToFloat64) \
- V(RoundUint32ToFloat32) \
- V(RoundUint64ToFloat32) \
- V(RoundUint64ToFloat64) \
- V(BitcastFloat32ToInt32) \
- V(BitcastFloat64ToInt64) \
- V(BitcastInt32ToFloat32) \
- V(BitcastInt64ToFloat64) \
- V(Float64ExtractLowWord32) \
- V(Float64ExtractHighWord32) \
- V(Float64InsertLowWord32) \
- V(Float64InsertHighWord32) \
- V(TaggedPoisonOnSpeculation) \
- V(Word32PoisonOnSpeculation) \
- V(Word64PoisonOnSpeculation) \
- V(LoadStackPointer) \
- V(LoadFramePointer) \
- V(LoadParentFramePointer) \
- V(UnalignedLoad) \
- V(UnalignedStore) \
- V(Int32PairAdd) \
- V(Int32PairSub) \
- V(Int32PairMul) \
- V(Word32PairShl) \
- V(Word32PairShr) \
- V(Word32PairSar) \
- V(ProtectedLoad) \
- V(ProtectedStore) \
- V(Word32AtomicLoad) \
- V(Word32AtomicStore) \
- V(Word32AtomicExchange) \
- V(Word32AtomicCompareExchange) \
- V(Word32AtomicAdd) \
- V(Word32AtomicSub) \
- V(Word32AtomicAnd) \
- V(Word32AtomicOr) \
- V(Word32AtomicXor) \
- V(Word64AtomicLoad) \
- V(Word64AtomicStore) \
- V(Word64AtomicAdd) \
- V(Word64AtomicSub) \
- V(Word64AtomicAnd) \
- V(Word64AtomicOr) \
- V(Word64AtomicXor) \
- V(Word64AtomicExchange) \
- V(Word64AtomicCompareExchange) \
- V(SpeculationFence) \
- V(SignExtendWord8ToInt32) \
- V(SignExtendWord16ToInt32) \
- V(SignExtendWord8ToInt64) \
- V(SignExtendWord16ToInt64) \
- V(SignExtendWord32ToInt64) \
+#define MACHINE_WORD64_ATOMIC_OP_LIST(V) \
+ V(Word64AtomicLoad) \
+ V(Word64AtomicStore) \
+ V(Word64AtomicAdd) \
+ V(Word64AtomicSub) \
+ V(Word64AtomicAnd) \
+ V(Word64AtomicOr) \
+ V(Word64AtomicXor) \
+ V(Word64AtomicExchange) \
+ V(Word64AtomicCompareExchange) \
+ V(Word64AtomicNarrowAdd) \
+ V(Word64AtomicNarrowSub) \
+ V(Word64AtomicNarrowAnd) \
+ V(Word64AtomicNarrowOr) \
+ V(Word64AtomicNarrowXor) \
+ V(Word64AtomicNarrowExchange) \
+ V(Word64AtomicNarrowCompareExchange)
+
+#define MACHINE_OP_LIST(V) \
+ MACHINE_UNOP_32_LIST(V) \
+ MACHINE_BINOP_32_LIST(V) \
+ MACHINE_BINOP_64_LIST(V) \
+ MACHINE_COMPARE_BINOP_LIST(V) \
+ MACHINE_FLOAT32_BINOP_LIST(V) \
+ MACHINE_FLOAT32_UNOP_LIST(V) \
+ MACHINE_FLOAT64_BINOP_LIST(V) \
+ MACHINE_FLOAT64_UNOP_LIST(V) \
+ MACHINE_WORD64_ATOMIC_OP_LIST(V) \
+ V(DebugAbort) \
+ V(DebugBreak) \
+ V(Comment) \
+ V(Load) \
+ V(PoisonedLoad) \
+ V(Store) \
+ V(StackSlot) \
+ V(Word32Popcnt) \
+ V(Word64Popcnt) \
+ V(Word64Clz) \
+ V(Word64Ctz) \
+ V(Word64ReverseBits) \
+ V(Word64ReverseBytes) \
+ V(Int64AbsWithOverflow) \
+ V(BitcastTaggedToWord) \
+ V(BitcastWordToTagged) \
+ V(BitcastWordToTaggedSigned) \
+ V(TruncateFloat64ToWord32) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeFloat64ToUint64) \
+ V(Float64SilenceNaN) \
+ V(TruncateFloat64ToUint32) \
+ V(TruncateFloat32ToInt32) \
+ V(TruncateFloat32ToUint32) \
+ V(TryTruncateFloat32ToInt64) \
+ V(TryTruncateFloat64ToInt64) \
+ V(TryTruncateFloat32ToUint64) \
+ V(TryTruncateFloat64ToUint64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
+ V(RoundInt32ToFloat32) \
+ V(RoundInt64ToFloat32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundUint32ToFloat32) \
+ V(RoundUint64ToFloat32) \
+ V(RoundUint64ToFloat64) \
+ V(BitcastFloat32ToInt32) \
+ V(BitcastFloat64ToInt64) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(TaggedPoisonOnSpeculation) \
+ V(Word32PoisonOnSpeculation) \
+ V(Word64PoisonOnSpeculation) \
+ V(LoadStackPointer) \
+ V(LoadFramePointer) \
+ V(LoadParentFramePointer) \
+ V(UnalignedLoad) \
+ V(UnalignedStore) \
+ V(Int32PairAdd) \
+ V(Int32PairSub) \
+ V(Int32PairMul) \
+ V(Word32PairShl) \
+ V(Word32PairShr) \
+ V(Word32PairSar) \
+ V(ProtectedLoad) \
+ V(ProtectedStore) \
+ V(Word32AtomicLoad) \
+ V(Word32AtomicStore) \
+ V(Word32AtomicExchange) \
+ V(Word32AtomicCompareExchange) \
+ V(Word32AtomicAdd) \
+ V(Word32AtomicSub) \
+ V(Word32AtomicAnd) \
+ V(Word32AtomicOr) \
+ V(Word32AtomicXor) \
+ V(Word32AtomicPairLoad) \
+ V(Word32AtomicPairStore) \
+ V(Word32AtomicPairAdd) \
+ V(Word32AtomicPairSub) \
+ V(Word32AtomicPairAnd) \
+ V(Word32AtomicPairOr) \
+ V(Word32AtomicPairXor) \
+ V(Word32AtomicPairExchange) \
+ V(Word32AtomicPairCompareExchange) \
+ V(SpeculationFence) \
+ V(SignExtendWord8ToInt32) \
+ V(SignExtendWord16ToInt32) \
+ V(SignExtendWord8ToInt64) \
+ V(SignExtendWord16ToInt64) \
+ V(SignExtendWord32ToInt64) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index a9ae8c322a..67a7b138a5 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -16,8 +16,8 @@ namespace v8 {
namespace internal {
namespace compiler {
-OperationTyper::OperationTyper(Isolate* isolate,
- const JSHeapBroker* js_heap_broker, Zone* zone)
+OperationTyper::OperationTyper(Isolate* isolate, JSHeapBroker* js_heap_broker,
+ Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
Factory* factory = isolate->factory();
infinity_ =
@@ -265,7 +265,9 @@ Type OperationTyper::ConvertReceiver(Type type) {
return type;
}
-Type OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type type) {
+// Returns the result type of converting {type} to number, if the
+// result does not depend on conversion options.
+base::Optional<Type> OperationTyper::ToNumberCommon(Type type) {
if (type.Is(Type::Number())) return type;
if (type.Is(Type::NullOrUndefined())) {
if (type.Is(Type::Null())) return cache_.kSingletonZero;
@@ -289,6 +291,13 @@ Type OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type type) {
}
return Type::Intersect(type, Type::Number(), zone());
}
+ return base::Optional<Type>();
+}
+
+Type OperationTyper::ToNumberOrNumeric(Object::Conversion mode, Type type) {
+ if (base::Optional<Type> maybe_result_type = ToNumberCommon(type)) {
+ return *maybe_result_type;
+ }
if (type.Is(Type::BigInt())) {
return mode == Object::Conversion::kToNumber ? Type::None() : type;
}
@@ -300,6 +309,13 @@ Type OperationTyper::ToNumber(Type type) {
return ToNumberOrNumeric(Object::Conversion::kToNumber, type);
}
+Type OperationTyper::ToNumberConvertBigInt(Type type) {
+ if (base::Optional<Type> maybe_result_type = ToNumberCommon(type)) {
+ return *maybe_result_type;
+ }
+ return Type::Number();
+}
+
Type OperationTyper::ToNumeric(Type type) {
return ToNumberOrNumeric(Object::Conversion::kToNumeric, type);
}
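Illustrative note (not part of the patch): after this refactoring the shared conversion cases live in ToNumberCommon(), and the two entry points differ only in how they treat BigInt inputs. A minimal sketch, assuming an OperationTyper named typer and a type t with t.Is(Type::BigInt()):

    typer.ToNumber(t);               // Type::None() - a plain ToNumber on a BigInt throws
    typer.ToNumberConvertBigInt(t);  // Type::Number() - the BigInt is converted to a number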
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 81f20bcda4..fb5997485c 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -27,14 +27,14 @@ class TypeCache;
class V8_EXPORT_PRIVATE OperationTyper {
public:
- OperationTyper(Isolate* isolate, const JSHeapBroker* js_heap_broker,
- Zone* zone);
+ OperationTyper(Isolate* isolate, JSHeapBroker* js_heap_broker, Zone* zone);
// Typing Phi.
Type Merge(Type left, Type right);
Type ToPrimitive(Type type);
Type ToNumber(Type type);
+ Type ToNumberConvertBigInt(Type type);
Type ToNumeric(Type type);
Type ToBoolean(Type type);
@@ -78,6 +78,7 @@ class V8_EXPORT_PRIVATE OperationTyper {
typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
Type ToNumberOrNumeric(Object::Conversion mode, Type type);
+ base::Optional<Type> ToNumberCommon(Type type);
ComparisonOutcome Invert(ComparisonOutcome);
Type Invert(Type);
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 689561059c..a5d16053d2 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -76,6 +76,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
case IrOpcode::kJSCreateObject:
+ case IrOpcode::kJSCloneObject:
// Property access operations
case IrOpcode::kJSLoadNamed:
@@ -93,6 +94,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 3366d1db94..5717c70348 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -40,6 +40,7 @@
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
@@ -83,6 +84,7 @@
#include "src/register-configuration.h"
#include "src/utils.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
@@ -135,7 +137,7 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
- js_heap_broker_ = new (codegen_zone_) JSHeapBroker(isolate_);
+ js_heap_broker_ = new (codegen_zone_) JSHeapBroker(isolate_, codegen_zone_);
dependencies_ =
new (codegen_zone_) CompilationDependencies(isolate_, codegen_zone_);
}
@@ -146,7 +148,6 @@ class PipelineData {
PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
int wasm_function_index,
const AssemblerOptions& assembler_options)
: isolate_(nullptr),
@@ -171,7 +172,6 @@ class PipelineData {
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
- wasm_compilation_data_(wasm_compilation_data),
assembler_options_(assembler_options) {}
// For machine graph testing entry point.
@@ -301,6 +301,10 @@ class PipelineData {
return jump_optimization_info_;
}
+ const AssemblerOptions& assembler_options() const {
+ return assembler_options_;
+ }
+
CodeTracer* GetCodeTracer() const {
return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
: wasm_engine_->GetCodeTracer();
@@ -393,8 +397,8 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- wasm_compilation_data_, info()->GetPoisoningMitigationLevel(),
- assembler_options_, info_->builtin_index());
+ info()->GetPoisoningMitigationLevel(), assembler_options_,
+ info_->builtin_index());
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -411,10 +415,6 @@ class PipelineData {
const char* debug_name() const { return debug_name_.get(); }
- WasmCompilationData* wasm_compilation_data() const {
- return wasm_compilation_data_;
- }
-
int wasm_function_index() const { return wasm_function_index_; }
private:
@@ -478,8 +478,6 @@ class PipelineData {
// Source position output for --trace-turbo.
std::string source_position_output_;
- WasmCompilationData* wasm_compilation_data_ = nullptr;
-
JumpOptimizationInfo* jump_optimization_info_ = nullptr;
AssemblerOptions assembler_options_;
@@ -527,7 +525,9 @@ class PipelineImpl final {
OptimizedCompilationInfo* info() const;
Isolate* isolate() const;
+ CodeGenerator* code_generator() const;
+ private:
PipelineData* const data_;
};
@@ -997,7 +997,6 @@ class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
wasm::NativeModule* native_module, int function_index, bool asmjs_origin)
: OptimizedCompilationJob(kNoStackLimit, info, "TurboFan",
@@ -1007,7 +1006,7 @@ class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
wasm_engine, function_body, wasm_module, info, &zone_stats_)),
data_(&zone_stats_, wasm_engine, info, mcgraph,
pipeline_statistics_.get(), source_positions, node_origins,
- wasm_compilation_data, function_index, WasmAssemblerOptions()),
+ function_index, WasmAssemblerOptions()),
pipeline_(&data_),
linkage_(call_descriptor),
native_module_(native_module),
@@ -1074,7 +1073,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
pipeline_.AssembleCode(&linkage_);
- CodeGenerator* code_generator = pipeline_.data_->code_generator();
+ CodeGenerator* code_generator = pipeline_.code_generator();
CodeDesc code_desc;
code_generator->tasm()->GetCode(nullptr, &code_desc);
@@ -1083,7 +1082,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
code_generator->frame()->GetTotalFrameSlotCount(),
code_generator->GetSafepointTableOffset(),
code_generator->GetHandlerTableOffset(),
- data_.wasm_compilation_data()->GetProtectedInstructions(),
+ code_generator->GetProtectedInstructions(),
code_generator->GetSourcePositionTable(), wasm::WasmCode::kTurbofan);
if (data_.info()->trace_turbo_json_enabled()) {
@@ -1279,6 +1278,21 @@ struct UntyperPhase {
}
};
+struct CopyMetadataForConcurrentCompilePhase {
+ static const char* phase_name() {
+ return "copy metadata for concurrent compile";
+ }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ data->jsgraph()->Dead());
+ JSHeapCopyReducer heap_copy_reducer(data->js_heap_broker());
+ AddReducer(data, &graph_reducer, &heap_copy_reducer);
+ graph_reducer.ReduceGraph();
+ data->js_heap_broker()->StopSerializing();
+ }
+};
+
struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
@@ -2015,15 +2029,20 @@ bool PipelineImpl::CreateGraph() {
Run<TyperPhase>(&typer);
RunPrintAndVerify(TyperPhase::phase_name());
+ // Do some hacky things to prepare for the optimization phase.
+ // (caching handles, etc.).
+ Run<ConcurrentOptimizationPrepPhase>();
+
+ if (FLAG_concurrent_compiler_frontend) {
+ data->js_heap_broker()->SerializeStandardObjects();
+ Run<CopyMetadataForConcurrentCompilePhase>();
+ }
+
// Lower JSOperators where we can determine types.
Run<TypedLoweringPhase>();
RunPrintAndVerify(TypedLoweringPhase::phase_name());
}
- // Do some hacky things to prepare for the optimization phase.
- // (caching handles, etc.).
- Run<ConcurrentOptimizationPrepPhase>();
-
data->EndPhaseKind();
return true;
@@ -2254,14 +2273,13 @@ OptimizedCompilationJob* Pipeline::NewWasmCompilationJob(
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
wasm::NativeModule* native_module, int function_index,
wasm::ModuleOrigin asmjs_origin) {
return new PipelineWasmCompilationJob(
info, wasm_engine, mcgraph, call_descriptor, source_positions,
- node_origins, wasm_compilation_data, function_body, wasm_module,
- native_module, function_index, asmjs_origin);
+ node_origins, function_body, wasm_module, native_module, function_index,
+ asmjs_origin);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -2271,8 +2289,8 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
Code::STUB);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
+ data.InitializeFrameData(nullptr);
PipelineImpl pipeline(&data);
- pipeline.data_->InitializeFrameData(nullptr);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
@@ -2378,6 +2396,18 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
PoisoningMitigationLevel::kDontPoison) {
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+ } else if (data_->assembler_options().isolate_independent_code) {
+ // TODO(v8:6666): Extend support to user code. Ensure that
+ // it is mutually exclusive with the Poisoning configuration above; and that
+ // it cooperates with restricted allocatable registers above.
+ static_assert(kRootRegister == kSpeculationPoisonRegister,
+ "The following checks assume root equals poison register");
+ CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_branch_load_poisoning);
+ CHECK_IMPLIES(FLAG_embedded_builtins, !FLAG_untrusted_code_mitigations);
+ AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
+ call_descriptor, run_verifier);
+#endif // V8_TARGET_ARCH_IA32
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
@@ -2642,6 +2672,10 @@ OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }
Isolate* PipelineImpl::isolate() const { return data_->isolate(); }
+CodeGenerator* PipelineImpl::code_generator() const {
+ return data_->code_generator();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 95d13f3169..5e4ae8671b 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -37,7 +37,6 @@ class MachineGraph;
class NodeOriginTable;
class Schedule;
class SourcePositionTable;
-class WasmCompilationData;
class Pipeline : public AllStatic {
public:
@@ -51,7 +50,6 @@ class Pipeline : public AllStatic {
OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
MachineGraph* mcgraph, CallDescriptor* call_descriptor,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- WasmCompilationData* wasm_compilation_data,
wasm::FunctionBody function_body, wasm::WasmModule* wasm_module,
wasm::NativeModule* native_module, int function_index,
wasm::ModuleOrigin wasm_origin);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 9c71d65d9c..45cd95a9e0 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -13,6 +13,7 @@
#include "src/double.h"
#include "src/optimized-compilation-info.h"
#include "src/ppc/macro-assembler-ppc.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -881,11 +882,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
Register scratch = kScratchReg;
- Label current_pc;
- __ mov_label_addr(scratch, &current_pc);
-
- __ bind(&current_pc);
- __ subi(scratch, scratch, Operand(__ pc_offset()));
+ __ ComputeCodeStartAddress(scratch);
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
@@ -1834,7 +1831,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
} else {
__ li(i.OutputRegister(1), Operand::Zero());
- __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
__ li(i.OutputRegister(1), Operand(1));
}
}
@@ -1861,7 +1858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
} else {
__ li(i.OutputRegister(1), Operand::Zero());
- __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
__ li(i.OutputRegister(1), Operand(1));
}
}
@@ -2057,6 +2054,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE
+ case kPPC_ByteRev32: {
+ Register input = i.InputRegister(0);
+ Register output = i.OutputRegister();
+ Register temp1 = r0;
+ __ rotlwi(temp1, input, 8);
+ __ rlwimi(temp1, input, 24, 0, 7);
+ __ rlwimi(temp1, input, 24, 16, 23);
+ __ extsw(output, temp1);
+ break;
+ }
+#ifdef V8_TARGET_ARCH_PPC64
+ case kPPC_ByteRev64: {
+ Register input = i.InputRegister(0);
+ Register output = i.OutputRegister();
+ Register temp1 = r0;
+ Register temp2 = kScratchReg;
+ Register temp3 = i.TempRegister(0);
+ __ rldicl(temp1, input, 32, 32);
+ __ rotlwi(temp2, input, 8);
+ __ rlwimi(temp2, input, 24, 0, 7);
+ __ rotlwi(temp3, temp1, 8);
+ __ rlwimi(temp2, input, 24, 16, 23);
+ __ rlwimi(temp3, temp1, 24, 0, 7);
+ __ rlwimi(temp3, temp1, 24, 16, 23);
+ __ rldicr(temp2, temp2, 32, 31);
+ __ orx(output, temp2, temp3);
+ break;
+ }
+#endif // V8_TARGET_ARCH_PPC64
default:
UNREACHABLE();
break;
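Illustrative sketch (not part of the patch): the rotlwi/rlwimi sequences emitted for kPPC_ByteRev32 implement an ordinary 32-bit byte swap (the trailing extsw sign-extends the result into the 64-bit output register). The hypothetical helper below shows the value transformation in plain C++:

    static inline uint32_t ByteRev32(uint32_t x) {
      // Reverse byte order: 0xAABBCCDD -> 0xDDCCBBAA.
      return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
             ((x & 0x00FF0000u) >> 8) | ((x & 0xFF000000u) >> 24);
    }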
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index f68ab3ae68..3f3270028c 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -124,7 +124,9 @@ namespace compiler {
V(PPC_StoreWord32) \
V(PPC_StoreWord64) \
V(PPC_StoreFloat32) \
- V(PPC_StoreDouble)
+ V(PPC_StoreDouble) \
+ V(PPC_ByteRev32) \
+ V(PPC_ByteRev64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index 2b491f1b80..51c92e8e84 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -109,6 +109,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_BitcastFloat32ToInt32:
case kPPC_BitcastInt64ToDouble:
case kPPC_BitcastDoubleToInt64:
+ case kPPC_ByteRev32:
+ case kPPC_ByteRev64:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index b02e80d389..6cb98c4f95 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -937,9 +937,18 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ PPCOperandGenerator g(this);
+ InstructionOperand temp[] = {g.TempRegister()};
+ Emit(kPPC_ByteRev64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), 1, temp);
+}
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_ByteRev32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
@@ -1977,7 +1986,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2012,7 +2021,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2053,7 +2062,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index d319304df6..42b28cfa76 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -192,8 +192,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
// here, once we have the immutable bit in the access_info.
// TODO(turbofan): Given that we already have the field_index here, we
- // might be smarter in the future and not rely on the LookupIterator,
- // but for now let's just do what Crankshaft does.
+ // might be smarter in the future and not rely on the LookupIterator.
LookupIterator it(isolate(), m.Value(), name,
LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.state() == LookupIterator::DATA) {
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 7b569a9a12..6a073be65d 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -26,7 +26,7 @@ class SimplifiedOperatorBuilder;
class PropertyAccessBuilder {
public:
- PropertyAccessBuilder(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ PropertyAccessBuilder(JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
CompilationDependencies* dependencies)
: jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker),
@@ -54,7 +54,7 @@ class PropertyAccessBuilder {
private:
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Graph* graph() const;
Isolate* isolate() const;
@@ -69,7 +69,7 @@ class PropertyAccessBuilder {
Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
JSGraph* jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
CompilationDependencies* dependencies_;
};
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index ff8bebc411..304d0e4ff1 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -229,7 +229,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
Node* WordNot(Node* a) {
if (machine()->Is32()) {
- return Word32Not(a);
+ return Word32BitwiseNot(a);
} else {
return Word64Not(a);
}
@@ -263,7 +263,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Word32NotEqual(Node* a, Node* b) {
return Word32BinaryNot(Word32Equal(a, b));
}
- Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
+ Node* Word32BitwiseNot(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
Node* Word64And(Node* a, Node* b) {
@@ -711,10 +711,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Float64RoundTiesEven().op(), a);
}
Node* Word32ReverseBytes(Node* a) {
- return AddNode(machine()->Word32ReverseBytes().op(), a);
+ return AddNode(machine()->Word32ReverseBytes(), a);
}
Node* Word64ReverseBytes(Node* a) {
- return AddNode(machine()->Word64ReverseBytes().op(), a);
+ return AddNode(machine()->Word64ReverseBytes(), a);
}
// Float64 bit operations.
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 01c80e6954..ab9bd16e81 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -12,6 +12,7 @@
#include "src/compiler/osr.h"
#include "src/optimized-compilation-info.h"
#include "src/s390/macro-assembler-s390.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 340cbb65c1..8174551777 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -329,12 +329,12 @@ ArchOpcode SelectLoadOpcode(Node* node) {
/* Float unary op*/ \
V(BitcastFloat32ToInt32) \
/* V(TruncateFloat64ToWord32) */ \
- /* V(RoundFloat64ToInt32) */ \
- /* V(TruncateFloat32ToInt32) */ \
- /* V(TruncateFloat32ToUint32) */ \
- /* V(TruncateFloat64ToUint32) */ \
- /* V(ChangeFloat64ToInt32) */ \
- /* V(ChangeFloat64ToUint32) */ \
+ V(RoundFloat64ToInt32) \
+ V(TruncateFloat32ToInt32) \
+ V(TruncateFloat32ToUint32) \
+ V(TruncateFloat64ToUint32) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
/* Word32 unary op */ \
V(Word32Clz) \
V(Word32Popcnt) \
@@ -2256,7 +2256,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
@@ -2291,7 +2291,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2339,7 +2339,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
@@ -2609,8 +2609,6 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
- MachineOperatorBuilder::kWord32ReverseBytes |
- MachineOperatorBuilder::kWord64ReverseBytes |
MachineOperatorBuilder::kInt32AbsWithOverflow |
MachineOperatorBuilder::kInt64AbsWithOverflow |
MachineOperatorBuilder::kWord64Popcnt;
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 645f47f706..882e3b9d6e 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -484,16 +484,9 @@ void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
}
} else {
for (int i = 0; i < num_lanes / 2; ++i) {
-#if defined(V8_TARGET_BIG_ENDIAN)
- rep_node[i] =
- graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
- rep_node[i + num_lanes / 2] =
- graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
-#else
rep_node[i] = graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
rep_node[i + num_lanes / 2] =
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
-#endif
}
}
ReplaceNode(node, rep_node, num_lanes);
@@ -554,21 +547,12 @@ void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
}
} else {
for (int i = 0; i < num_lanes / 2; ++i) {
-#if defined(V8_TARGET_BIG_ENDIAN)
- rep_node[i] = FixUpperBits(
- graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
- shift_val);
- rep_node[i + num_lanes / 2] = FixUpperBits(
- graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
- shift_val);
-#else
rep_node[i] = FixUpperBits(
graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
shift_val);
rep_node[i + num_lanes / 2] = FixUpperBits(
graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
shift_val);
-#endif
}
}
ReplaceNode(node, rep_node, num_lanes);
@@ -804,17 +788,10 @@ void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
Node* input = nullptr;
-#if defined(V8_TARGET_BIG_ENDIAN)
- if (i < num_lanes / 2)
- input = rep_right[i];
- else
- input = rep_left[i - num_lanes / 2];
-#else
if (i < num_lanes / 2)
input = rep_left[i];
else
input = rep_right[i - num_lanes / 2];
-#endif
if (is_signed) {
Diamond d_min(graph(), common(), graph()->NewNode(less_op, input, min));
input = d_min.Phi(phi_rep, min, input);
@@ -1366,12 +1343,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node** rep_node = zone()->NewArray<Node*>(16);
for (int i = 0; i < 16; i++) {
int lane = shuffle[i];
-#if defined(V8_TARGET_BIG_ENDIAN)
- rep_node[15 - i] =
- lane < 16 ? rep_left[15 - lane] : rep_right[31 - lane];
-#else
rep_node[i] = lane < 16 ? rep_left[lane] : rep_right[lane - 16];
-#endif
}
ReplaceNode(node, rep_node, 16);
break;
@@ -1487,6 +1459,59 @@ void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
}
}
+template <typename T>
+void SimdScalarLowering::Int32ToSmallerInt(Node** replacements, Node** result) {
+ const int num_ints = sizeof(int32_t) / sizeof(T);
+ const int bit_size = sizeof(T) * 8;
+ const Operator* sign_extend;
+ switch (sizeof(T)) {
+ case 1:
+ sign_extend = machine()->SignExtendWord8ToInt32();
+ break;
+ case 2:
+ sign_extend = machine()->SignExtendWord16ToInt32();
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ for (int i = 0; i < kNumLanes32; i++) {
+ if (replacements[i] != nullptr) {
+ for (int j = 0; j < num_ints; j++) {
+ result[num_ints * i + j] = graph()->NewNode(
+ sign_extend,
+ graph()->NewNode(machine()->Word32Sar(), replacements[i],
+ mcgraph_->Int32Constant(j * bit_size)));
+ }
+ } else {
+ for (int j = 0; j < num_ints; j++) {
+ result[num_ints * i + j] = nullptr;
+ }
+ }
+ }
+}
+
+template <typename T>
+void SimdScalarLowering::SmallerIntToInt32(Node** replacements, Node** result) {
+ const int num_ints = sizeof(int32_t) / sizeof(T);
+ const int bit_size = sizeof(T) * 8;
+ const int bit_mask = (1 << bit_size) - 1;
+
+ for (int i = 0; i < kNumLanes32; ++i) {
+ result[i] = mcgraph_->Int32Constant(0);
+ for (int j = 0; j < num_ints; j++) {
+ if (replacements[num_ints * i + j] != nullptr) {
+ Node* clean_bits = graph()->NewNode(machine()->Word32And(),
+ replacements[num_ints * i + j],
+ mcgraph_->Int32Constant(bit_mask));
+ Node* shift = graph()->NewNode(machine()->Word32Shl(), clean_bits,
+ mcgraph_->Int32Constant(j * bit_size));
+ result[i] = graph()->NewNode(machine()->Word32Or(), result[i], shift);
+ }
+ }
+ }
+}
+
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Node** replacements = GetReplacements(node);
if (ReplacementType(node) == type) {
@@ -1498,7 +1523,9 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
if (ReplacementType(node) == SimdType::kFloat32x4) {
Float32ToInt32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
- UNIMPLEMENTED();
+ SmallerIntToInt32<int16_t>(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt8x16) {
+ SmallerIntToInt32<int8_t>(replacements, result);
} else {
UNREACHABLE();
}
@@ -1511,12 +1538,19 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
UNREACHABLE();
}
} else if (type == SimdType::kInt16x8) {
- if (ReplacementType(node) == SimdType::kInt32x4 ||
- ReplacementType(node) == SimdType::kFloat32x4) {
+ if (ReplacementType(node) == SimdType::kInt32x4) {
+ Int32ToSmallerInt<int16_t>(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kFloat32x4) {
UNIMPLEMENTED();
} else {
UNREACHABLE();
}
+ } else if (type == SimdType::kInt8x16) {
+ if (ReplacementType(node) == SimdType::kInt32x4) {
+ Int32ToSmallerInt<int8_t>(replacements, result);
+ } else {
+ UNIMPLEMENTED();
+ }
} else {
UNREACHABLE();
}
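
The Int32ToSmallerInt / SmallerIntToInt32 helpers added above pack and unpack narrow SIMD lanes through 32-bit words with shift-and-mask arithmetic. Below is a minimal standalone sketch of the same lane math on plain integers; it illustrates only the bit manipulation, not the graph-building API, and the function names are hypothetical.

    // Illustration only: the scalar equivalent of Int32ToSmallerInt<T> /
    // SmallerIntToInt32<T>.  Each 32-bit word holds sizeof(int32_t)/sizeof(T)
    // lanes; extraction shifts then sign-extends, packing masks then ORs.
    #include <cassert>
    #include <cstdint>

    template <typename T>
    T ExtractLane(int32_t word, int lane) {
      const int bit_size = sizeof(T) * 8;
      // Shift the lane down (Word32Sar in the lowering), then sign-extend
      // by narrowing to T (SignExtendWord8/16ToInt32 in the lowering).
      return static_cast<T>(word >> (lane * bit_size));
    }

    template <typename T>
    int32_t PackLane(int32_t word, int lane, T value) {
      const int bit_size = sizeof(T) * 8;
      const uint32_t mask = (1u << bit_size) - 1;
      uint32_t clean = static_cast<uint32_t>(value) & mask;  // "clean_bits"
      return static_cast<int32_t>(static_cast<uint32_t>(word) |
                                  (clean << (lane * bit_size)));  // shift, OR
    }

    int main() {
      int32_t word = 0;
      word = PackLane<int8_t>(word, 0, -1);        // lane 0 holds 0xFF
      word = PackLane<int8_t>(word, 3, 42);        // lane 3 holds 0x2A
      assert(ExtractLane<int8_t>(word, 0) == -1);  // sign-extended back
      assert(ExtractLane<int8_t>(word, 3) == 42);
      return 0;
    }
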
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 9bb6e79cbe..01ea195bdc 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -68,6 +68,10 @@ class SimdScalarLowering {
int ReplacementCount(Node* node);
void Float32ToInt32(Node** replacements, Node** result);
void Int32ToFloat32(Node** replacements, Node** result);
+ template <typename T>
+ void Int32ToSmallerInt(Node** replacements, Node** result);
+ template <typename T>
+ void SmallerIntToInt32(Node** replacements, Node** result);
Node** GetReplacementsWithType(Node* node, SimdType type);
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 74bb7fcd6b..2d82fc99bc 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -286,7 +286,7 @@ class RepresentationSelector {
bool weakened_ = false;
};
- RepresentationSelector(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
+ RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* js_heap_broker,
Zone* zone, RepresentationChanger* changer,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins)
@@ -1563,6 +1563,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric: {
VisitInputs(node);
// TODO(bmeurer): Optimize somewhat based on input type?
@@ -2972,7 +2973,8 @@ class RepresentationSelector {
if (input_type.Is(Type::Number())) {
VisitNoop(node, truncation);
} else {
- CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
+ CheckFloat64HoleMode mode =
+ CheckFloat64HoleParametersOf(node->op()).mode();
switch (mode) {
case CheckFloat64HoleMode::kAllowReturnHole:
if (truncation.IsUnused()) return VisitUnused(node);
@@ -3274,8 +3276,7 @@ class RepresentationSelector {
};
SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker,
- Zone* zone,
+ JSHeapBroker* js_heap_broker, Zone* zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level)
@@ -3297,6 +3298,7 @@ void SimplifiedLowering::LowerAllNodes() {
void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
Node* node, RepresentationSelector* selector) {
DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
+ node->opcode() == IrOpcode::kJSToNumberConvertBigInt ||
node->opcode() == IrOpcode::kJSToNumeric);
Node* value = node->InputAt(0);
Node* context = node->InputAt(1);
@@ -3320,11 +3322,17 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
Node* efalse0 = effect;
Node* vfalse0;
{
- Operator const* op = node->opcode() == IrOpcode::kJSToNumber
- ? ToNumberOperator()
- : ToNumericOperator();
- Node* code = node->opcode() == IrOpcode::kJSToNumber ? ToNumberCode()
- : ToNumericCode();
+ Operator const* op =
+ node->opcode() == IrOpcode::kJSToNumber
+ ? (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntOperator()
+ : ToNumberOperator())
+ : ToNumericOperator();
+ Node* code = node->opcode() == IrOpcode::kJSToNumber
+ ? ToNumberCode()
+ : (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntCode()
+ : ToNumericCode());
vfalse0 = efalse0 = if_false0 = graph()->NewNode(
op, code, value, context, frame_state, efalse0, if_false0);
@@ -3392,6 +3400,7 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToFloat64(
void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToWord32(
Node* node, RepresentationSelector* selector) {
DCHECK(node->opcode() == IrOpcode::kJSToNumber ||
+ node->opcode() == IrOpcode::kJSToNumberConvertBigInt ||
node->opcode() == IrOpcode::kJSToNumeric);
Node* value = node->InputAt(0);
Node* context = node->InputAt(1);
@@ -3412,11 +3421,17 @@ void SimplifiedLowering::DoJSToNumberOrNumericTruncatesToWord32(
Node* efalse0 = effect;
Node* vfalse0;
{
- Operator const* op = node->opcode() == IrOpcode::kJSToNumber
- ? ToNumberOperator()
- : ToNumericOperator();
- Node* code = node->opcode() == IrOpcode::kJSToNumber ? ToNumberCode()
- : ToNumericCode();
+ Operator const* op =
+ node->opcode() == IrOpcode::kJSToNumber
+ ? (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntOperator()
+ : ToNumberOperator())
+ : ToNumericOperator();
+ Node* code = node->opcode() == IrOpcode::kJSToNumber
+ ? ToNumberCode()
+ : (node->opcode() == IrOpcode::kJSToNumberConvertBigInt
+ ? ToNumberConvertBigIntCode()
+ : ToNumericCode());
vfalse0 = efalse0 = if_false0 = graph()->NewNode(
op, code, value, context, frame_state, efalse0, if_false0);
@@ -3922,6 +3937,16 @@ Node* SimplifiedLowering::ToNumberCode() {
return to_number_code_.get();
}
+Node* SimplifiedLowering::ToNumberConvertBigIntCode() {
+ if (!to_number_convert_big_int_code_.is_set()) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kToNumberConvertBigInt);
+ to_number_convert_big_int_code_.set(
+ jsgraph()->HeapConstant(callable.code()));
+ }
+ return to_number_convert_big_int_code_.get();
+}
+
Node* SimplifiedLowering::ToNumericCode() {
if (!to_numeric_code_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
@@ -3942,6 +3967,19 @@ Operator const* SimplifiedLowering::ToNumberOperator() {
return to_number_operator_.get();
}
+Operator const* SimplifiedLowering::ToNumberConvertBigIntOperator() {
+ if (!to_number_convert_big_int_operator_.is_set()) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kToNumberConvertBigInt);
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ auto call_descriptor =
+ Linkage::GetStubCallDescriptor(graph()->zone(), callable.descriptor(),
+ 0, flags, Operator::kNoProperties);
+ to_number_convert_big_int_operator_.set(common()->Call(call_descriptor));
+ }
+ return to_number_convert_big_int_operator_.get();
+}
+
Operator const* SimplifiedLowering::ToNumericOperator() {
if (!to_numeric_operator_.is_set()) {
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumeric);
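
ToNumberConvertBigIntCode() and ToNumberConvertBigIntOperator() above follow the existing lazy, build-once-then-reuse pattern around SetOncePointer. A rough standalone sketch of that pattern follows; SetOnce and the stub-building helper are stand-ins, not the real V8 types.

    // Illustration only: a set-once lazy cache in the style of the
    // to_number_convert_big_int_code_ / _operator_ accessors added above.
    #include <cassert>
    #include <string>

    template <typename T>
    class SetOnce {
     public:
      bool is_set() const { return set_; }
      void set(const T& value) {
        assert(!set_);  // may only be assigned once
        value_ = value;
        set_ = true;
      }
      const T& get() const {
        assert(set_);
        return value_;
      }

     private:
      T value_{};
      bool set_ = false;
    };

    class Lowering {
     public:
      // Built on first use, then reused by every later call site.
      const std::string& ToNumberConvertBigIntCode() {
        if (!code_.is_set()) {
          code_.set(BuildCallableStub("ToNumberConvertBigInt"));
        }
        return code_.get();
      }

     private:
      static std::string BuildCallableStub(const char* name) {
        return std::string("stub:") + name;  // stands in for building the callable
      }
      SetOnce<std::string> code_;
    };

    int main() {
      Lowering lowering;
      const std::string& a = lowering.ToNumberConvertBigIntCode();
      const std::string& b = lowering.ToNumberConvertBigIntCode();
      assert(&a == &b);  // same cached object both times
      return 0;
    }
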
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 86ac8c75ab..7b21b07813 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -23,8 +23,8 @@ class TypeCache;
class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
- SimplifiedLowering(JSGraph* jsgraph, const JSHeapBroker* js_heap_broker,
- Zone* zone, SourcePositionTable* source_position,
+ SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* js_heap_broker, Zone* zone,
+ SourcePositionTable* source_position,
NodeOriginTable* node_origins,
PoisoningMitigationLevel poisoning_level);
~SimplifiedLowering() {}
@@ -48,12 +48,14 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
private:
JSGraph* const jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
Zone* const zone_;
TypeCache const& type_cache_;
SetOncePointer<Node> to_number_code_;
+ SetOncePointer<Node> to_number_convert_big_int_code_;
SetOncePointer<Node> to_numeric_code_;
SetOncePointer<Operator const> to_number_operator_;
+ SetOncePointer<Operator const> to_number_convert_big_int_operator_;
SetOncePointer<Operator const> to_numeric_operator_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
@@ -76,8 +78,10 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
Node* Uint32Mod(Node* const node);
Node* ToNumberCode();
+ Node* ToNumberConvertBigIntCode();
Node* ToNumericCode();
Operator const* ToNumberOperator();
+ Operator const* ToNumberConvertBigIntOperator();
Operator const* ToNumericOperator();
friend class RepresentationSelector;
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 34be9cb0e4..ce7d18f34a 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -33,7 +33,7 @@ Decision DecideObjectIsSmi(Node* const input) {
} // namespace
SimplifiedOperatorReducer::SimplifiedOperatorReducer(
- Editor* editor, JSGraph* jsgraph, const JSHeapBroker* js_heap_broker)
+ Editor* editor, JSGraph* jsgraph, JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
js_heap_broker_(js_heap_broker) {}
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index af827a2788..93104e31b0 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker);
+ JSHeapBroker* js_heap_broker);
~SimplifiedOperatorReducer() final;
const char* reducer_name() const override {
@@ -56,10 +56,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
SimplifiedOperatorBuilder* simplified() const;
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
JSGraph* const jsgraph_;
- const JSHeapBroker* const js_heap_broker_;
+ JSHeapBroker* const js_heap_broker_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 32aafa33d4..0c331bce5e 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -166,9 +166,31 @@ std::ostream& operator<<(std::ostream& os, CheckFloat64HoleMode mode) {
UNREACHABLE();
}
-CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
+CheckFloat64HoleParameters const& CheckFloat64HoleParametersOf(
+ Operator const* op) {
DCHECK_EQ(IrOpcode::kCheckFloat64Hole, op->opcode());
- return OpParameter<CheckFloat64HoleMode>(op);
+ return OpParameter<CheckFloat64HoleParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CheckFloat64HoleParameters const& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) os << "; " << params.feedback();
+ return os;
+}
+
+size_t hash_value(const CheckFloat64HoleParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckFloat64HoleParameters const& lhs,
+ CheckFloat64HoleParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(CheckFloat64HoleParameters const& lhs,
+ CheckFloat64HoleParameters const& rhs) {
+ return !(lhs == rhs);
}
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
@@ -1005,12 +1027,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckFloat64HoleMode kMode>
struct CheckFloat64HoleNaNOperator final
- : public Operator1<CheckFloat64HoleMode> {
+ : public Operator1<CheckFloat64HoleParameters> {
CheckFloat64HoleNaNOperator()
- : Operator1<CheckFloat64HoleMode>(
+ : Operator1<CheckFloat64HoleParameters>(
IrOpcode::kCheckFloat64Hole,
Operator::kFoldable | Operator::kNoThrow, "CheckFloat64Hole", 1,
- 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 0,
+ CheckFloat64HoleParameters(kMode, VectorSlotPair())) {}
};
CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kAllowReturnHole>
kCheckFloat64HoleAllowReturnHoleOperator;
@@ -1289,14 +1312,20 @@ const Operator* SimplifiedOperatorBuilder::ConvertReceiver(
}
const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
- CheckFloat64HoleMode mode) {
- switch (mode) {
- case CheckFloat64HoleMode::kAllowReturnHole:
- return &cache_.kCheckFloat64HoleAllowReturnHoleOperator;
- case CheckFloat64HoleMode::kNeverReturnHole:
- return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
+ CheckFloat64HoleMode mode, VectorSlotPair const& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckFloat64HoleMode::kAllowReturnHole:
+ return &cache_.kCheckFloat64HoleAllowReturnHoleOperator;
+ case CheckFloat64HoleMode::kNeverReturnHole:
+ return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
+ }
+ UNREACHABLE();
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckFloat64HoleParameters>(
+ IrOpcode::kCheckFloat64Hole, Operator::kFoldable | Operator::kNoThrow,
+ "CheckFloat64Hole", 1, 1, 1, 1, 1, 0,
+ CheckFloat64HoleParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 1708b0e06e..df44e899cd 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -14,6 +14,7 @@
#include "src/globals.h"
#include "src/handles.h"
#include "src/machine-type.h"
+#include "src/maybe-handles.h"
#include "src/objects.h"
#include "src/type-hints.h"
#include "src/vector-slot-pair.h"
@@ -23,7 +24,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
-enum class AbortReason;
+enum class AbortReason : uint8_t;
class Zone;
namespace compiler {
@@ -194,9 +195,32 @@ size_t hash_value(CheckFloat64HoleMode);
std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode);
-CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*)
+class CheckFloat64HoleParameters {
+ public:
+ CheckFloat64HoleParameters(CheckFloat64HoleMode mode,
+ VectorSlotPair const& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckFloat64HoleMode mode() const { return mode_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ CheckFloat64HoleMode mode_;
+ VectorSlotPair feedback_;
+};
+
+CheckFloat64HoleParameters const& CheckFloat64HoleParametersOf(Operator const*)
V8_WARN_UNUSED_RESULT;
+std::ostream& operator<<(std::ostream&, CheckFloat64HoleParameters const&);
+
+size_t hash_value(CheckFloat64HoleParameters const&);
+
+bool operator==(CheckFloat64HoleParameters const&,
+ CheckFloat64HoleParameters const&);
+bool operator!=(CheckFloat64HoleParameters const&,
+ CheckFloat64HoleParameters const&);
+
enum class CheckTaggedInputMode : uint8_t {
kNumber,
kNumberOrOddball,
@@ -640,7 +664,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckBounds(const VectorSlotPair& feedback);
const Operator* CheckEqualsInternalizedString();
const Operator* CheckEqualsSymbol();
- const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
+ const Operator* CheckFloat64Hole(CheckFloat64HoleMode, VectorSlotPair const&);
const Operator* CheckHeapObject();
const Operator* CheckIf(DeoptimizeReason deoptimize_reason,
const VectorSlotPair& feedback = VectorSlotPair());
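
CheckFloat64HoleParameters above bundles a mode with a feedback slot, and the accompanying operator==, operator!= and hash_value overloads are what let the pair serve as an operator payload that can be printed, compared and hashed. A minimal sketch of that parameter-object shape follows, with a hypothetical Feedback type standing in for VectorSlotPair.

    // Illustration only: a small parameter struct with the equality/hash/stream
    // support that operator payloads such as CheckFloat64HoleParameters carry.
    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <unordered_set>

    enum class HoleMode { kAllowReturnHole, kNeverReturnHole };

    struct Feedback {  // stand-in for VectorSlotPair
      int slot = -1;
      bool IsValid() const { return slot >= 0; }
      bool operator==(const Feedback& other) const { return slot == other.slot; }
    };

    struct CheckParams {
      HoleMode mode;
      Feedback feedback;
      bool operator==(const CheckParams& other) const {
        return mode == other.mode && feedback == other.feedback;
      }
    };

    std::size_t hash_value(const CheckParams& p) {
      // Cheap combine, standing in for base::hash_combine.
      return std::hash<int>()(static_cast<int>(p.mode)) * 31u +
             std::hash<int>()(p.feedback.slot);
    }

    std::ostream& operator<<(std::ostream& os, const CheckParams& p) {
      os << (p.mode == HoleMode::kAllowReturnHole ? "AllowReturnHole"
                                                  : "NeverReturnHole");
      if (p.feedback.IsValid()) os << "; slot " << p.feedback.slot;
      return os;
    }

    struct CheckParamsHash {
      std::size_t operator()(const CheckParams& p) const { return hash_value(p); }
    };

    int main() {
      std::unordered_set<CheckParams, CheckParamsHash> cache;
      cache.insert(CheckParams{HoleMode::kAllowReturnHole, Feedback{}});
      cache.insert(CheckParams{HoleMode::kAllowReturnHole, Feedback{}});  // deduplicated
      std::cout << *cache.begin() << " (" << cache.size() << " entry)\n";
      return 0;
    }
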
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.cc b/deps/v8/src/compiler/type-narrowing-reducer.cc
index 1b8b5b4657..01afdcb911 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.cc
+++ b/deps/v8/src/compiler/type-narrowing-reducer.cc
@@ -12,7 +12,7 @@ namespace internal {
namespace compiler {
TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker)
+ JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
op_typer_(jsgraph->isolate(), js_heap_broker, zone()) {}
diff --git a/deps/v8/src/compiler/type-narrowing-reducer.h b/deps/v8/src/compiler/type-narrowing-reducer.h
index 77cb07e772..62237ccce3 100644
--- a/deps/v8/src/compiler/type-narrowing-reducer.h
+++ b/deps/v8/src/compiler/type-narrowing-reducer.h
@@ -20,7 +20,7 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker);
+ JSHeapBroker* js_heap_broker);
~TypeNarrowingReducer() final;
const char* reducer_name() const override { return "TypeNarrowingReducer"; }
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 0c001117de..b77fc97859 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -21,7 +21,7 @@ namespace compiler {
TypedOptimization::TypedOptimization(Editor* editor,
CompilationDependencies* dependencies,
JSGraph* jsgraph,
- const JSHeapBroker* js_heap_broker)
+ JSHeapBroker* js_heap_broker)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
@@ -89,8 +89,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
namespace {
-base::Optional<MapRef> GetStableMapFromObjectType(
- const JSHeapBroker* js_heap_broker, Type object_type) {
+base::Optional<MapRef> GetStableMapFromObjectType(JSHeapBroker* js_heap_broker,
+ Type object_type) {
if (object_type.IsHeapConstant()) {
HeapObjectRef object = object_type.AsHeapConstant()->Ref();
MapRef object_map = object.map();
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 3c4b6ed9cd..baee65dd4e 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
TypedOptimization(Editor* editor, CompilationDependencies* dependencies,
- JSGraph* jsgraph, const JSHeapBroker* js_heap_broker);
+ JSGraph* jsgraph, JSHeapBroker* js_heap_broker);
~TypedOptimization();
const char* reducer_name() const override { return "TypedOptimization"; }
@@ -71,11 +71,11 @@ class V8_EXPORT_PRIVATE TypedOptimization final
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* jsgraph() const { return jsgraph_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
Type const true_type_;
Type const false_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 575d4aa893..7627d27b08 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -33,7 +33,7 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(Isolate* isolate, const JSHeapBroker* js_heap_broker, Flags flags,
+Typer::Typer(Isolate* isolate, JSHeapBroker* js_heap_broker, Flags flags,
Graph* graph)
: flags_(flags),
graph_(graph),
@@ -64,7 +64,6 @@ class Typer::Visitor : public Reducer {
const char* reducer_name() const override { return "Typer"; }
Reduction Reduce(Node* node) override {
- DisallowHeapAccess no_heap_access;
if (node->op()->ValueOutputCount() == 0) return NoChange();
switch (node->opcode()) {
#define DECLARE_CASE(x) \
@@ -266,6 +265,7 @@ class Typer::Visitor : public Reducer {
static Type ToLength(Type, Typer*);
static Type ToName(Type, Typer*);
static Type ToNumber(Type, Typer*);
+ static Type ToNumberConvertBigInt(Type, Typer*);
static Type ToNumeric(Type, Typer*);
static Type ToObject(Type, Typer*);
static Type ToString(Type, Typer*);
@@ -530,6 +530,11 @@ Type Typer::Visitor::ToNumber(Type type, Typer* t) {
}
// static
+Type Typer::Visitor::ToNumberConvertBigInt(Type type, Typer* t) {
+ return t->operation_typer_.ToNumberConvertBigInt(type);
+}
+
+// static
Type Typer::Visitor::ToNumeric(Type type, Typer* t) {
return t->operation_typer_.ToNumeric(type);
}
@@ -1125,6 +1130,10 @@ Type Typer::Visitor::TypeJSToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
+Type Typer::Visitor::TypeJSToNumberConvertBigInt(Node* node) {
+ return TypeUnaryOp(node, ToNumberConvertBigInt);
+}
+
Type Typer::Visitor::TypeJSToNumeric(Node* node) {
return TypeUnaryOp(node, ToNumeric);
}
@@ -1214,6 +1223,10 @@ Type Typer::Visitor::TypeJSCreateEmptyLiteralObject(Node* node) {
return Type::OtherObject();
}
+Type Typer::Visitor::TypeJSCloneObject(Node* node) {
+ return Type::OtherObject();
+}
+
Type Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
return Type::OtherObject();
}
@@ -1406,270 +1419,270 @@ Type Typer::Visitor::JSCallTyper(Type fun, Typer* t) {
return Type::NonInternal();
}
JSFunctionRef function = fun.AsHeapConstant()->Ref().AsJSFunction();
- if (!function.HasBuiltinFunctionId()) {
+ if (!function.shared().HasBuiltinFunctionId()) {
return Type::NonInternal();
}
- switch (function.GetBuiltinFunctionId()) {
- case kMathRandom:
+ switch (function.shared().builtin_function_id()) {
+ case BuiltinFunctionId::kMathRandom:
return Type::PlainNumber();
- case kMathFloor:
- case kMathCeil:
- case kMathRound:
- case kMathTrunc:
+ case BuiltinFunctionId::kMathFloor:
+ case BuiltinFunctionId::kMathCeil:
+ case BuiltinFunctionId::kMathRound:
+ case BuiltinFunctionId::kMathTrunc:
return t->cache_.kIntegerOrMinusZeroOrNaN;
// Unary math functions.
- case kMathAbs:
- case kMathExp:
- case kMathExpm1:
+ case BuiltinFunctionId::kMathAbs:
+ case BuiltinFunctionId::kMathExp:
+ case BuiltinFunctionId::kMathExpm1:
return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCos:
- case kMathFround:
- case kMathLog:
- case kMathLog1p:
- case kMathLog10:
- case kMathLog2:
- case kMathSin:
- case kMathSqrt:
- case kMathTan:
+ case BuiltinFunctionId::kMathAcos:
+ case BuiltinFunctionId::kMathAcosh:
+ case BuiltinFunctionId::kMathAsin:
+ case BuiltinFunctionId::kMathAsinh:
+ case BuiltinFunctionId::kMathAtan:
+ case BuiltinFunctionId::kMathAtanh:
+ case BuiltinFunctionId::kMathCbrt:
+ case BuiltinFunctionId::kMathCos:
+ case BuiltinFunctionId::kMathFround:
+ case BuiltinFunctionId::kMathLog:
+ case BuiltinFunctionId::kMathLog1p:
+ case BuiltinFunctionId::kMathLog10:
+ case BuiltinFunctionId::kMathLog2:
+ case BuiltinFunctionId::kMathSin:
+ case BuiltinFunctionId::kMathSqrt:
+ case BuiltinFunctionId::kMathTan:
return Type::Number();
- case kMathSign:
+ case BuiltinFunctionId::kMathSign:
return t->cache_.kMinusOneToOneOrMinusZeroOrNaN;
// Binary math functions.
- case kMathAtan2:
- case kMathPow:
- case kMathMax:
- case kMathMin:
+ case BuiltinFunctionId::kMathAtan2:
+ case BuiltinFunctionId::kMathPow:
+ case BuiltinFunctionId::kMathMax:
+ case BuiltinFunctionId::kMathMin:
return Type::Number();
- case kMathImul:
+ case BuiltinFunctionId::kMathImul:
return Type::Signed32();
- case kMathClz32:
+ case BuiltinFunctionId::kMathClz32:
return t->cache_.kZeroToThirtyTwo;
// Date functions.
- case kDateNow:
+ case BuiltinFunctionId::kDateNow:
return t->cache_.kTimeValueType;
- case kDateGetDate:
+ case BuiltinFunctionId::kDateGetDate:
return t->cache_.kJSDateDayType;
- case kDateGetDay:
+ case BuiltinFunctionId::kDateGetDay:
return t->cache_.kJSDateWeekdayType;
- case kDateGetFullYear:
+ case BuiltinFunctionId::kDateGetFullYear:
return t->cache_.kJSDateYearType;
- case kDateGetHours:
+ case BuiltinFunctionId::kDateGetHours:
return t->cache_.kJSDateHourType;
- case kDateGetMilliseconds:
+ case BuiltinFunctionId::kDateGetMilliseconds:
return Type::Union(Type::Range(0.0, 999.0, t->zone()), Type::NaN(),
t->zone());
- case kDateGetMinutes:
+ case BuiltinFunctionId::kDateGetMinutes:
return t->cache_.kJSDateMinuteType;
- case kDateGetMonth:
+ case BuiltinFunctionId::kDateGetMonth:
return t->cache_.kJSDateMonthType;
- case kDateGetSeconds:
+ case BuiltinFunctionId::kDateGetSeconds:
return t->cache_.kJSDateSecondType;
- case kDateGetTime:
+ case BuiltinFunctionId::kDateGetTime:
return t->cache_.kJSDateValueType;
// Symbol functions.
- case kSymbolConstructor:
+ case BuiltinFunctionId::kSymbolConstructor:
return Type::Symbol();
// BigInt functions.
- case kBigIntConstructor:
+ case BuiltinFunctionId::kBigIntConstructor:
return Type::BigInt();
// Number functions.
- case kNumberConstructor:
+ case BuiltinFunctionId::kNumberConstructor:
return Type::Number();
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
+ case BuiltinFunctionId::kNumberIsFinite:
+ case BuiltinFunctionId::kNumberIsInteger:
+ case BuiltinFunctionId::kNumberIsNaN:
+ case BuiltinFunctionId::kNumberIsSafeInteger:
return Type::Boolean();
- case kNumberParseFloat:
+ case BuiltinFunctionId::kNumberParseFloat:
return Type::Number();
- case kNumberParseInt:
+ case BuiltinFunctionId::kNumberParseInt:
return t->cache_.kIntegerOrMinusZeroOrNaN;
- case kNumberToString:
+ case BuiltinFunctionId::kNumberToString:
return Type::String();
// String functions.
- case kStringConstructor:
+ case BuiltinFunctionId::kStringConstructor:
return Type::String();
- case kStringCharCodeAt:
+ case BuiltinFunctionId::kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
t->zone());
- case kStringCharAt:
+ case BuiltinFunctionId::kStringCharAt:
return Type::String();
- case kStringCodePointAt:
+ case BuiltinFunctionId::kStringCodePointAt:
return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
Type::Undefined(), t->zone());
- case kStringConcat:
- case kStringFromCharCode:
- case kStringFromCodePoint:
+ case BuiltinFunctionId::kStringConcat:
+ case BuiltinFunctionId::kStringFromCharCode:
+ case BuiltinFunctionId::kStringFromCodePoint:
return Type::String();
- case kStringIndexOf:
- case kStringLastIndexOf:
+ case BuiltinFunctionId::kStringIndexOf:
+ case BuiltinFunctionId::kStringLastIndexOf:
return Type::Range(-1.0, String::kMaxLength, t->zone());
- case kStringEndsWith:
- case kStringIncludes:
+ case BuiltinFunctionId::kStringEndsWith:
+ case BuiltinFunctionId::kStringIncludes:
return Type::Boolean();
- case kStringRaw:
- case kStringRepeat:
- case kStringSlice:
+ case BuiltinFunctionId::kStringRaw:
+ case BuiltinFunctionId::kStringRepeat:
+ case BuiltinFunctionId::kStringSlice:
return Type::String();
- case kStringStartsWith:
+ case BuiltinFunctionId::kStringStartsWith:
return Type::Boolean();
- case kStringSubstr:
- case kStringSubstring:
- case kStringToLowerCase:
- case kStringToString:
- case kStringToUpperCase:
- case kStringTrim:
- case kStringTrimEnd:
- case kStringTrimStart:
- case kStringValueOf:
+ case BuiltinFunctionId::kStringSubstr:
+ case BuiltinFunctionId::kStringSubstring:
+ case BuiltinFunctionId::kStringToLowerCase:
+ case BuiltinFunctionId::kStringToString:
+ case BuiltinFunctionId::kStringToUpperCase:
+ case BuiltinFunctionId::kStringTrim:
+ case BuiltinFunctionId::kStringTrimEnd:
+ case BuiltinFunctionId::kStringTrimStart:
+ case BuiltinFunctionId::kStringValueOf:
return Type::String();
- case kStringIterator:
- case kStringIteratorNext:
+ case BuiltinFunctionId::kStringIterator:
+ case BuiltinFunctionId::kStringIteratorNext:
return Type::OtherObject();
- case kArrayEntries:
- case kArrayKeys:
- case kArrayValues:
- case kTypedArrayEntries:
- case kTypedArrayKeys:
- case kTypedArrayValues:
- case kArrayIteratorNext:
- case kMapIteratorNext:
- case kSetIteratorNext:
+ case BuiltinFunctionId::kArrayEntries:
+ case BuiltinFunctionId::kArrayKeys:
+ case BuiltinFunctionId::kArrayValues:
+ case BuiltinFunctionId::kTypedArrayEntries:
+ case BuiltinFunctionId::kTypedArrayKeys:
+ case BuiltinFunctionId::kTypedArrayValues:
+ case BuiltinFunctionId::kArrayIteratorNext:
+ case BuiltinFunctionId::kMapIteratorNext:
+ case BuiltinFunctionId::kSetIteratorNext:
return Type::OtherObject();
- case kTypedArrayToStringTag:
+ case BuiltinFunctionId::kTypedArrayToStringTag:
return Type::Union(Type::InternalizedString(), Type::Undefined(),
t->zone());
// Array functions.
- case kArrayIsArray:
+ case BuiltinFunctionId::kArrayIsArray:
return Type::Boolean();
- case kArrayConcat:
+ case BuiltinFunctionId::kArrayConcat:
return Type::Receiver();
- case kArrayEvery:
+ case BuiltinFunctionId::kArrayEvery:
return Type::Boolean();
- case kArrayFill:
- case kArrayFilter:
+ case BuiltinFunctionId::kArrayFill:
+ case BuiltinFunctionId::kArrayFilter:
return Type::Receiver();
- case kArrayFindIndex:
+ case BuiltinFunctionId::kArrayFindIndex:
return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayForEach:
+ case BuiltinFunctionId::kArrayForEach:
return Type::Undefined();
- case kArrayIncludes:
+ case BuiltinFunctionId::kArrayIncludes:
return Type::Boolean();
- case kArrayIndexOf:
+ case BuiltinFunctionId::kArrayIndexOf:
return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayJoin:
+ case BuiltinFunctionId::kArrayJoin:
return Type::String();
- case kArrayLastIndexOf:
+ case BuiltinFunctionId::kArrayLastIndexOf:
return Type::Range(-1, kMaxSafeInteger, t->zone());
- case kArrayMap:
+ case BuiltinFunctionId::kArrayMap:
return Type::Receiver();
- case kArrayPush:
+ case BuiltinFunctionId::kArrayPush:
return t->cache_.kPositiveSafeInteger;
- case kArrayReverse:
- case kArraySlice:
+ case BuiltinFunctionId::kArrayReverse:
+ case BuiltinFunctionId::kArraySlice:
return Type::Receiver();
- case kArraySome:
+ case BuiltinFunctionId::kArraySome:
return Type::Boolean();
- case kArraySplice:
+ case BuiltinFunctionId::kArraySplice:
return Type::Receiver();
- case kArrayUnshift:
+ case BuiltinFunctionId::kArrayUnshift:
return t->cache_.kPositiveSafeInteger;
// ArrayBuffer functions.
- case kArrayBufferIsView:
+ case BuiltinFunctionId::kArrayBufferIsView:
return Type::Boolean();
// Object functions.
- case kObjectAssign:
+ case BuiltinFunctionId::kObjectAssign:
return Type::Receiver();
- case kObjectCreate:
+ case BuiltinFunctionId::kObjectCreate:
return Type::OtherObject();
- case kObjectIs:
- case kObjectHasOwnProperty:
- case kObjectIsPrototypeOf:
+ case BuiltinFunctionId::kObjectIs:
+ case BuiltinFunctionId::kObjectHasOwnProperty:
+ case BuiltinFunctionId::kObjectIsPrototypeOf:
return Type::Boolean();
- case kObjectToString:
+ case BuiltinFunctionId::kObjectToString:
return Type::String();
// RegExp functions.
- case kRegExpCompile:
+ case BuiltinFunctionId::kRegExpCompile:
return Type::OtherObject();
- case kRegExpExec:
+ case BuiltinFunctionId::kRegExpExec:
return Type::Union(Type::Array(), Type::Null(), t->zone());
- case kRegExpTest:
+ case BuiltinFunctionId::kRegExpTest:
return Type::Boolean();
- case kRegExpToString:
+ case BuiltinFunctionId::kRegExpToString:
return Type::String();
// Function functions.
- case kFunctionBind:
+ case BuiltinFunctionId::kFunctionBind:
return Type::BoundFunction();
- case kFunctionHasInstance:
+ case BuiltinFunctionId::kFunctionHasInstance:
return Type::Boolean();
// Global functions.
- case kGlobalDecodeURI:
- case kGlobalDecodeURIComponent:
- case kGlobalEncodeURI:
- case kGlobalEncodeURIComponent:
- case kGlobalEscape:
- case kGlobalUnescape:
+ case BuiltinFunctionId::kGlobalDecodeURI:
+ case BuiltinFunctionId::kGlobalDecodeURIComponent:
+ case BuiltinFunctionId::kGlobalEncodeURI:
+ case BuiltinFunctionId::kGlobalEncodeURIComponent:
+ case BuiltinFunctionId::kGlobalEscape:
+ case BuiltinFunctionId::kGlobalUnescape:
return Type::String();
- case kGlobalIsFinite:
- case kGlobalIsNaN:
+ case BuiltinFunctionId::kGlobalIsFinite:
+ case BuiltinFunctionId::kGlobalIsNaN:
return Type::Boolean();
// Map functions.
- case kMapClear:
- case kMapForEach:
+ case BuiltinFunctionId::kMapClear:
+ case BuiltinFunctionId::kMapForEach:
return Type::Undefined();
- case kMapDelete:
- case kMapHas:
+ case BuiltinFunctionId::kMapDelete:
+ case BuiltinFunctionId::kMapHas:
return Type::Boolean();
- case kMapEntries:
- case kMapKeys:
- case kMapSet:
- case kMapValues:
+ case BuiltinFunctionId::kMapEntries:
+ case BuiltinFunctionId::kMapKeys:
+ case BuiltinFunctionId::kMapSet:
+ case BuiltinFunctionId::kMapValues:
return Type::OtherObject();
// Set functions.
- case kSetAdd:
- case kSetEntries:
- case kSetValues:
+ case BuiltinFunctionId::kSetAdd:
+ case BuiltinFunctionId::kSetEntries:
+ case BuiltinFunctionId::kSetValues:
return Type::OtherObject();
- case kSetClear:
- case kSetForEach:
+ case BuiltinFunctionId::kSetClear:
+ case BuiltinFunctionId::kSetForEach:
return Type::Undefined();
- case kSetDelete:
- case kSetHas:
+ case BuiltinFunctionId::kSetDelete:
+ case BuiltinFunctionId::kSetHas:
return Type::Boolean();
// WeakMap functions.
- case kWeakMapDelete:
- case kWeakMapHas:
+ case BuiltinFunctionId::kWeakMapDelete:
+ case BuiltinFunctionId::kWeakMapHas:
return Type::Boolean();
- case kWeakMapSet:
+ case BuiltinFunctionId::kWeakMapSet:
return Type::OtherObject();
// WeakSet functions.
- case kWeakSetAdd:
+ case BuiltinFunctionId::kWeakSetAdd:
return Type::OtherObject();
- case kWeakSetDelete:
- case kWeakSetHas:
+ case BuiltinFunctionId::kWeakSetDelete:
+ case BuiltinFunctionId::kWeakSetHas:
return Type::Boolean();
default:
return Type::NonInternal();
@@ -2034,8 +2047,8 @@ Type Typer::Visitor::TypeLoadElement(Node* node) {
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
-#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
- case kExternal##ElemType##Array: \
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype) \
+ case kExternal##ElemType##Array: \
return typer_->cache_.k##ElemType;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -2045,8 +2058,8 @@ Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
-#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
- case kExternal##ElemType##Array: \
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype) \
+ case kExternal##ElemType##Array: \
return typer_->cache_.k##ElemType;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
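
The long JSCallTyper hunk above is mechanical: the builtin id is now read off the shared function info, and every case label is qualified with BuiltinFunctionId::. A compact sketch of that shape follows, using a scoped enum (where qualification is mandatory) and placeholder values and type names.

    // Illustration only: a scoped enum forces qualified case labels, and a
    // default keeps unrecognised builtins conservative, as in JSCallTyper.
    #include <iostream>

    enum class BuiltinFunctionId { kMathRandom, kMathImul, kStringCharAt, kOther };
    enum class TypeTag { kPlainNumber, kSigned32, kString, kNonInternal };

    TypeTag TypeOfBuiltinCall(BuiltinFunctionId id) {
      switch (id) {
        case BuiltinFunctionId::kMathRandom:
          return TypeTag::kPlainNumber;
        case BuiltinFunctionId::kMathImul:
          return TypeTag::kSigned32;
        case BuiltinFunctionId::kStringCharAt:
          return TypeTag::kString;
        default:
          return TypeTag::kNonInternal;  // anything unrecognised stays general
      }
    }

    int main() {
      std::cout << static_cast<int>(
                       TypeOfBuiltinCall(BuiltinFunctionId::kMathImul))
                << "\n";  // prints 1 (kSigned32)
      return 0;
    }
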
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 1720bc776f..741ca481c2 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -25,7 +25,7 @@ class V8_EXPORT_PRIVATE Typer {
};
typedef base::Flags<Flag> Flags;
- Typer(Isolate* isolate, const JSHeapBroker* js_heap_broker, Flags flags,
+ Typer(Isolate* isolate, JSHeapBroker* js_heap_broker, Flags flags,
Graph* graph);
~Typer();
@@ -42,13 +42,13 @@ class V8_EXPORT_PRIVATE Typer {
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
OperationTyper* operation_typer() { return &operation_typer_; }
- const JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() const { return js_heap_broker_; }
Flags const flags_;
Graph* const graph_;
Decorator* decorator_;
TypeCache const& cache_;
- const JSHeapBroker* js_heap_broker_;
+ JSHeapBroker* js_heap_broker_;
OperationTyper operation_typer_;
Type singleton_false_;
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 8a5871fdb0..968d788fcc 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -207,7 +207,10 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
#endif // V8_INTL_SUPPORT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -273,6 +276,7 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
+ case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
@@ -305,7 +309,7 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
// require bit set types, they should get kOtherInternal.
case MUTABLE_HEAP_NUMBER_TYPE:
case FREE_SPACE_TYPE:
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case FIXED_##TYPE##_ARRAY_TYPE:
TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
@@ -321,14 +325,12 @@ Type::bitset BitsetType::Lub(HeapObjectType const& type) {
case PROMISE_REACTION_TYPE:
case DEBUG_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
- case WEAK_CELL_TYPE:
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case PROTOTYPE_INFO_TYPE:
case INTERPRETER_DATA_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
- case ARRAY_BOILERPLATE_DESCRIPTION_TYPE:
case WASM_DEBUG_INFO_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
case LOAD_HANDLER_TYPE:
@@ -820,22 +822,19 @@ Type Type::NewConstant(double value, Zone* zone) {
return OtherNumberConstant(value, zone);
}
-Type Type::NewConstant(const JSHeapBroker* js_heap_broker,
- Handle<i::Object> value, Zone* zone) {
- auto maybe_smi = JSHeapBroker::TryGetSmi(value);
- if (maybe_smi.has_value()) {
- return NewConstant(static_cast<double>(maybe_smi.value()), zone);
+Type Type::NewConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+ Zone* zone) {
+ ObjectRef ref(js_heap_broker, value);
+ if (ref.IsSmi()) {
+ return NewConstant(static_cast<double>(ref.AsSmi()), zone);
}
-
- HeapObjectRef heap_ref(js_heap_broker, value);
- if (heap_ref.IsHeapNumber()) {
- return NewConstant(heap_ref.AsHeapNumber().value(), zone);
+ if (ref.IsHeapNumber()) {
+ return NewConstant(ref.AsHeapNumber().value(), zone);
}
-
- if (heap_ref.IsString() && !heap_ref.IsInternalizedString()) {
+ if (ref.IsString() && !ref.IsInternalizedString()) {
return Type::String();
}
- return HeapConstant(js_heap_broker, value, zone);
+ return HeapConstant(ref.AsHeapObject(), zone);
}
Type Type::Union(Type type1, Type type2, Zone* zone) {
@@ -1061,13 +1060,18 @@ Type Type::OtherNumberConstant(double value, Zone* zone) {
}
// static
-Type Type::HeapConstant(const JSHeapBroker* js_heap_broker,
- Handle<i::Object> value, Zone* zone) {
+Type Type::HeapConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+ Zone* zone) {
return FromTypeBase(
HeapConstantType::New(HeapObjectRef(js_heap_broker, value), zone));
}
// static
+Type Type::HeapConstant(const HeapObjectRef& value, Zone* zone) {
+ return HeapConstantType::New(value, zone);
+}
+
+// static
Type Type::Range(double min, double max, Zone* zone) {
return FromTypeBase(RangeType::New(min, max, zone));
}
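
Type::NewConstant above now classifies the handle through an ObjectRef: Smis and heap numbers become number constants, non-internalized strings widen to Type::String(), and everything else becomes a heap constant. A rough sketch of that classification order on a toy value representation follows; the variant and the returned strings only mimic the real Type factories.

    // Illustration only: the Smi / HeapNumber / String / HeapConstant dispatch
    // order used by Type::NewConstant, over a toy tagged value.
    #include <iostream>
    #include <string>
    #include <variant>

    struct Interned { std::string s; };  // internalized string
    using Value = std::variant<int, double, std::string, Interned>;

    std::string NewConstantType(const Value& v) {
      if (std::holds_alternative<int>(v)) {          // Smi
        return "NumberConstant(" + std::to_string(std::get<int>(v)) + ")";
      }
      if (std::holds_alternative<double>(v)) {       // HeapNumber
        return "NumberConstant(" + std::to_string(std::get<double>(v)) + ")";
      }
      if (std::holds_alternative<std::string>(v)) {  // non-internalized string
        return "String";                             // widened, no constant type
      }
      return "HeapConstant";                         // everything else
    }

    int main() {
      std::cout << NewConstantType(Value{7}) << "\n";
      std::cout << NewConstantType(Value{std::string("abc")}) << "\n";
      std::cout << NewConstantType(Value{Interned{"abc"}}) << "\n";
      return 0;
    }
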
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index fbda845ee2..d27f6e3e75 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -361,22 +361,23 @@ class V8_EXPORT_PRIVATE Type {
static Type UnsignedSmall() { return NewBitset(BitsetType::UnsignedSmall()); }
static Type OtherNumberConstant(double value, Zone* zone);
- static Type HeapConstant(const JSHeapBroker* js_heap_broker,
+ static Type HeapConstant(JSHeapBroker* js_heap_broker,
Handle<i::Object> value, Zone* zone);
+ static Type HeapConstant(const HeapObjectRef& value, Zone* zone);
static Type Range(double min, double max, Zone* zone);
static Type Range(RangeType::Limits lims, Zone* zone);
static Type Tuple(Type first, Type second, Type third, Zone* zone);
static Type Union(int length, Zone* zone);
// NewConstant is a factory that returns Constant, Range or Number.
- static Type NewConstant(const JSHeapBroker* js_heap_broker,
- Handle<i::Object> value, Zone* zone);
+ static Type NewConstant(JSHeapBroker* js_heap_broker, Handle<i::Object> value,
+ Zone* zone);
static Type NewConstant(double value, Zone* zone);
static Type Union(Type type1, Type type2, Zone* zone);
static Type Intersect(Type type1, Type type2, Zone* zone);
- static Type For(const JSHeapBroker* js_heap_broker, Handle<i::Map> map) {
+ static Type For(JSHeapBroker* js_heap_broker, Handle<i::Map> map) {
HeapObjectType type = js_heap_broker->HeapObjectTypeFromMap(map);
return NewBitset(BitsetType::ExpandInternals(BitsetType::Lub(type)));
}
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 52cbd6e0b7..55eaf07711 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -629,6 +629,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Name());
break;
case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToNumberConvertBigInt:
// Type is Number.
CheckTypeIs(node, Type::Number());
break;
@@ -716,6 +717,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateEmptyLiteralObject:
+ case IrOpcode::kJSCloneObject:
case IrOpcode::kJSCreateLiteralRegExp:
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
@@ -1736,6 +1738,22 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64AtomicXor:
case IrOpcode::kWord64AtomicExchange:
case IrOpcode::kWord64AtomicCompareExchange:
+ case IrOpcode::kWord32AtomicPairLoad:
+ case IrOpcode::kWord32AtomicPairStore:
+ case IrOpcode::kWord32AtomicPairAdd:
+ case IrOpcode::kWord32AtomicPairSub:
+ case IrOpcode::kWord32AtomicPairAnd:
+ case IrOpcode::kWord32AtomicPairOr:
+ case IrOpcode::kWord32AtomicPairXor:
+ case IrOpcode::kWord32AtomicPairExchange:
+ case IrOpcode::kWord32AtomicPairCompareExchange:
+ case IrOpcode::kWord64AtomicNarrowAdd:
+ case IrOpcode::kWord64AtomicNarrowSub:
+ case IrOpcode::kWord64AtomicNarrowAnd:
+ case IrOpcode::kWord64AtomicNarrowOr:
+ case IrOpcode::kWord64AtomicNarrowXor:
+ case IrOpcode::kWord64AtomicNarrowExchange:
+ case IrOpcode::kWord64AtomicNarrowCompareExchange:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 1b8f4e9066..f544c2eb10 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -70,16 +70,16 @@ namespace compiler {
(WasmInstanceObject::k##name##Offset - kHeapObjectTag)
#define LOAD_INSTANCE_FIELD(name, type) \
- graph()->NewNode( \
+ SetEffect(graph()->NewNode( \
mcgraph()->machine()->Load(type), instance_node_.get(), \
- mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), *effect_, \
- *control_)
+ mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), Effect(), \
+ Control()))
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::TaggedPointer()), \
- array_node, \
- mcgraph()->Int32Constant(FixedArrayOffsetMinusTag(index)), \
- *effect_, *control_);
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Load(MachineType::TaggedPointer()), array_node, \
+ mcgraph()->Int32Constant(FixedArrayOffsetMinusTag(index)), Effect(), \
+ Control()))
int FixedArrayOffsetMinusTag(uint32_t index) {
auto access = AccessBuilder::ForFixedArraySlot(index);
@@ -214,9 +214,7 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
}
Node* WasmGraphBuilder::RefNull() {
- Node* null = LOAD_INSTANCE_FIELD(NullValue, MachineType::TaggedPointer());
- *effect_ = null;
- return null;
+ return LOAD_INSTANCE_FIELD(NullValue, MachineType::TaggedPointer());
}
Node* WasmGraphBuilder::NoContextConstant() {
@@ -259,7 +257,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
*effect, *control);
Node* limit = graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), limit_address,
- mcgraph()->IntPtrConstant(0), *effect, *control);
+ mcgraph()->IntPtrConstant(0), limit_address, *control);
*effect = limit;
Node* pointer = graph()->NewNode(mcgraph()->machine()->LoadStackPointer());
@@ -893,19 +891,19 @@ Node* Branch(MachineGraph* mcgraph, Node* cond, Node** true_node,
Node* WasmGraphBuilder::BranchNoHint(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, *control_,
+ return Branch(mcgraph(), cond, true_node, false_node, Control(),
BranchHint::kNone);
}
Node* WasmGraphBuilder::BranchExpectTrue(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, *control_,
+ return Branch(mcgraph(), cond, true_node, false_node, Control(),
BranchHint::kTrue);
}
Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
Node** false_node) {
- return Branch(mcgraph(), cond, true_node, false_node, *control_,
+ return Branch(mcgraph(), cond, true_node, false_node, Control(),
BranchHint::kFalse);
}
@@ -936,9 +934,8 @@ TrapId WasmGraphBuilder::GetTrapIdForTrap(wasm::TrapReason reason) {
Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
TrapId trap_id = GetTrapIdForTrap(reason);
- Node* node = graph()->NewNode(mcgraph()->common()->TrapIf(trap_id), cond,
- Effect(), Control());
- *control_ = node;
+ Node* node = SetControl(graph()->NewNode(mcgraph()->common()->TrapIf(trap_id),
+ cond, Effect(), Control()));
SetSourcePosition(node, position);
return node;
}
@@ -946,9 +943,8 @@ Node* WasmGraphBuilder::TrapIfTrue(wasm::TrapReason reason, Node* cond,
Node* WasmGraphBuilder::TrapIfFalse(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
TrapId trap_id = GetTrapIdForTrap(reason);
- Node* node = graph()->NewNode(mcgraph()->common()->TrapUnless(trap_id), cond,
- Effect(), Control());
- *control_ = node;
+ Node* node = SetControl(graph()->NewNode(
+ mcgraph()->common()->TrapUnless(trap_id), cond, Effect(), Control()));
SetSourcePosition(node, position);
return node;
}
@@ -994,7 +990,7 @@ Node* WasmGraphBuilder::ZeroCheck64(wasm::TrapReason reason, Node* node,
}
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
- return graph()->NewNode(mcgraph()->common()->Switch(count), key, *control_);
+ return graph()->NewNode(mcgraph()->common()->Switch(count), key, Control());
}
Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
@@ -1008,9 +1004,6 @@ Node* WasmGraphBuilder::IfDefault(Node* sw) {
}
Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
- DCHECK_NOT_NULL(*control_);
- DCHECK_NOT_NULL(*effect_);
-
static const int kStackAllocatedNodeBufferSize = 8;
Node* stack_buffer[kStackAllocatedNodeBufferSize];
std::vector<Node*> heap_buffer;
@@ -1023,8 +1016,8 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
buf[0] = mcgraph()->Int32Constant(0);
memcpy(buf + 1, vals, sizeof(void*) * count);
- buf[count + 1] = *effect_;
- buf[count + 2] = *control_;
+ buf[count + 1] = Effect();
+ buf[count + 2] = Control();
Node* ret =
graph()->NewNode(mcgraph()->common()->Return(count), count + 3, buf);
@@ -1077,9 +1070,9 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
switch (size_in_bytes) {
case 4:
case 16:
- return m->Word32ReverseBytes().IsSupported();
+ return true;
case 8:
- return m->Word64ReverseBytes().IsSupported();
+ return m->Is64();
default:
break;
}
@@ -1144,16 +1137,16 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
if (ReverseBytesSupported(m, valueSizeInBytes)) {
switch (valueSizeInBytes) {
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word32ReverseBytes(), value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
case 16: {
Node* byte_reversed_lanes[4];
for (int lane = 0; lane < 4; lane++) {
byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes().op(),
+ m->Word32ReverseBytes(),
graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
value));
}
@@ -1279,21 +1272,21 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
switch (valueSizeInBytes) {
case 2:
result =
- graph()->NewNode(m->Word32ReverseBytes().op(),
+ graph()->NewNode(m->Word32ReverseBytes(),
graph()->NewNode(m->Word32Shl(), value,
mcgraph()->Int32Constant(16)));
break;
case 4:
- result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word32ReverseBytes(), value);
break;
case 8:
- result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+ result = graph()->NewNode(m->Word64ReverseBytes(), value);
break;
case 16: {
Node* byte_reversed_lanes[4];
for (int lane = 0; lane < 4; lane++) {
byte_reversed_lanes[lane] = graph()->NewNode(
- m->Word32ReverseBytes().op(),
+ m->Word32ReverseBytes(),
graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
value));
}
@@ -1749,9 +1742,9 @@ Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(input_type, kNoWriteBarrier));
- *effect_ =
- graph()->NewNode(store_op, stack_slot_param, mcgraph()->Int32Constant(0),
- input, *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot_param,
+ mcgraph()->Int32Constant(0), input, Effect(),
+ Control()));
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
@@ -1874,26 +1867,24 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(type.representation(), kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- input0, *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
+ input0, Effect(), Control()));
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
if (input1 != nullptr) {
- *effect_ = graph()->NewNode(store_op, stack_slot,
- mcgraph()->Int32Constant(type_size), input1,
- *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot,
+ mcgraph()->Int32Constant(type_size), input1,
+ Effect(), Control()));
}
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
BuildCCall(&sig, function, stack_slot);
- const Operator* load_op = mcgraph()->machine()->Load(type);
- Node* load = graph()->NewNode(
- load_op, stack_slot, mcgraph()->Int32Constant(0), *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type),
+ stack_slot, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
}
Node* WasmGraphBuilder::BuildF32SConvertI64(Node* input) {
@@ -1930,17 +1921,15 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(parameter_representation, kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- input, *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
+ input, Effect(), Control()));
MachineType sig_types[] = {MachineType::Pointer()};
MachineSignature sig(0, 1, sig_types);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
BuildCCall(&sig, function, stack_slot);
- const Operator* load_op = mcgraph()->machine()->Load(result_type);
- Node* load = graph()->NewNode(
- load_op, stack_slot, mcgraph()->Int32Constant(0), *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
+ stack_slot, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
}
namespace {
@@ -1979,8 +1968,8 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
graph()->NewNode(mcgraph()->machine()->StackSlot(stack_slot_size));
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(float_ty.representation(), kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, Int32Constant(0), input,
- *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, Int32Constant(0), input,
+ Effect(), Control()));
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
Node* function =
@@ -1988,11 +1977,9 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* overflow = BuildCCall(&sig, function, stack_slot);
if (IsTrappingConvertOp(opcode)) {
ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
- const Operator* load_op = mcgraph()->machine()->Load(int_ty);
- Node* load = graph()->NewNode(load_op, stack_slot, Int32Constant(0),
- *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty),
+ stack_slot, Int32Constant(0), Effect(),
+ Control()));
}
Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
Diamond tl_d(graph(), mcgraph()->common(), test, BranchHint::kFalse);
@@ -2005,9 +1992,9 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
sat_d.Nest(nan_d, false);
Node* sat_val =
sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
- const Operator* load_op = mcgraph()->machine()->Load(int_ty);
- Node* load = graph()->NewNode(load_op, stack_slot, Int32Constant(0), *effect_,
- *control_);
+ Node* load =
+ SetEffect(graph()->NewNode(mcgraph()->machine()->Load(int_ty), stack_slot,
+ Int32Constant(0), Effect(), Control()));
Node* nan_val =
nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
return tl_d.Phi(int_ty.representation(), nan_val, load);
@@ -2028,12 +2015,9 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
// Just encode the stub index. This will be patched at relocation.
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmGrowMemory, RelocInfo::WASM_STUB_CALL);
- Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- call_target, input, *effect_, *control_);
-
- *effect_ = call;
- *control_ = call;
- return call;
+ return SetEffect(
+ SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, input, Effect(), Control())));
}
uint32_t WasmGraphBuilder::GetExceptionEncodedSize(
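
The wasm-compiler.cc hunks above replace manual writes to *effect_ / *control_ with SetEffect(...) / SetControl(...) wrappers that record the new dependency and hand the node back, so graph()->NewNode(...) results can be returned in a single expression. A small sketch of that helper shape follows; GraphBuilder, Node and the node names are placeholders, not the V8 classes.

    // Illustration only: SetEffect/SetControl-style helpers that update the
    // builder's current effect/control and return the node, enabling the
    // `return SetEffect(SetControl(NewNode(...)))` pattern used above.
    #include <cassert>
    #include <deque>
    #include <string>

    struct Node { std::string op; };

    class GraphBuilder {
     public:
      GraphBuilder() {
        start_ = NewNode("Start");
        effect_ = control_ = start_;
      }

      Node* NewNode(const std::string& op) {
        nodes_.push_back(Node{op});
        return &nodes_.back();  // deque: earlier elements keep their addresses
      }
      Node* SetEffect(Node* node) { effect_ = node; return node; }
      Node* SetControl(Node* node) { control_ = node; return node; }
      Node* Effect() const { return effect_; }
      Node* Control() const { return control_; }

      // Before: Node* call = NewNode(...); *effect_ = call; *control_ = call; return call;
      // After:  one expression, with the dependencies recorded on the way out.
      Node* GrowMemoryCall() {
        return SetEffect(SetControl(NewNode("Call:WasmGrowMemory")));
      }

     private:
      std::deque<Node> nodes_;
      Node* start_ = nullptr;
      Node* effect_ = nullptr;
      Node* control_ = nullptr;
    };

    int main() {
      GraphBuilder b;
      Node* call = b.GrowMemoryCall();
      assert(b.Effect() == call && b.Control() == call);
      return 0;
    }
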
@@ -2196,21 +2180,21 @@ Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = mcgraph()->machine();
ZeroCheck32(wasm::kTrapDivByZero, right, position);
- Node* before = *control_;
+ Node* before = Control();
Node* denom_is_m1;
Node* denom_is_not_m1;
BranchExpectFalse(
graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
- *control_ = denom_is_m1;
+ SetControl(denom_is_m1);
TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
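  // (kMinInt / -1 is the only remaining case that overflows int32 division,
  // hence the trap above.)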
- if (*control_ != denom_is_m1) {
- *control_ = graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- *control_);
+ if (Control() != denom_is_m1) {
+ SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
+ Control()));
} else {
- *control_ = before;
+ SetControl(before);
}
- return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ return graph()->NewNode(m->Int32Div(), left, right, Control());
}
Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
@@ -2223,7 +2207,7 @@ Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
graph(), mcgraph()->common(),
graph()->NewNode(m->Word32Equal(), right, mcgraph()->Int32Constant(-1)),
BranchHint::kFalse);
- d.Chain(*control_);
+ d.Chain(Control());
return d.Phi(MachineRepresentation::kWord32, mcgraph()->Int32Constant(0),
graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
@@ -2254,7 +2238,7 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
// The result is the negation of the left input.
return graph()->NewNode(m->Int32Sub(), mcgraph()->Int32Constant(0), left);
}
- return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ return graph()->NewNode(m->Int32Div(), left, right, Control());
}
// asm.js semantics return 0 on divide or mod by zero.
@@ -2294,7 +2278,7 @@ Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
if (mr.Value() == 0 || mr.Value() == -1) {
return zero;
}
- return graph()->NewNode(m->Int32Mod(), left, right, *control_);
+ return graph()->NewNode(m->Int32Mod(), left, right, Control());
}
// General case for signed integer modulus, with optimization for (unknown)
@@ -2423,23 +2407,23 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
MachineType::Int64(), wasm::kTrapDivByZero, position);
}
ZeroCheck64(wasm::kTrapDivByZero, right, position);
- Node* before = *control_;
+ Node* before = Control();
Node* denom_is_m1;
Node* denom_is_not_m1;
BranchExpectFalse(graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
mcgraph()->Int64Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
- *control_ = denom_is_m1;
+ SetControl(denom_is_m1);
TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
std::numeric_limits<int64_t>::min(), position);
- if (*control_ != denom_is_m1) {
- *control_ = graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
- *control_);
+ if (Control() != denom_is_m1) {
+ SetControl(graph()->NewNode(mcgraph()->common()->Merge(2), denom_is_not_m1,
+ Control()));
} else {
- *control_ = before;
+ SetControl(before);
}
return graph()->NewNode(mcgraph()->machine()->Int64Div(), left, right,
- *control_);
+ Control());
}
Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
@@ -2453,7 +2437,7 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
graph()->NewNode(mcgraph()->machine()->Word64Equal(), right,
mcgraph()->Int64Constant(-1)));
- d.Chain(*control_);
+ d.Chain(Control());
Node* rem = graph()->NewNode(mcgraph()->machine()->Int64Mod(), left, right,
d.if_false);
@@ -2491,11 +2475,11 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
const Operator* store_op = mcgraph()->machine()->Store(
StoreRepresentation(MachineRepresentation::kWord64, kNoWriteBarrier));
- *effect_ = graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
- left, *effect_, *control_);
- *effect_ = graph()->NewNode(store_op, stack_slot,
- mcgraph()->Int32Constant(sizeof(double)), right,
- *effect_, *control_);
+ SetEffect(graph()->NewNode(store_op, stack_slot, mcgraph()->Int32Constant(0),
+ left, Effect(), Control()));
+ SetEffect(graph()->NewNode(store_op, stack_slot,
+ mcgraph()->Int32Constant(sizeof(double)), right,
+ Effect(), Control()));
MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
MachineSignature sig(1, 1, sig_types);
@@ -2505,11 +2489,9 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
ZeroCheck32(trap_zero, call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
- const Operator* load_op = mcgraph()->machine()->Load(result_type);
- Node* load = graph()->NewNode(
- load_op, stack_slot, mcgraph()->Int32Constant(0), *effect_, *control_);
- *effect_ = load;
- return load;
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->Load(result_type),
+ stack_slot, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
}
template <typename... Args>
@@ -2517,15 +2499,13 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Args... args) {
DCHECK_LE(sig->return_count(), 1);
DCHECK_EQ(sizeof...(args), sig->parameter_count());
- Node* const call_args[] = {function, args..., *effect_, *control_};
+ Node* const call_args[] = {function, args..., Effect(), Control()};
auto call_descriptor =
Linkage::GetSimplifiedCDescriptor(mcgraph()->zone(), sig);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
- Node* call = graph()->NewNode(op, arraysize(call_args), call_args);
- *effect_ = call;
- return call;
+ return SetEffect(graph()->NewNode(op, arraysize(call_args), call_args));
}
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
@@ -2550,17 +2530,16 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
args[1] = instance_node;
// Add effect and control inputs.
- args[params + 2] = *effect_;
- args[params + 3] = *control_;
+ args[params + 2] = Effect();
+ args[params + 3] = Control();
auto call_descriptor =
GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
- Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+ Node* call = SetEffect(graph()->NewNode(op, static_cast<int>(count), args));
DCHECK(position == wasm::kNoCodePosition || position > 0);
if (position > 0) SetSourcePosition(call, position);
- *effect_ = call;
size_t ret_count = sig->return_count();
if (ret_count == 0) return call; // No return value.
@@ -2590,10 +2569,10 @@ Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
// Load the target from the imported_targets array at a known offset.
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = graph()->NewNode(
+ Node* target_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- mcgraph()->Int32Constant(func_index * kPointerSize),
- mcgraph()->graph()->start(), mcgraph()->graph()->start());
+ mcgraph()->Int32Constant(func_index * kPointerSize), Effect(),
+ Control()));
args[0] = target_node;
return BuildWasmCall(sig, args, rets, position, instance_node,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
@@ -2613,19 +2592,18 @@ Node* WasmGraphBuilder::BuildImportWasmCall(wasm::FunctionSig* sig, Node** args,
Node* func_index_times_pointersize = graph()->NewNode(
mcgraph()->machine()->IntMul(), Uint32ToUintptr(func_index),
mcgraph()->Int32Constant(kPointerSize));
- Node* instance_node =
+ Node* instance_node = SetEffect(
graph()->NewNode(mcgraph()->machine()->Load(MachineType::TaggedPointer()),
imported_instances_data, func_index_times_pointersize,
- *effect_, *control_);
+ Effect(), Control()));
// Load the target from the imported_targets array at the offset of
// {func_index}.
Node* imported_targets =
LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
- Node* target_node = graph()->NewNode(
+ Node* target_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
- func_index_times_pointersize, mcgraph()->graph()->start(),
- mcgraph()->graph()->start());
+ func_index_times_pointersize, Effect(), Control()));
args[0] = target_node;
return BuildWasmCall(sig, args, rets, position, instance_node,
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
@@ -2686,19 +2664,12 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
int32_t expected_sig_id = env_->module->signature_ids[sig_index];
- Node* scaled_key =
- graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2));
- const Operator* add = nullptr;
- if (machine->Is64()) {
- scaled_key = graph()->NewNode(machine->ChangeUint32ToUint64(), scaled_key);
- add = machine->Int64Add();
- } else {
- add = machine->Int32Add();
- }
+ Node* scaled_key = Uint32ToUintptr(
+ graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
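+  // Signature ids are loaded as int32, hence the key is scaled by 4 here; the
+  // function-target table below is scaled by kPointerSize instead.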
Node* loaded_sig =
- graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
- scaled_key, *effect_, *control_);
+ SetEffect(graph()->NewNode(machine->Load(MachineType::Int32()),
+ ift_sig_ids, scaled_key, Effect(), Control()));
Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
Int32Constant(expected_sig_id));
@@ -2712,14 +2683,15 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
scaled_key = graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2));
- Node* target = graph()->NewNode(machine->Load(MachineType::Pointer()),
- ift_targets, scaled_key, *effect_, *control_);
+ Node* target =
+ SetEffect(graph()->NewNode(machine->Load(MachineType::Pointer()),
+ ift_targets, scaled_key, Effect(), Control()));
auto access = AccessBuilder::ForFixedArrayElement();
- Node* target_instance = graph()->NewNode(
+ Node* target_instance = SetEffect(graph()->NewNode(
machine->Load(MachineType::TaggedPointer()),
- graph()->NewNode(add, ift_instances, scaled_key),
- Int32Constant(access.header_size - access.tag()), *effect_, *control_);
+ graph()->NewNode(machine->IntAdd(), ift_instances, scaled_key),
+ Int32Constant(access.header_size - access.tag()), Effect(), Control()));
args[0] = target;
@@ -2805,33 +2777,26 @@ Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
void WasmGraphBuilder::InitInstanceCache(
WasmInstanceCacheNodes* instance_cache) {
DCHECK_NOT_NULL(instance_node_);
- DCHECK_NOT_NULL(*control_);
- DCHECK_NOT_NULL(*effect_);
// Load the memory start.
- Node* mem_start = graph()->NewNode(
+ instance_cache->mem_start = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()), instance_node_.get(),
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryStart)),
- *effect_, *control_);
- *effect_ = mem_start;
- instance_cache->mem_start = mem_start;
+ Effect(), Control()));
// Load the memory size.
- Node* mem_size = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+ instance_cache->mem_size = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::UintPtr()), instance_node_.get(),
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemorySize)),
- *effect_, *control_);
- *effect_ = mem_size;
- instance_cache->mem_size = mem_size;
+ Effect(), Control()));
if (untrusted_code_mitigations_) {
// Load the memory mask.
- Node* mem_mask = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+ instance_cache->mem_mask = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::UintPtr()),
+ instance_node_.get(),
mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryMask)),
- *effect_, *control_);
- *effect_ = mem_mask;
- instance_cache->mem_mask = mem_mask;
+ Effect(), Control()));
} else {
// Explicitly set to nullptr to ensure a SEGV when we try to use it.
instance_cache->mem_mask = nullptr;
@@ -2874,13 +2839,13 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
WasmInstanceCacheNodes* from,
Node* merge) {
- to->mem_size = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
- to->mem_size, from->mem_size);
+ to->mem_size = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
+ merge, to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
merge, to->mem_start, from->mem_start);
if (untrusted_code_mitigations_) {
- to->mem_mask = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
- to->mem_mask, from->mem_mask);
+ to->mem_mask = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
+ merge, to->mem_mask, from->mem_mask);
}
}
@@ -2924,7 +2889,6 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
Node** offset_node) {
DCHECK_NOT_NULL(instance_node_);
if (global.mutability && global.imported) {
- DCHECK(FLAG_experimental_wasm_mut_global);
if (imported_mutable_globals_ == nullptr) {
// Load imported_mutable_globals_ from the instance object at runtime.
imported_mutable_globals_ = graph()->NewNode(
@@ -2934,13 +2898,12 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
WASM_INSTANCE_OBJECT_OFFSET(ImportedMutableGlobals)),
graph()->start(), graph()->start());
}
- *base_node = graph()->NewNode(
+ *base_node = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()),
imported_mutable_globals_.get(),
- mcgraph()->Int32Constant(global.index * sizeof(Address)), *effect_,
- *control_);
+ mcgraph()->Int32Constant(global.index * sizeof(Address)), Effect(),
+ Control()));
*offset_node = mcgraph()->Int32Constant(0);
- *effect_ = *base_node;
} else {
if (globals_start_ == nullptr) {
// Load globals_start from the instance object at runtime.
@@ -2985,9 +2948,14 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
DCHECK_NOT_NULL(instance_cache_);
Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
- return graph()->NewNode(
- mcgraph()->machine()->Word32Shr(), mem_size,
- mcgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
+ Node* result =
+ graph()->NewNode(mcgraph()->machine()->WordShr(), mem_size,
+ mcgraph()->Int32Constant(wasm::kWasmPageSizeLog2));
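+  // Wasm pages are 64 KiB (kWasmPageSizeLog2 == 16), so e.g. a 2 MiB memory
+  // yields 0x200000 >> 16 == 32 pages.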
+ if (mcgraph()->machine()->Is64()) {
+ result =
+ graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), result);
+ }
+ return result;
}
// Only call this function for code which is not reused across instantiations,
@@ -3003,7 +2971,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
// The CEntryStub is loaded from the instance_node so that generated code is
// Isolate independent. At the moment this is only done for CEntryStub(1).
DCHECK_EQ(1, fun->result_size);
- Node* centry_stub = *effect_ =
+ Node* centry_stub =
LOAD_INSTANCE_FIELD(CEntryStub, MachineType::TaggedPointer());
// At the moment we only allow 4 parameters. If more parameters are needed,
// increase this constant accordingly.
@@ -3019,14 +2987,11 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
mcgraph()->ExternalConstant(ExternalReference::Create(f)); // ref
inputs[count++] = mcgraph()->Int32Constant(fun->nargs); // arity
inputs[count++] = js_context; // js_context
- inputs[count++] = *effect_;
- inputs[count++] = *control_;
+ inputs[count++] = Effect();
+ inputs[count++] = Control();
- Node* node = mcgraph()->graph()->NewNode(
- mcgraph()->common()->Call(call_descriptor), count, inputs);
- *effect_ = node;
-
- return node;
+ return SetEffect(mcgraph()->graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), count, inputs));
}
Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
@@ -3043,10 +3008,13 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, env_->module->globals[index], &base,
&offset);
- Node* node = graph()->NewNode(mcgraph()->machine()->Load(mem_type), base,
- offset, *effect_, *control_);
- *effect_ = node;
- return node;
+ Node* load = SetEffect(graph()->NewNode(mcgraph()->machine()->Load(mem_type),
+ base, offset, Effect(), Control()));
+#if defined(V8_TARGET_BIG_ENDIAN)
+ load = BuildChangeEndiannessLoad(load, mem_type,
+ env_->module->globals[index].type);
+#endif
+ return load;
}
Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
@@ -3058,44 +3026,58 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
&offset);
const Operator* op = mcgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
- Node* node = graph()->NewNode(op, base, offset, val, *effect_, *control_);
- *effect_ = node;
- return node;
+#if defined(V8_TARGET_BIG_ENDIAN)
+ val = BuildChangeEndiannessStore(val, mem_type.representation(),
+ env_->module->globals[index].type);
+#endif
+ return SetEffect(
+ graph()->NewNode(op, base, offset, val, Effect(), Control()));
+}
+
+Node* WasmGraphBuilder::CheckBoundsAndAlignment(
+ uint8_t access_size, Node* index, uint32_t offset,
+ wasm::WasmCodePosition position) {
+  // Atomic operations access memory and need to be bounds checked until
+  // trap handlers are enabled for atomic operations.
+ index =
+ BoundsCheckMem(access_size, index, offset, position, kNeedsBoundsCheck);
+ Node* effective_address =
+ graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(offset),
+ Uint32ToUintptr(index));
+  // Unlike regular memory accesses, unaligned accesses in atomic operations
+  // must trap.
+  // Access sizes are powers of two, so the modulus is computed with a mask
+  // rather than a division.
+ Node* cond =
+ graph()->NewNode(mcgraph()->machine()->WordAnd(), effective_address,
+ IntPtrConstant(access_size - 1));
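+  // E.g. for a 4-byte access the mask is 3, so {cond} holds the low two bits
+  // of the effective address; any nonzero value traps below.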
+ TrapIfFalse(wasm::kTrapUnalignedAccess,
+ graph()->NewNode(mcgraph()->machine()->Word32Equal(), cond,
+ mcgraph()->Int32Constant(0)),
+ position);
+ return index;
}
Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uint32_t offset,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
- if (FLAG_wasm_no_bounds_checks) return Uint32ToUintptr(index);
- DCHECK_NOT_NULL(instance_cache_);
- Node* mem_size = instance_cache_->mem_size;
- DCHECK_NOT_NULL(mem_size);
+ DCHECK_LE(1, access_size);
+ index = Uint32ToUintptr(index);
+ if (FLAG_wasm_no_bounds_checks) return index;
- auto m = mcgraph()->machine();
if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
- // Simply zero out the 32-bits on 64-bit targets and let the trap handler
- // do its job.
- return Uint32ToUintptr(index);
+ return index;
}
- uint32_t min_size = env_->module->initial_pages * wasm::kWasmPageSize;
- uint32_t max_size =
- (env_->module->has_maximum_pages ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages) *
- wasm::kWasmPageSize;
-
- if (access_size > max_size || offset > max_size - access_size) {
+ const bool statically_oob = access_size > env_->max_memory_size ||
+ offset > env_->max_memory_size - access_size;
+ if (statically_oob) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return mcgraph()->IntPtrConstant(0);
}
- DCHECK_LE(1, access_size);
- // This computation cannot overflow, since
- // {offset <= max_size - access_size <= kMaxUint32 - access_size}.
- // It also cannot underflow, since {access_size >= 1}.
- uint32_t end_offset = offset + access_size - 1;
- Node* end_offset_node = Int32Constant(end_offset);
+ uint64_t end_offset = uint64_t{offset} + access_size - 1u;
+ Node* end_offset_node = IntPtrConstant(end_offset);
// The accessed memory is [index + offset, index + end_offset].
// Check that the last read byte (at {index + end_offset}) is in bounds.
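  // E.g. an 8-byte access at offset 16 touches bytes [index + 16, index + 23],
  // so end_offset is 23.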
@@ -3106,42 +3088,42 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// - computing {effective_size} as {mem_size - end_offset} and
// - checking that {index < effective_size}.
- if (end_offset >= min_size) {
+ auto m = mcgraph()->machine();
+ Node* mem_size = instance_cache_->mem_size;
+ if (end_offset >= env_->min_memory_size) {
// The end offset is larger than the smallest memory.
- // Dynamically check the end offset against the actual memory size, which
- // is not known at compile time.
- Node* cond = graph()->NewNode(mcgraph()->machine()->Uint32LessThan(),
- end_offset_node, mem_size);
+ // Dynamically check the end offset against the dynamic memory size.
+ Node* cond = graph()->NewNode(m->UintLessThan(), end_offset_node, mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
- // The end offset is within the bounds of the smallest memory, so only
- // one check is required. Check to see if the index is also a constant.
- Uint32Matcher match(index);
+ // The end offset is smaller than the smallest memory, so only one check is
+ // required. Check to see if the index is also a constant.
+ UintPtrMatcher match(index);
if (match.HasValue()) {
- uint32_t index_val = match.Value();
- if (index_val < min_size - end_offset) {
+ uintptr_t index_val = match.Value();
+ if (index_val < env_->min_memory_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
- return Uint32ToUintptr(index);
+ return index;
}
}
}
// This produces a positive number, since {end_offset < min_size <= mem_size}.
- Node* effective_size = graph()->NewNode(mcgraph()->machine()->Int32Sub(),
- mem_size, end_offset_node);
+ Node* effective_size =
+ graph()->NewNode(m->IntSub(), mem_size, end_offset_node);
// Introduce the actual bounds check.
- Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
+ Node* cond = graph()->NewNode(m->UintLessThan(), index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
if (untrusted_code_mitigations_) {
// In the fallthrough case, condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index = graph()->NewNode(m->Word32And(), index, mem_mask);
+ index = graph()->NewNode(m->WordAnd(), index, mem_mask);
}
- return Uint32ToUintptr(index);
+ return index;
}
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
@@ -3178,9 +3160,9 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Int32Constant(offset), index);
auto store = [&](int offset, MachineRepresentation rep, Node* data) {
- *effect_ = graph()->NewNode(
+ SetEffect(graph()->NewNode(
mcgraph()->machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
- info, mcgraph()->Int32Constant(offset), data, *effect_, *control_);
+ info, mcgraph()->Int32Constant(offset), data, Effect(), Control()));
};
// Store address, is_store, and mem_rep.
store(offsetof(wasm::MemoryTracingInfo, address),
@@ -3212,20 +3194,20 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
mcgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
if (use_trap_handler()) {
load = graph()->NewNode(mcgraph()->machine()->ProtectedLoad(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ MemBuffer(offset), index, Effect(), Control());
SetSourcePosition(load, position);
} else {
load = graph()->NewNode(mcgraph()->machine()->Load(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ MemBuffer(offset), index, Effect(), Control());
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
DCHECK(!use_trap_handler());
load = graph()->NewNode(mcgraph()->machine()->UnalignedLoad(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ MemBuffer(offset), index, Effect(), Control());
}
- *effect_ = load;
+ SetEffect(load);
#if defined(V8_TARGET_BIG_ENDIAN)
load = BuildChangeEndiannessLoad(load, memtype, type);
@@ -3270,13 +3252,13 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
if (use_trap_handler()) {
store =
graph()->NewNode(mcgraph()->machine()->ProtectedStore(mem_rep),
- MemBuffer(offset), index, val, *effect_, *control_);
+ MemBuffer(offset), index, val, Effect(), Control());
SetSourcePosition(store, position);
} else {
StoreRepresentation rep(mem_rep, kNoWriteBarrier);
store =
graph()->NewNode(mcgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, *effect_, *control_);
+ index, val, Effect(), Control());
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
@@ -3284,10 +3266,10 @@ Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
UnalignedStoreRepresentation rep(mem_rep);
store =
graph()->NewNode(mcgraph()->machine()->UnalignedStore(rep),
- MemBuffer(offset), index, val, *effect_, *control_);
+ MemBuffer(offset), index, val, Effect(), Control());
}
- *effect_ = store;
+ SetEffect(store);
if (FLAG_wasm_trace_memory) {
TraceMemoryOperation(true, mem_rep, index, offset, position);
@@ -3322,42 +3304,38 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
DCHECK_NOT_NULL(mem_start);
DCHECK_NOT_NULL(mem_size);
- // Asm.js semantics are defined along the lines of typed arrays, hence OOB
+ // Asm.js semantics are defined in terms of typed arrays, hence OOB
// reads return {undefined} coerced to the result type (0 for integers, NaN
// for float and double).
// Note that we check against the memory size ignoring the size of the
// stored value, which is conservative if misaligned. Technically, asm.js
// should never have misaligned accesses.
+ index = Uint32ToUintptr(index);
Diamond bounds_check(
graph(), mcgraph()->common(),
- graph()->NewNode(mcgraph()->machine()->Uint32LessThan(), index, mem_size),
+ graph()->NewNode(mcgraph()->machine()->UintLessThan(), index, mem_size),
BranchHint::kTrue);
- bounds_check.Chain(*control_);
+ bounds_check.Chain(Control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
- index =
- graph()->NewNode(mcgraph()->machine()->Word32And(), index, mem_mask);
+ index = graph()->NewNode(mcgraph()->machine()->WordAnd(), index, mem_mask);
}
- index = Uint32ToUintptr(index);
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
- index, *effect_, bounds_check.if_true);
- Node* value_phi =
- bounds_check.Phi(type.representation(), load,
- GetAsmJsOOBValue(type.representation(), mcgraph()));
- Node* effect_phi = bounds_check.EffectPhi(load, *effect_);
- *effect_ = effect_phi;
- *control_ = bounds_check.merge;
- return value_phi;
+ index, Effect(), bounds_check.if_true);
+ SetEffect(bounds_check.EffectPhi(load, Effect()));
+ SetControl(bounds_check.merge);
+ return bounds_check.Phi(type.representation(), load,
+ GetAsmJsOOBValue(type.representation(), mcgraph()));
}
Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
if (mcgraph()->machine()->Is32()) return node;
// Fold instances of ChangeUint32ToUint64(IntConstant) directly.
- UintPtrMatcher matcher(node);
+ Uint32Matcher matcher(node);
if (matcher.HasValue()) {
uintptr_t value = matcher.Value();
return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
@@ -3381,7 +3359,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
graph(), mcgraph()->common(),
graph()->NewNode(mcgraph()->machine()->Uint32LessThan(), index, mem_size),
BranchHint::kTrue);
- bounds_check.Chain(*control_);
+ bounds_check.Chain(Control());
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
@@ -3394,11 +3372,10 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
index = Uint32ToUintptr(index);
const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
- Node* store = graph()->NewNode(store_op, mem_start, index, val, *effect_,
+ Node* store = graph()->NewNode(store_op, mem_start, index, val, Effect(),
bounds_check.if_true);
- Node* effect_phi = bounds_check.EffectPhi(store, *effect_);
- *effect_ = effect_phi;
- *control_ = bounds_check.merge;
+ SetEffect(bounds_check.EffectPhi(store, Effect()));
+ SetControl(bounds_check.merge);
return val;
}
@@ -3921,17 +3898,16 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position) {
- // TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
#define BUILD_ATOMIC_BINOP(Name, Operation, Type, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ MemBuffer(offset), index, inputs[1], Effect(), Control()); \
break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
@@ -3939,39 +3915,39 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
#define BUILD_ATOMIC_CMP_EXCHG(Name, Type, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##AtomicCompareExchange( \
MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
+ MemBuffer(offset), index, inputs[1], inputs[2], Effect(), Control()); \
break; \
}
ATOMIC_CMP_EXCHG_LIST(BUILD_ATOMIC_CMP_EXCHG)
#undef BUILD_ATOMIC_CMP_EXCHG
-#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
+ case wasm::kExpr##Name: { \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
+ node = graph()->NewNode( \
+ mcgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), index, Effect(), Control()); \
+ break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep, Prefix) \
case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::ValueTypes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
+ Node* index = CheckBoundsAndAlignment( \
+ wasm::ValueTypes::MemSize(MachineType::Type()), inputs[0], offset, \
+ position); \
node = graph()->NewNode( \
mcgraph()->machine()->Prefix##AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ MemBuffer(offset), index, inputs[1], Effect(), Control()); \
break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
@@ -3979,8 +3955,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
}
- *effect_ = node;
- return node;
+ return SetEffect(node);
}
#undef ATOMIC_BINOP_LIST
@@ -4067,13 +4042,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
allocate_heap_number_operator_.set(common->Call(call_descriptor));
}
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
- target, *effect_, control);
- Node* store =
+ target, Effect(), control);
+ SetEffect(
graph()->NewNode(machine->Store(StoreRepresentation(
MachineRepresentation::kFloat64, kNoWriteBarrier)),
heap_number, BuildHeapNumberValueIndexConstant(),
- value, heap_number, control);
- *effect_ = store;
+ value, heap_number, control));
return heap_number;
}
@@ -4088,9 +4062,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildLoadHeapNumberValue(Node* value) {
- return *effect_ = graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::Float64()), value,
- BuildHeapNumberValueIndexConstant(), *effect_, *control_);
+ return SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Float64()), value,
+ BuildHeapNumberValueIndexConstant(), Effect(), Control()));
}
Node* BuildHeapNumberValueIndexConstant() {
@@ -4106,8 +4080,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
DCHECK(SmiValuesAre31Bits());
- Node* effect = *effect_;
- Node* control = *control_;
+ Node* effect = Effect();
+ Node* control = Control();
Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value,
graph()->start());
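  // Adding the value to itself is the 31-bit Smi tag (a shift left by one);
  // if it overflows, the value cannot be a Smi and is boxed as a HeapNumber.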
@@ -4118,18 +4092,17 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* if_true = graph()->NewNode(common->IfTrue(), branch);
Node* vtrue = BuildAllocateHeapNumberWithValue(
graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
- Node* etrue = *effect_;
+ Node* etrue = Effect();
Node* if_false = graph()->NewNode(common->IfFalse(), branch);
Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
vfalse = BuildChangeInt32ToIntPtr(vfalse);
- Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, merge);
- *effect_ = graph()->NewNode(common->EffectPhi(2), etrue, effect, merge);
- *control_ = merge;
- return phi;
+ Node* merge =
+ SetControl(graph()->NewNode(common->Merge(2), if_true, if_false));
+ SetEffect(graph()->NewNode(common->EffectPhi(2), etrue, effect, merge));
+ return graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
}
Node* BuildChangeFloat64ToTagged(Node* value) {
@@ -4147,8 +4120,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// For potential Smi values, depending on whether Smis are 31 or 32 bit, we
// still need to check whether the value fits in a Smi.
- Node* effect = *effect_;
- Node* control = *control_;
+ Node* effect = Effect();
+ Node* control = Control();
Node* value32 = graph()->NewNode(machine->RoundFloat64ToInt32(), value);
Node* check_i32 = graph()->NewNode(
machine->Float64Equal(), value,
@@ -4210,14 +4183,13 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Allocate the box for the {value}.
Node* vbox = BuildAllocateHeapNumberWithValue(value, if_box);
- Node* ebox = *effect_;
+ Node* ebox = Effect();
- Node* merge = graph()->NewNode(common->Merge(2), if_smi, if_box);
- value = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
- vsmi, vbox, merge);
- *effect_ = graph()->NewNode(common->EffectPhi(2), effect, ebox, merge);
- *control_ = merge;
- return value;
+ Node* merge =
+ SetControl(graph()->NewNode(common->Merge(2), if_smi, if_box));
+ SetEffect(graph()->NewNode(common->EffectPhi(2), effect, ebox, merge));
+ return graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
+ vsmi, vbox, merge);
}
int AddArgumentNodes(Node** args, int pos, int param_count,
@@ -4241,14 +4213,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL)
: jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber));
- Node* result =
+ Node* result = SetEffect(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code,
- node, js_context, *effect_, *control_);
+ node, js_context, Effect(), Control()));
SetSourcePosition(result, 1);
- *effect_ = result;
-
return result;
}
@@ -4266,25 +4236,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* check_heap_object = BuildTestHeapObject(value);
Diamond is_heap_object(graph(), common, check_heap_object,
BranchHint::kFalse);
- is_heap_object.Chain(*control_);
+ is_heap_object.Chain(Control());
- *control_ = is_heap_object.if_true;
- Node* orig_effect = *effect_;
+ SetControl(is_heap_object.if_true);
+ Node* orig_effect = Effect();
- Node* undefined_node = *effect_ =
+ Node* undefined_node =
LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
Node* check_undefined =
graph()->NewNode(machine->WordEqual(), value, undefined_node);
- Node* effect_tagged = *effect_;
+ Node* effect_tagged = Effect();
Diamond is_undefined(graph(), common, check_undefined, BranchHint::kFalse);
is_undefined.Nest(is_heap_object, true);
- *control_ = is_undefined.if_false;
+ SetControl(is_undefined.if_false);
Node* vheap_number = BuildLoadHeapNumberValue(value);
- Node* effect_undefined = *effect_;
+ Node* effect_undefined = Effect();
- *control_ = is_undefined.merge;
+ SetControl(is_undefined.merge);
Node* vundefined =
mcgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
Node* vtagged = is_undefined.Phi(MachineRepresentation::kFloat64,
@@ -4295,8 +4265,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// If input is Smi: just convert to float64.
Node* vfrom_smi = BuildChangeSmiToFloat64(value);
- *control_ = is_heap_object.merge;
- *effect_ = is_heap_object.EffectPhi(effect_tagged, orig_effect);
+ SetControl(is_heap_object.merge);
+ SetEffect(is_heap_object.EffectPhi(effect_tagged, orig_effect));
return is_heap_object.Phi(MachineRepresentation::kFloat64, vtagged,
vfrom_smi);
}
@@ -4356,63 +4326,69 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return num;
}
- Node* BuildModifyThreadInWasmFlag(bool new_value) {
- // TODO(eholk): generate code to modify the thread-local storage directly,
- // rather than calling the runtime.
- if (!use_trap_handler()) {
- return *control_;
- }
-
- // Using two functions instead of taking the new value as a parameter saves
- // one instruction on each call to set up the parameter.
- ExternalReference ref =
- new_value ? ExternalReference::wasm_set_thread_in_wasm_flag()
- : ExternalReference::wasm_clear_thread_in_wasm_flag();
- MachineSignature sig(0, 0, nullptr);
- return BuildCCall(
- &sig, graph()->NewNode(mcgraph()->common()->ExternalConstant(ref)));
+ void BuildModifyThreadInWasmFlag(bool new_value) {
+ if (!trap_handler::IsTrapHandlerEnabled()) return;
+ Node* thread_in_wasm_flag_address_address =
+ graph()->NewNode(mcgraph()->common()->ExternalConstant(
+ ExternalReference::wasm_thread_in_wasm_flag_address_address(
+ isolate_)));
+ Node* thread_in_wasm_flag_address = SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Load(LoadRepresentation(MachineType::Pointer())),
+ thread_in_wasm_flag_address_address, mcgraph()->Int32Constant(0),
+ Effect(), Control()));
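+    // The per-thread flag is written directly with a 32-bit store of 0 or 1
+    // below, avoiding a C call.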
+ SetEffect(graph()->NewNode(
+ mcgraph()->machine()->Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ thread_in_wasm_flag_address, mcgraph()->Int32Constant(0),
+ mcgraph()->Int32Constant(new_value ? 1 : 0), Effect(), Control()));
}
Node* BuildLoadFunctionDataFromExportedFunction(Node* closure) {
- Node* shared = *effect_ = graph()->NewNode(
+ Node* shared = SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), closure,
jsgraph()->Int32Constant(JSFunction::kSharedFunctionInfoOffset -
kHeapObjectTag),
- *effect_, *control_);
- Node* function_data = *effect_ = graph()->NewNode(
+ Effect(), Control()));
+ return SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), shared,
jsgraph()->Int32Constant(SharedFunctionInfo::kFunctionDataOffset -
kHeapObjectTag),
- *effect_, *control_);
- return function_data;
+ Effect(), Control()));
}
Node* BuildLoadInstanceFromExportedFunctionData(Node* function_data) {
- Node* instance = *effect_ = graph()->NewNode(
+ return SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
jsgraph()->Int32Constant(WasmExportedFunctionData::kInstanceOffset -
kHeapObjectTag),
- *effect_, *control_);
- return instance;
+ Effect(), Control()));
}
Node* BuildLoadFunctionIndexFromExportedFunctionData(Node* function_data) {
- Node* function_index_smi = *effect_ = graph()->NewNode(
+ Node* function_index_smi = SetEffect(graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
jsgraph()->Int32Constant(
WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag),
- *effect_, *control_);
+ Effect(), Control()));
Node* function_index = BuildChangeSmiToInt32(function_index_smi);
return function_index;
}
+ Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data) {
+ Node* jump_table_offset_smi = SetEffect(graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
+ jsgraph()->Int32Constant(
+ WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag),
+ Effect(), Control()));
+ Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi);
+ return jump_table_offset;
+ }
+
void BuildJSToWasmWrapper(bool is_import) {
const int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the JS parameter nodes.
- Node* start = Start(wasm_count + 5);
- *control_ = start;
- *effect_ = start;
+ SetEffect(SetControl(Start(wasm_count + 5)));
// Create the js_closure and js_context parameters.
Node* js_closure =
@@ -4456,28 +4432,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Set the ThreadInWasm flag before we do the actual call.
BuildModifyThreadInWasmFlag(true);
- // Load function index from {WasmExportedFunctionData}.
- Node* function_index =
- BuildLoadFunctionIndexFromExportedFunctionData(function_data);
-
if (is_import) {
// Call to an imported function.
+ // Load function index from {WasmExportedFunctionData}.
+ Node* function_index =
+ BuildLoadFunctionIndexFromExportedFunctionData(function_data);
BuildImportWasmCall(sig_, args, &rets, wasm::kNoCodePosition,
function_index);
} else {
// Call to a wasm function defined in this module.
- // The call target is the jump table slot for that function. This is
- // {jump_table + (func_index - num_imports) * kJumpTableSlotSize}
- // == {jump_table_adjusted + func_index * kJumpTableSlotSize}.
- Node* jump_table_adjusted =
- LOAD_INSTANCE_FIELD(JumpTableAdjustedStart, MachineType::Pointer());
- Node* jump_table_offset = graph()->NewNode(
- mcgraph()->machine()->IntMul(), Uint32ToUintptr(function_index),
- mcgraph()->IntPtrConstant(
- wasm::JumpTableAssembler::kJumpTableSlotSize));
- Node* jump_table_slot =
- graph()->NewNode(mcgraph()->machine()->IntAdd(), jump_table_adjusted,
- jump_table_offset);
+ // The call target is the jump table slot for that function.
+ Node* jump_table_start =
+ LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
+ Node* jump_table_offset =
+ BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
+ Node* jump_table_slot = graph()->NewNode(
+ mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
args[0] = jump_table_slot;
BuildWasmCall(sig_, args, &rets, wasm::kNoCodePosition, nullptr,
@@ -4498,9 +4468,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int wasm_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
- Node* start = Start(wasm_count + 3);
- *effect_ = start;
- *control_ = start;
+ SetEffect(SetControl(Start(wasm_count + 3)));
// Create the instance_node from the passed parameter.
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
@@ -4532,11 +4500,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
FieldAccess field_access = AccessBuilder::ForJSFunctionContext();
- Node* function_context = graph()->NewNode(
+ Node* function_context = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::TaggedPointer()),
callable_node,
mcgraph()->Int32Constant(field_access.offset - field_access.tag()),
- *effect_, *control_);
+ Effect(), Control()));
if (!IsClassConstructor(function->shared()->kind())) {
if (function->shared()->internal_formal_parameter_count() ==
@@ -4562,8 +4530,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = undefined_node; // new target
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
args[pos++] = function_context;
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
pos, args);
@@ -4594,8 +4562,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(args, pos, wasm_count, sig_);
args[pos++] = function_context;
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
pos, args);
}
@@ -4625,14 +4593,14 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// the target is a native function, or if the target is a callable
// JSObject, which can only be constructed by the runtime.
args[pos++] = native_context;
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor), pos,
args);
}
- *effect_ = call;
+ SetEffect(call);
SetSourcePosition(call, 0);
// Convert the return value back.
@@ -4650,9 +4618,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
- Node* start = Start(param_count + 3);
- *effect_ = start;
- *control_ = start;
+ SetEffect(SetControl(Start(param_count + 3)));
// Create the instance_node from the passed parameter.
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
@@ -4685,9 +4651,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = 0; i < param_count; ++i) {
wasm::ValueType type = sig_->GetParam(i);
// Start from the parameter with index 1 to drop the instance_node.
- *effect_ = graph()->NewNode(GetSafeStoreOperator(offset, type),
- arg_buffer, Int32Constant(offset),
- Param(i + 1), *effect_, *control_);
+ SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
+ Int32Constant(offset), Param(i + 1), Effect(),
+ Control()));
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
DCHECK_EQ(args_size_bytes, offset);
@@ -4709,9 +4675,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
DCHECK_EQ(1, sig_->return_count());
MachineType load_rep =
wasm::ValueTypes::MachineTypeFor(sig_->GetReturn());
- Node* val =
+ Node* val = SetEffect(
graph()->NewNode(mcgraph()->machine()->Load(load_rep), arg_buffer,
- Int32Constant(0), *effect_, *control_);
+ Int32Constant(0), Effect(), Control()));
Return(val);
}
@@ -4720,9 +4686,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
void BuildCWasmEntry() {
// Build the start and the JS parameter nodes.
- Node* start = Start(CWasmEntryParameters::kNumParameters + 5);
- *control_ = start;
- *effect_ = start;
+ SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5)));
// Create parameter nodes (offset by 1 for the receiver parameter).
Node* foreign_code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
@@ -4730,7 +4694,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* code_obj = graph()->NewNode(
machine->Load(MachineType::Pointer()), foreign_code_obj,
Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
- *effect_, *control_);
+ Effect(), Control());
Node* instance_node = Param(CWasmEntryParameters::kWasmInstance + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
@@ -4744,24 +4708,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
- Node* arg_load =
+ Node* arg_load = SetEffect(
graph()->NewNode(GetSafeLoadOperator(offset, type), arg_buffer,
- Int32Constant(offset), *effect_, *control_);
- *effect_ = arg_load;
+ Int32Constant(offset), Effect(), Control()));
args[pos++] = arg_load;
offset += wasm::ValueTypes::ElementSizeInBytes(type);
}
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ args[pos++] = Effect();
+ args[pos++] = Control();
DCHECK_EQ(arg_count, pos);
// Call the wasm code.
auto call_descriptor = GetWasmCallDescriptor(mcgraph()->zone(), sig_);
- Node* call = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
- arg_count, args);
- *effect_ = call;
+ Node* call = SetEffect(graph()->NewNode(
+ mcgraph()->common()->Call(call_descriptor), arg_count, args));
// Store the return value.
DCHECK_GE(1, sig_->return_count());
@@ -4769,10 +4731,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
StoreRepresentation store_rep(
wasm::ValueTypes::MachineRepresentationFor(sig_->GetReturn()),
kNoWriteBarrier);
- Node* store =
- graph()->NewNode(mcgraph()->machine()->Store(store_rep), arg_buffer,
- Int32Constant(0), call, *effect_, *control_);
- *effect_ = store;
+ SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep),
+ arg_buffer, Int32Constant(0), call, Effect(),
+ Control()));
}
Return(jsgraph()->SmiConstant(0));
@@ -4804,6 +4765,8 @@ MaybeHandle<Code> CompileJSToWasmWrapper(
Isolate* isolate, const wasm::NativeModule* native_module,
wasm::FunctionSig* sig, bool is_import,
wasm::UseTrapHandler use_trap_handler) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "CompileJSToWasmWrapper");
const wasm::WasmModule* module = native_module->module();
//----------------------------------------------------------------------------
@@ -4877,6 +4840,8 @@ MaybeHandle<Code> CompileWasmToJSWrapper(
Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
uint32_t index, wasm::ModuleOrigin origin,
wasm::UseTrapHandler use_trap_handler) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "CompileWasmToJSWrapper");
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -5082,14 +5047,14 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
TurbofanWasmCompilationUnit::TurbofanWasmCompilationUnit(
wasm::WasmCompilationUnit* wasm_unit)
- : wasm_unit_(wasm_unit),
- wasm_compilation_data_(wasm_unit->env_->runtime_exception_support) {}
+ : wasm_unit_(wasm_unit) {}
// Clears unique_ptrs, but (part of) the type is forward declared in the header.
TurbofanWasmCompilationUnit::~TurbofanWasmCompilationUnit() = default;
SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
- double* decode_ms, MachineGraph* mcgraph, NodeOriginTable* node_origins) {
+ wasm::WasmFeatures* detected, double* decode_ms, MachineGraph* mcgraph,
+ NodeOriginTable* node_origins) {
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
@@ -5100,9 +5065,10 @@ SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
WasmGraphBuilder builder(wasm_unit_->env_, mcgraph->zone(), mcgraph,
wasm_unit_->func_body_.sig, source_position_table);
- graph_construction_result_ =
- wasm::BuildTFGraph(wasm_unit_->wasm_engine_->allocator(), &builder,
- wasm_unit_->func_body_, node_origins);
+ graph_construction_result_ = wasm::BuildTFGraph(
+ wasm_unit_->wasm_engine_->allocator(),
+ wasm_unit_->native_module_->enabled_features(), wasm_unit_->env_->module,
+ &builder, detected, wasm_unit_->func_body_, node_origins);
if (graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -5114,7 +5080,7 @@ SourcePositionTable* TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
builder.LowerInt64();
if (builder.has_simd() &&
- (!CpuFeatures::SupportsWasmSimd128() || wasm_unit_->lower_simd_)) {
+ (!CpuFeatures::SupportsWasmSimd128() || wasm_unit_->env_->lower_simd)) {
SimdScalarLowering(
mcgraph,
CreateMachineSignature(mcgraph->zone(), wasm_unit_->func_body_.sig))
@@ -5155,7 +5121,8 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
} // namespace
-void TurbofanWasmCompilationUnit::ExecuteCompilation() {
+void TurbofanWasmCompilationUnit::ExecuteCompilation(
+ wasm::WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteTurbofanCompilation");
double decode_ms = 0;
@@ -5178,13 +5145,16 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation() {
GetDebugName(&compilation_zone, wasm_unit_->func_name_,
wasm_unit_->func_index_),
&compilation_zone, Code::WASM_FUNCTION);
+ if (wasm_unit_->env_->runtime_exception_support) {
+ info.SetWasmRuntimeExceptionSupport();
+ }
NodeOriginTable* node_origins = info.trace_turbo_json_enabled()
? new (&graph_zone)
NodeOriginTable(mcgraph->graph())
: nullptr;
SourcePositionTable* source_positions =
- BuildGraphForWasmFunction(&decode_ms, mcgraph, node_origins);
+ BuildGraphForWasmFunction(detected, &decode_ms, mcgraph, node_origins);
if (graph_construction_result_.failed()) {
ok_ = false;
@@ -5212,8 +5182,7 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation() {
std::unique_ptr<OptimizedCompilationJob> job(
Pipeline::NewWasmCompilationJob(
&info, wasm_unit_->wasm_engine_, mcgraph, call_descriptor,
- source_positions, node_origins, &wasm_compilation_data_,
- wasm_unit_->func_body_,
+ source_positions, node_origins, wasm_unit_->func_body_,
const_cast<wasm::WasmModule*>(wasm_unit_->env_->module),
wasm_unit_->native_module_, wasm_unit_->func_index_,
wasm_unit_->env_->module->origin));
@@ -5233,6 +5202,7 @@ void TurbofanWasmCompilationUnit::ExecuteCompilation() {
}
if (ok_) wasm_code_ = info.wasm_code();
}
+ if (ok_) wasm_unit_->native_module()->PublishCode(wasm_code_);
}
wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
@@ -5255,8 +5225,6 @@ wasm::WasmCode* TurbofanWasmCompilationUnit::FinishCompilation(
return nullptr;
}
-
- wasm_unit_->native_module()->PublishCode(wasm_code_);
return wasm_code_;
}
@@ -5269,32 +5237,20 @@ class LinkageLocationAllocator {
const DoubleRegister (&fp)[kNumFpRegs])
: allocator_(wasm::LinkageAllocator(gp, fp)) {}
- LinkageLocation Next(MachineRepresentation type) {
- MachineType mach_type = MachineType::TypeForRepresentation(type);
- if (type == MachineRepresentation::kFloat32 ||
- type == MachineRepresentation::kFloat64) {
- if (allocator_.has_more_fp_regs()) {
- DoubleRegister reg = allocator_.NextFpReg();
-#if V8_TARGET_ARCH_ARM
- // Allocate floats using a double register, but modify the code to
- // reflect how ARM FP registers alias.
- // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
- if (type == MachineRepresentation::kFloat32) {
- int float_reg_code = reg.code() * 2;
- DCHECK_GT(RegisterConfiguration::kMaxFPRegisters, float_reg_code);
- return LinkageLocation::ForRegister(
- DoubleRegister::from_code(float_reg_code).code(), mach_type);
- }
-#endif
- return LinkageLocation::ForRegister(reg.code(), mach_type);
+ LinkageLocation Next(MachineRepresentation rep) {
+ MachineType type = MachineType::TypeForRepresentation(rep);
+ if (IsFloatingPoint(rep)) {
+ if (allocator_.CanAllocateFP(rep)) {
+ int reg_code = allocator_.NextFpReg(rep);
+ return LinkageLocation::ForRegister(reg_code, type);
}
- } else if (allocator_.has_more_gp_regs()) {
- return LinkageLocation::ForRegister(allocator_.NextGpReg().code(),
- mach_type);
+ } else if (allocator_.CanAllocateGP()) {
+ int reg_code = allocator_.NextGpReg();
+ return LinkageLocation::ForRegister(reg_code, type);
}
// Cannot use register; use stack slot.
- int index = -1 - allocator_.NextStackSlot(type);
- return LinkageLocation::ForCallerFrameSlot(index, mach_type);
+ int index = -1 - allocator_.NextStackSlot(rep);
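+    // Caller frame slots use negative indices: stack slot 0 becomes -1,
+    // slot 1 becomes -2, and so on.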
+ return LinkageLocation::ForCallerFrameSlot(index, type);
}
void SetStackOffset(int offset) { allocator_.SetStackOffset(offset); }
@@ -5331,7 +5287,11 @@ CallDescriptor* GetWasmCallDescriptor(
// Add return location(s).
LinkageLocationAllocator rets(wasm::kGpReturnRegisters,
wasm::kFpReturnRegisters);
- rets.SetStackOffset(params.NumStackSlots());
+
+ int parameter_slots = params.NumStackSlots();
+ if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
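+  // E.g. with padding enabled, 5 parameter slots are rounded up to 6 so the
+  // return slots start at an even slot offset.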
+
+ rets.SetStackOffset(parameter_slots);
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
@@ -5352,19 +5312,19 @@ CallDescriptor* GetWasmCallDescriptor(
CallDescriptor::Flags flags =
use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
- return new (zone) CallDescriptor( // --
- kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- params.NumStackSlots(), // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- flags, // flags
- "wasm-call", // debug name
- 0, // allocatable registers
- rets.NumStackSlots() - params.NumStackSlots()); // stack_return_count
+ return new (zone) CallDescriptor( // --
+ kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ parameter_slots, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ flags, // flags
+ "wasm-call", // debug name
+ 0, // allocatable registers
+ rets.NumStackSlots() - parameter_slots); // stack_return_count
}
namespace {
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 0f6ee0304e..775c817242 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -10,7 +10,6 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/runtime/runtime.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-module.h"
@@ -41,57 +40,27 @@ struct DecodeStruct;
typedef compiler::Node TFNode;
typedef compiler::MachineGraph TFGraph;
class WasmCode;
+struct WasmFeatures;
} // namespace wasm
namespace compiler {
-// Information about Wasm compilation that needs to be plumbed through the
-// different layers of the compiler.
-class WasmCompilationData {
- public:
- explicit WasmCompilationData(
- wasm::RuntimeExceptionSupport runtime_exception_support)
- : runtime_exception_support_(runtime_exception_support) {}
-
- void AddProtectedInstruction(uint32_t instr_offset, uint32_t landing_offset) {
- protected_instructions_.push_back({instr_offset, landing_offset});
- }
-
- OwnedVector<trap_handler::ProtectedInstructionData>
- GetProtectedInstructions() {
- return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
- protected_instructions_);
- }
-
- wasm::RuntimeExceptionSupport runtime_exception_support() const {
- return runtime_exception_support_;
- }
-
- private:
- std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
-
- // See ModuleEnv::runtime_exception_support_.
- wasm::RuntimeExceptionSupport runtime_exception_support_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmCompilationData);
-};
-
class TurbofanWasmCompilationUnit {
public:
explicit TurbofanWasmCompilationUnit(wasm::WasmCompilationUnit* wasm_unit);
~TurbofanWasmCompilationUnit();
- SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms,
+ SourcePositionTable* BuildGraphForWasmFunction(wasm::WasmFeatures* detected,
+ double* decode_ms,
MachineGraph* mcgraph,
NodeOriginTable* node_origins);
- void ExecuteCompilation();
+ void ExecuteCompilation(wasm::WasmFeatures* detected);
wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
private:
wasm::WasmCompilationUnit* const wasm_unit_;
- WasmCompilationData wasm_compilation_data_;
bool ok_ = true;
wasm::WasmCode* wasm_code_ = nullptr;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
@@ -270,8 +239,22 @@ class WasmGraphBuilder {
this->instance_node_ = instance_node;
}
- Node* Control() { return *control_; }
- Node* Effect() { return *effect_; }
+ Node* Control() {
+ DCHECK_NOT_NULL(*control_);
+ return *control_;
+ }
+ Node* Effect() {
+ DCHECK_NOT_NULL(*effect_);
+ return *effect_;
+ }
+ Node* SetControl(Node* node) {
+ *control_ = node;
+ return node;
+ }
+ Node* SetEffect(Node* node) {
+ *effect_ = node;
+ return node;
+ }
void set_control_ptr(Node** control) { this->control_ = control; }
@@ -369,6 +352,8 @@ class WasmGraphBuilder {
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
+ Node* CheckBoundsAndAlignment(uint8_t access_size, Node* index,
+ uint32_t offset, wasm::WasmCodePosition);
Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 548e3eb416..2ccb56907d 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -12,6 +12,8 @@
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"
#include "src/optimized-compilation-info.h"
+#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-objects.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -1211,6 +1213,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Popcntl(i.OutputRegister(), i.InputOperand(0));
}
break;
+ case kX64Bswap:
+ __ bswapq(i.OutputRegister());
+ break;
+ case kX64Bswap32:
+ __ bswapl(i.OutputRegister());
+ break;
case kSSEFloat32Cmp:
ASSEMBLE_SSE_BINOP(Ucomiss);
break;
@@ -3226,7 +3234,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
}
-void CodeGenerator::FinishCode() {}
+void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 88474b2494..6a9e313f4e 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -56,6 +56,8 @@ namespace compiler {
V(X64Tzcnt32) \
V(X64Popcnt) \
V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index c3c0d3a2a5..b1f380badf 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -54,6 +54,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Tzcnt32:
case kX64Popcnt:
case kX64Popcnt32:
+ case kX64Bswap:
+ case kX64Bswap32:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index b3dfb91991..b5d7fa6d55 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -197,6 +197,17 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
+ InstructionOperand GetEffectiveIndexOperand(Node* index,
+ AddressingMode* mode) {
+ if (CanBeImmediate(index)) {
+ *mode = kMode_MRI;
+ return UseImmediate(index);
+ } else {
+ *mode = kMode_MR1;
+ return UseUniqueRegister(index);
+ }
+ }
+
bool CanBeBetterLeftOperand(Node* node) const {
return !selector()->IsLive(node);
}
@@ -329,17 +340,10 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK(CanBeTaggedPointer(store_rep.representation()));
AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ g.UseUniqueRegister(value)};
RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
switch (write_barrier_kind) {
case kNoWriteBarrier:
@@ -356,11 +360,10 @@ void InstructionSelector::VisitStore(Node* node) {
break;
}
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
} else {
ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionOperand inputs[4];
@@ -791,9 +794,15 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
+}
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
@@ -1827,16 +1836,9 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
- InstructionOperand index_operand;
- if (g.CanBeImmediate(index)) {
- index_operand = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- index_operand = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand inputs[] = {g.UseUniqueRegister(value),
- g.UseUniqueRegister(base), index_operand};
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionOperand temps[] = {g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
@@ -1853,17 +1855,10 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
Node* old_value = node->InputAt(2);
Node* new_value = node->InputAt(3);
AddressingMode addressing_mode;
- InstructionOperand index_operand;
- if (g.CanBeImmediate(index)) {
- index_operand = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- index_operand = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand inputs[] = {g.UseFixed(old_value, rax),
- g.UseUniqueRegister(new_value),
- g.UseUniqueRegister(base), index_operand};
+ InstructionOperand inputs[] = {
+ g.UseFixed(old_value, rax), g.UseUniqueRegister(new_value),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
@@ -1877,16 +1872,9 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
AddressingMode addressing_mode;
- InstructionOperand index_operand;
- if (g.CanBeImmediate(index)) {
- index_operand = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- index_operand = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- InstructionOperand inputs[] = {g.UseUniqueRegister(value),
- g.UseUniqueRegister(base), index_operand};
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
@@ -2320,7 +2308,7 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicExchangeInt8;
@@ -2340,7 +2328,7 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicExchangeUint8;
@@ -2358,7 +2346,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = kWord32AtomicCompareExchangeInt8;
@@ -2378,7 +2366,7 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
opcode = kX64Word64AtomicCompareExchangeUint8;
@@ -2398,7 +2386,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Int8()) {
opcode = int8_op;
@@ -2434,7 +2422,7 @@ VISIT_ATOMIC_BINOP(Xor)
void InstructionSelector::VisitWord64AtomicBinaryOperation(
Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
ArchOpcode word64_op) {
- MachineType type = AtomicOpRepresentationOf(node->op());
+ MachineType type = AtomicOpType(node->op());
ArchOpcode opcode = kArchNop;
if (type == MachineType::Uint8()) {
opcode = uint8_op;
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 62faacbca7..3470ff99d7 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -11,6 +11,7 @@
#include "src/objects/dictionary.h"
#include "src/objects/map-inl.h"
#include "src/objects/regexp-match-info.h"
+#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/template-objects.h"
@@ -46,6 +47,11 @@ Context* Context::cast(Object* context) {
return reinterpret_cast<Context*>(context);
}
+NativeContext* NativeContext::cast(Object* context) {
+ DCHECK(context->IsNativeContext());
+ return reinterpret_cast<NativeContext*>(context);
+}
+
void Context::set_scope_info(ScopeInfo* scope_info) {
set(SCOPE_INFO_INDEX, scope_info);
}
@@ -67,21 +73,16 @@ void Context::set_extension(HeapObject* object) {
set(EXTENSION_INDEX, object);
}
-Context* Context::native_context() const {
+NativeContext* Context::native_context() const {
Object* result = get(NATIVE_CONTEXT_INDEX);
DCHECK(IsBootstrappingOrNativeContext(this->GetIsolate(), result));
- return reinterpret_cast<Context*>(result);
+ return reinterpret_cast<NativeContext*>(result);
}
-
-void Context::set_native_context(Context* context) {
+void Context::set_native_context(NativeContext* context) {
set(NATIVE_CONTEXT_INDEX, context);
}
-bool Context::IsNativeContext() const {
- return map()->instance_type() == NATIVE_CONTEXT_TYPE;
-}
-
bool Context::IsFunctionContext() const {
return map()->instance_type() == FUNCTION_CONTEXT_TYPE;
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index b52d751f3f..2117179219 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -130,13 +130,11 @@ Context* Context::script_context() {
return current;
}
-
-JSObject* Context::global_proxy() {
+JSGlobalProxy* Context::global_proxy() {
return native_context()->global_proxy_object();
}
-
-void Context::set_global_proxy(JSObject* object) {
+void Context::set_global_proxy(JSGlobalProxy* object) {
native_context()->set_global_proxy_object(object);
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 3a4f8329c7..709ae6164a 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -10,6 +10,7 @@
namespace v8 {
namespace internal {
+class NativeContext;
class RegExpMatchInfo;
enum ContextLookupFlags {
@@ -70,47 +71,51 @@ enum ContextLookupFlags {
V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
- V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
- V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
- V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
- V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
- V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(ERROR_TO_STRING, JSFunction, error_to_string) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
- V(MAP_DELETE_INDEX, JSFunction, map_delete) \
- V(MAP_GET_INDEX, JSFunction, map_get) \
- V(MAP_HAS_INDEX, JSFunction, map_has) \
- V(MAP_SET_INDEX, JSFunction, map_set) \
- V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
- V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
- V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(RESOLVE_LOCALE_FUNCTION_INDEX, JSFunction, resolve_locale) \
- V(SET_ADD_INDEX, JSFunction, set_add) \
- V(SET_DELETE_INDEX, JSFunction, set_delete) \
- V(SET_HAS_INDEX, JSFunction, set_has) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
- V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_compile_error_function) \
- V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
- V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_runtime_error_function) \
- V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
+ V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
+ V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
+ V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
+ V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
+ V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(CANONICALIZE_LOCALE_LIST_FUNCTION_INDEX, JSFunction, \
+ canonicalize_locale_list) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
+ V(MAP_DELETE_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_INDEX, JSFunction, map_has) \
+ V(MAP_SET_INDEX, JSFunction, map_set) \
+ V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(CACHED_OR_NEW_SERVICE_LOCALE_FUNCTION_INDEX, JSFunction, \
+ cached_or_new_service) \
+ V(RESOLVE_LOCALE_FUNCTION_INDEX, JSFunction, resolve_locale) \
+ V(SET_ADD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_compile_error_function) \
+ V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
+ V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_runtime_error_function) \
+ V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add)
#define NATIVE_CONTEXT_FIELDS(V) \
- V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
+ V(GLOBAL_PROXY_INDEX, JSGlobalProxy, global_proxy_object) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
/* Below is alpha-sorted */ \
V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
@@ -199,26 +204,31 @@ enum ContextLookupFlags {
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_date_time_format_function) \
+ V(INTL_DATE_FORMAT_INTERNAL_FORMAT_SHARED_FUN, SharedFunctionInfo, \
+ date_format_internal_format_shared_fun) \
V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \
intl_number_format_function) \
V(INTL_NUMBER_FORMAT_INTERNAL_FORMAT_NUMBER_SHARED_FUN, SharedFunctionInfo, \
number_format_internal_format_number_shared_fun) \
V(INTL_LOCALE_FUNCTION_INDEX, JSFunction, intl_locale_function) \
V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
+ V(INTL_COLLATOR_INTERNAL_COMPARE_SHARED_FUN, SharedFunctionInfo, \
+ collator_internal_compare_shared_fun) \
V(INTL_PLURAL_RULES_FUNCTION_INDEX, JSFunction, intl_plural_rules_function) \
V(INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX, JSFunction, \
intl_v8_break_iterator_function) \
+ V(INTL_V8_BREAK_ITERATOR_INTERNAL_ADOPT_TEXT_SHARED_FUN, SharedFunctionInfo, \
+ break_iterator_internal_adopt_text_shared_fun) \
V(JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX, Map, \
- js_array_fast_smi_elements_map_index) \
+ js_array_packed_smi_elements_map) \
V(JS_ARRAY_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
- js_array_fast_holey_smi_elements_map_index) \
- V(JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX, Map, js_array_fast_elements_map_index) \
- V(JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX, Map, \
- js_array_fast_holey_elements_map_index) \
+ js_array_holey_smi_elements_map) \
+ V(JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX, Map, js_array_packed_elements_map) \
+ V(JS_ARRAY_HOLEY_ELEMENTS_MAP_INDEX, Map, js_array_holey_elements_map) \
V(JS_ARRAY_PACKED_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
- js_array_fast_double_elements_map_index) \
+ js_array_packed_double_elements_map) \
V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
- js_array_fast_holey_double_elements_map_index) \
+ js_array_holey_double_elements_map) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \
V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \
@@ -269,11 +279,10 @@ enum ContextLookupFlags {
regexp_internal_match_info) \
V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
- initial_regexp_string_iterator_prototype_map_index) \
+ initial_regexp_string_iterator_prototype_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
- V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \
V(SET_KEY_VALUE_ITERATOR_MAP_INDEX, Map, set_key_value_iterator_map) \
@@ -521,8 +530,8 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
Context* closure_context();
// Returns a JSGlobalProxy object or null.
- JSObject* global_proxy();
- void set_global_proxy(JSObject* global);
+ JSGlobalProxy* global_proxy();
+ void set_global_proxy(JSGlobalProxy* global);
// Get the JSGlobalObject object.
V8_EXPORT_PRIVATE JSGlobalObject* global_object();
@@ -531,13 +540,11 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
Context* script_context();
// Compute the native context.
- inline Context* native_context() const;
- inline void set_native_context(Context* context);
+ inline NativeContext* native_context() const;
+ inline void set_native_context(NativeContext* context);
- // Predicates for context types. IsNativeContext is also defined on Object
- // because we frequently have to know if arbitrary objects are natives
- // contexts.
- inline bool IsNativeContext() const;
+ // Predicates for context types. IsNativeContext is already defined on
+ // Object.
inline bool IsFunctionContext() const;
inline bool IsCatchContext() const;
inline bool IsWithContext() const;
@@ -636,6 +643,15 @@ class Context : public FixedArray, public NeverReadOnlySpaceObject {
STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
+class NativeContext : public Context {
+ public:
+ static inline NativeContext* cast(Object* context);
+ // TODO(neis): Move some stuff from Context here.
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(NativeContext);
+};
+
typedef Context::Field ContextField;
} // namespace internal
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 0f7ae95769..fed7edb44a 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -1171,8 +1171,8 @@ class RuntimeCallTimerScope {
20) \
HR(wasm_lazy_compilation_throughput, V8.WasmLazyCompilationThroughput, 1, \
10000, 50) \
- HR(compile_script_cache_behaviour, V8.CompileScript.CacheBehaviour, 0, 19, \
- 20) \
+ HR(compile_script_cache_behaviour, V8.CompileScript.CacheBehaviour, 0, 20, \
+ 21) \
HR(wasm_memory_allocation_result, V8.WasmMemoryAllocationResult, 0, 3, 4) \
HR(wasm_address_space_usage_mb, V8.WasmAddressSpaceUsageMiB, 0, 1 << 20, \
128) \
@@ -1377,8 +1377,6 @@ class RuntimeCallTimerScope {
SC(sub_string_native, V8.SubStringNative) \
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
SC(regexp_entry_native, V8.RegExpEntryNative) \
- SC(number_to_string_native, V8.NumberToStringNative) \
- SC(number_to_string_runtime, V8.NumberToStringRuntime) \
SC(math_exp_runtime, V8.MathExpRuntime) \
SC(math_log_runtime, V8.MathLogRuntime) \
SC(math_pow_runtime, V8.MathPowRuntime) \
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 1172bf8536..a1fc3b5782 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -342,7 +342,7 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
Local<String> addition =
String::NewFromUtf8(isolate, buffer, NewStringType::kNormal, length)
.ToLocalChecked();
- accumulator = String::Concat(accumulator, addition);
+ accumulator = String::Concat(isolate, accumulator, addition);
fullness = bytes_read + fullness - length;
memcpy(buffer, buffer + length, fullness);
}
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 8feefa4634..349021d38c 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -24,7 +24,7 @@
#include "include/libplatform/libplatform.h"
#include "include/libplatform/v8-tracing.h"
#include "include/v8-inspector.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
@@ -411,6 +411,7 @@ base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
std::vector<Worker*> Shell::workers_;
std::vector<ExternalizedContents> Shell::externalized_contents_;
+std::atomic<bool> Shell::script_executed_{false};
base::LazyMutex Shell::isolate_status_lock_;
std::map<v8::Isolate*, bool> Shell::isolate_status_;
base::LazyMutex Shell::cached_code_mutex_;
@@ -428,7 +429,7 @@ class DummySourceStream : public v8::ScriptCompiler::ExternalSourceStream {
DummySourceStream(Local<String> source, Isolate* isolate) : done_(false) {
source_length_ = source->Utf8Length(isolate);
source_buffer_.reset(new uint8_t[source_length_]);
- source->WriteUtf8(reinterpret_cast<char*>(source_buffer_.get()),
+ source->WriteUtf8(isolate, reinterpret_cast<char*>(source_buffer_.get()),
source_length_);
}
@@ -1339,20 +1340,22 @@ Local<String> Shell::ReadFromStdin(Isolate* isolate) {
return accumulator;
} else if (buffer[length-1] != '\n') {
accumulator = String::Concat(
- accumulator,
+ isolate, accumulator,
String::NewFromUtf8(isolate, buffer, NewStringType::kNormal, length)
.ToLocalChecked());
} else if (length > 1 && buffer[length-2] == '\\') {
buffer[length-2] = '\n';
- accumulator = String::Concat(
- accumulator,
- String::NewFromUtf8(isolate, buffer, NewStringType::kNormal,
- length - 1).ToLocalChecked());
+ accumulator =
+ String::Concat(isolate, accumulator,
+ String::NewFromUtf8(isolate, buffer,
+ NewStringType::kNormal, length - 1)
+ .ToLocalChecked());
} else {
return String::Concat(
- accumulator,
+ isolate, accumulator,
String::NewFromUtf8(isolate, buffer, NewStringType::kNormal,
- length - 1).ToLocalChecked());
+ length - 1)
+ .ToLocalChecked());
}
}
}
@@ -2085,8 +2088,8 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
void Shell::OnExit(v8::Isolate* isolate) {
// Dump basic block profiling data.
- if (i::BasicBlockProfiler* profiler =
- reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
+ if (i::FLAG_turbo_profiling) {
+ i::BasicBlockProfiler* profiler = i::BasicBlockProfiler::Get();
i::StdoutStream{} << *profiler;
}
isolate->Dispose();
@@ -2403,7 +2406,7 @@ class InspectorClient : public v8_inspector::V8InspectorClient {
InspectorClient::GetSession(context);
int length = message->Length();
std::unique_ptr<uint16_t[]> buffer(new uint16_t[length]);
- message->Write(buffer.get(), 0, length);
+ message->Write(isolate, buffer.get(), 0, length);
v8_inspector::StringView message_view(buffer.get(), length);
session->dispatchProtocolMessage(message_view);
args.GetReturnValue().Set(True(isolate));
@@ -2445,7 +2448,7 @@ void SourceGroup::Execute(Isolate* isolate) {
Local<String> source =
String::NewFromUtf8(isolate, argv_[i + 1], NewStringType::kNormal)
.ToLocalChecked();
- Shell::options.script_executed = true;
+ Shell::set_script_executed();
if (!Shell::ExecuteString(isolate, source, file_name,
Shell::kNoPrintResult, Shell::kReportExceptions,
Shell::kNoProcessMessageQueue)) {
@@ -2455,7 +2458,7 @@ void SourceGroup::Execute(Isolate* isolate) {
++i;
continue;
} else if (ends_with(arg, ".mjs")) {
- Shell::options.script_executed = true;
+ Shell::set_script_executed();
if (!Shell::ExecuteModule(isolate, arg)) {
exception_was_thrown = true;
break;
@@ -2464,7 +2467,7 @@ void SourceGroup::Execute(Isolate* isolate) {
} else if (strcmp(arg, "--module") == 0 && i + 1 < end_offset_) {
// Treat the next file as a module.
arg = argv_[++i];
- Shell::options.script_executed = true;
+ Shell::set_script_executed();
if (!Shell::ExecuteModule(isolate, arg)) {
exception_was_thrown = true;
break;
@@ -2485,7 +2488,7 @@ void SourceGroup::Execute(Isolate* isolate) {
printf("Error reading '%s'\n", arg);
base::OS::ExitProcess(1);
}
- Shell::options.script_executed = true;
+ Shell::set_script_executed();
if (!Shell::ExecuteString(isolate, source, file_name, Shell::kNoPrintResult,
Shell::kReportExceptions,
Shell::kProcessMessageQueue)) {
@@ -2564,12 +2567,8 @@ void SourceGroup::JoinThread() {
}
ExternalizedContents::~ExternalizedContents() {
- if (base_ != nullptr) {
- if (mode_ == ArrayBuffer::Allocator::AllocationMode::kReservation) {
- CHECK(i::FreePages(base_, length_));
- } else {
- Shell::array_buffer_allocator->Free(base_, length_);
- }
+ if (data_ != nullptr) {
+ deleter_(data_, length_, deleter_data_);
}
}
@@ -2790,9 +2789,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
strcmp(argv[i], "--no-stress-background-compile") == 0) {
options.stress_background_compile = false;
argv[i] = nullptr;
- } else if (strcmp(argv[i], "--mock-arraybuffer-allocator") == 0) {
- options.mock_arraybuffer_allocator = true;
- argv[i] = nullptr;
} else if (strcmp(argv[i], "--noalways-opt") == 0 ||
strcmp(argv[i], "--no-always-opt") == 0) {
// No support for stressing if we can't use --always-opt.
@@ -2907,6 +2903,7 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ options.mock_arraybuffer_allocator = i::FLAG_mock_arraybuffer_allocator;
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
@@ -2923,10 +2920,10 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
- options.script_executed = true;
+ set_script_executed();
} else if (strncmp(str, "-", 1) != 0) {
// Not a flag, so it must be a script to execute.
- options.script_executed = true;
+ set_script_executed();
}
}
current->End(argc);
@@ -2949,7 +2946,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
}
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
- bool use_existing_context = last_run && options.use_interactive_shell();
+ bool use_existing_context = last_run && use_interactive_shell();
if (use_existing_context) {
// Keep using the same context in the interactive shell.
evaluation_context_.Reset(isolate, context);
@@ -3039,9 +3036,9 @@ void Shell::CompleteMessageLoop(Isolate* isolate) {
DCHECK_GT(isolate_status_.count(isolate), 0);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::wasm::WasmEngine* wasm_engine = i_isolate->wasm_engine();
- bool should_wait =
- (options.wait_for_wasm && wasm_engine->HasRunningCompileJob()) ||
- isolate_status_[isolate];
+ bool should_wait = (options.wait_for_wasm &&
+ wasm_engine->HasRunningCompileJob(i_isolate)) ||
+ isolate_status_[isolate];
return should_wait ? platform::MessageLoopBehavior::kWaitForWork
: platform::MessageLoopBehavior::kDoNotWait;
};
@@ -3275,15 +3272,14 @@ std::unique_ptr<SerializationData> Shell::SerializeValue(
bool ok;
Local<Context> context = isolate->GetCurrentContext();
Serializer serializer(isolate);
+ std::unique_ptr<SerializationData> data;
if (serializer.WriteValue(context, value, transfer).To(&ok)) {
- std::unique_ptr<SerializationData> data = serializer.Release();
- base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
- serializer.AppendExternalizedContentsTo(&externalized_contents_);
- return data;
+ data = serializer.Release();
}
// Append externalized contents even when WriteValue fails.
+ base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
serializer.AppendExternalizedContentsTo(&externalized_contents_);
- return nullptr;
+ return data;
}
MaybeLocal<Value> Shell::DeserializeValue(
@@ -3482,7 +3478,7 @@ int Shell::Main(int argc, char* argv[]) {
// Run interactive shell if explicitly requested or if no script has been
// executed, but never on --test
- if (options.use_interactive_shell()) {
+ if (use_interactive_shell()) {
RunShell(isolate);
}
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index ef0ea7d898..2d60cb8327 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -132,36 +132,45 @@ class SourceGroup {
class ExternalizedContents {
public:
explicit ExternalizedContents(const ArrayBuffer::Contents& contents)
- : base_(contents.AllocationBase()),
- length_(contents.AllocationLength()),
- mode_(contents.AllocationMode()) {}
+ : data_(contents.Data()),
+ length_(contents.ByteLength()),
+ deleter_(contents.Deleter()),
+ deleter_data_(contents.DeleterData()) {}
explicit ExternalizedContents(const SharedArrayBuffer::Contents& contents)
- : base_(contents.AllocationBase()),
- length_(contents.AllocationLength()),
- mode_(contents.AllocationMode()) {}
- ExternalizedContents(ExternalizedContents&& other)
- : base_(other.base_), length_(other.length_), mode_(other.mode_) {
- other.base_ = nullptr;
+ : data_(contents.Data()),
+ length_(contents.ByteLength()),
+ deleter_(contents.Deleter()),
+ deleter_data_(contents.DeleterData()) {}
+ ExternalizedContents(ExternalizedContents&& other) V8_NOEXCEPT
+ : data_(other.data_),
+ length_(other.length_),
+ deleter_(other.deleter_),
+ deleter_data_(other.deleter_data_) {
+ other.data_ = nullptr;
other.length_ = 0;
- other.mode_ = ArrayBuffer::Allocator::AllocationMode::kNormal;
+ other.deleter_ = nullptr;
+ other.deleter_data_ = nullptr;
}
- ExternalizedContents& operator=(ExternalizedContents&& other) {
+ ExternalizedContents& operator=(ExternalizedContents&& other) V8_NOEXCEPT {
if (this != &other) {
- base_ = other.base_;
+ data_ = other.data_;
length_ = other.length_;
- mode_ = other.mode_;
- other.base_ = nullptr;
+ deleter_ = other.deleter_;
+ deleter_data_ = other.deleter_data_;
+ other.data_ = nullptr;
other.length_ = 0;
- other.mode_ = ArrayBuffer::Allocator::AllocationMode::kNormal;
+ other.deleter_ = nullptr;
+ other.deleter_data_ = nullptr;
}
return *this;
}
~ExternalizedContents();
private:
- void* base_;
+ void* data_;
size_t length_;
- ArrayBuffer::Allocator::AllocationMode mode_;
+ ArrayBuffer::Contents::DeleterCallback deleter_;
+ void* deleter_data_;
DISALLOW_COPY_AND_ASSIGN(ExternalizedContents);
};
@@ -318,8 +327,7 @@ class ShellOptions {
};
ShellOptions()
- : script_executed(false),
- send_idle_notification(false),
+ : send_idle_notification(false),
invoke_weak_callbacks(false),
omit_quit(false),
wait_for_wasm(true),
@@ -350,11 +358,6 @@ class ShellOptions {
delete[] isolate_sources;
}
- bool use_interactive_shell() {
- return (interactive_shell || !script_executed) && !test_shell;
- }
-
- bool script_executed;
bool send_idle_notification;
bool invoke_weak_callbacks;
bool omit_quit;
@@ -528,6 +531,12 @@ class Shell : public i::AllStatic {
static char* ReadCharsFromTcpPort(const char* name, int* size_out);
+ static void set_script_executed() { script_executed_.store(true); }
+ static bool use_interactive_shell() {
+ return (options.interactive_shell || !script_executed_.load()) &&
+ !options.test_shell;
+ }
+
private:
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
@@ -541,11 +550,14 @@ class Shell : public i::AllStatic {
static base::LazyMutex context_mutex_;
static const base::TimeTicks kInitialTicks;
- static base::LazyMutex workers_mutex_;
+ static base::LazyMutex workers_mutex_; // Guards the following members.
static bool allow_new_workers_;
static std::vector<Worker*> workers_;
static std::vector<ExternalizedContents> externalized_contents_;
+ // Multiple isolates may update this flag concurrently.
+ static std::atomic<bool> script_executed_;
+
static void WriteIgnitionDispatchCountersFile(v8::Isolate* isolate);
// Append LCOV coverage data to file.
static void WriteLcovData(v8::Isolate* isolate, const char* file);
diff --git a/deps/v8/src/debug/arm/OWNERS b/deps/v8/src/debug/arm/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/debug/arm/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/debug/arm64/OWNERS b/deps/v8/src/debug/arm64/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/debug/arm64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 9e7195b1f3..a71c9e572b 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -72,8 +72,7 @@ void SortBlockData(std::vector<CoverageBlock>& v) {
std::sort(v.begin(), v.end(), CompareCoverageBlock);
}
-std::vector<CoverageBlock> GetSortedBlockData(Isolate* isolate,
- SharedFunctionInfo* shared) {
+std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo* shared) {
DCHECK(shared->HasCoverageInfo());
CoverageInfo* coverage_info =
@@ -385,13 +384,12 @@ bool IsBinaryMode(debug::Coverage::Mode mode) {
}
}
-void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
- SharedFunctionInfo* info,
+void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo* info,
debug::Coverage::Mode mode) {
DCHECK(IsBlockMode(mode));
function->has_block_coverage = true;
- function->blocks = GetSortedBlockData(isolate, info);
+ function->blocks = GetSortedBlockData(info);
// If in binary mode, only report counts of 0/1.
if (mode == debug::Coverage::kBlockBinary) ClampToBinary(function);
@@ -544,7 +542,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
CoverageFunction function(start, end, count, name);
if (IsBlockMode(collectionMode) && info->HasCoverageInfo()) {
- CollectBlockCoverage(isolate, &function, info, collectionMode);
+ CollectBlockCoverage(&function, info, collectionMode);
}
// Only include a function range if itself or its parent function is
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index d263fa45a9..5466ca050b 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -32,7 +32,8 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
ScriptOriginOptions origin_options(false, true);
MaybeHandle<SharedFunctionInfo> maybe_function_info =
Compiler::GetSharedFunctionInfoForScript(
- source, Compiler::ScriptDetails(isolate->factory()->empty_string()),
+ isolate, source,
+ Compiler::ScriptDetails(isolate->factory()->empty_string()),
origin_options, nullptr, nullptr, ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE);
@@ -254,111 +255,112 @@ namespace {
bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
// Use macro to include both inlined and non-inlined version of an intrinsic.
-#define INTRINSIC_WHITELIST(V) \
- /* Conversions */ \
- V(NumberToStringSkipCache) \
- V(ToBigInt) \
- V(ToInteger) \
- V(ToLength) \
- V(ToNumber) \
- V(ToObject) \
- V(ToString) \
- /* Type checks */ \
- V(IsArray) \
- V(IsDate) \
- V(IsFunction) \
- V(IsJSProxy) \
- V(IsJSReceiver) \
- V(IsJSWeakMap) \
- V(IsJSWeakSet) \
- V(IsRegExp) \
- V(IsSmi) \
- V(IsTypedArray) \
- /* Loads */ \
- V(LoadLookupSlotForCall) \
- /* Arrays */ \
- V(ArraySpeciesConstructor) \
- V(EstimateNumberOfElements) \
- V(GetArrayKeys) \
- V(HasComplexElements) \
- V(HasFastPackedElements) \
- V(NewArray) \
- V(NormalizeElements) \
- V(PrepareElementsForSort) \
- V(TrySliceSimpleNonFastElements) \
- V(TypedArrayGetBuffer) \
- /* Errors */ \
- V(NewTypeError) \
- V(ReThrow) \
- V(ThrowCalledNonCallable) \
- V(ThrowInvalidStringLength) \
- V(ThrowIteratorResultNotAnObject) \
- V(ThrowReferenceError) \
- V(ThrowSymbolIteratorInvalid) \
- /* Strings */ \
- V(RegExpInternalReplace) \
- V(StringIncludes) \
- V(StringIndexOf) \
- V(StringReplaceOneCharWithString) \
- V(StringSubstring) \
- V(StringToNumber) \
- V(StringTrim) \
- /* BigInts */ \
- V(BigIntEqualToBigInt) \
- V(BigIntToBoolean) \
- V(BigIntToNumber) \
- /* Literals */ \
- V(CreateArrayLiteral) \
- V(CreateObjectLiteral) \
- V(CreateRegExpLiteral) \
- /* Called from builtins */ \
- V(AllocateInNewSpace) \
- V(AllocateInTargetSpace) \
- V(AllocateSeqOneByteString) \
- V(AllocateSeqTwoByteString) \
- V(ArrayIncludes_Slow) \
- V(ArrayIndexOf) \
- V(ArrayIsArray) \
- V(ClassOf) \
- V(GenerateRandomNumbers) \
- V(GetFunctionName) \
- V(GetOwnPropertyDescriptor) \
- V(GlobalPrint) \
- V(HasProperty) \
- V(ObjectCreate) \
- V(ObjectEntries) \
- V(ObjectEntriesSkipFastPath) \
- V(ObjectHasOwnProperty) \
- V(ObjectValues) \
- V(ObjectValuesSkipFastPath) \
- V(ObjectGetOwnPropertyNames) \
- V(ObjectGetOwnPropertyNamesTryFast) \
- V(RegExpInitializeAndCompile) \
- V(StackGuard) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringEqual) \
- V(StringIndexOfUnchecked) \
- V(StringParseFloat) \
- V(StringParseInt) \
- V(SymbolDescriptiveString) \
- V(ThrowRangeError) \
- V(ThrowTypeError) \
- V(ToName) \
- V(TransitionElementsKind) \
- /* Misc. */ \
- V(Call) \
- V(CompleteInobjectSlackTrackingForMap) \
- V(HasInPrototypeChain) \
- V(MaxSmi) \
- V(NewObject) \
- V(SmiLexicographicCompare) \
- V(StringMaxLength) \
- V(StringToArray) \
- /* Test */ \
- V(GetOptimizationStatus) \
- V(OptimizeFunctionOnNextCall) \
- V(OptimizeOsr) \
+#define INTRINSIC_WHITELIST(V) \
+ /* Conversions */ \
+ V(NumberToString) \
+ V(ToBigInt) \
+ V(ToInteger) \
+ V(ToLength) \
+ V(ToNumber) \
+ V(ToObject) \
+ V(ToString) \
+ /* Type checks */ \
+ V(IsArray) \
+ V(IsDate) \
+ V(IsFunction) \
+ V(IsJSProxy) \
+ V(IsJSReceiver) \
+ V(IsRegExp) \
+ V(IsSmi) \
+ V(IsTypedArray) \
+ /* Loads */ \
+ V(LoadLookupSlotForCall) \
+ V(GetProperty) \
+ /* Arrays */ \
+ V(ArraySpeciesConstructor) \
+ V(EstimateNumberOfElements) \
+ V(GetArrayKeys) \
+ V(HasComplexElements) \
+ V(HasFastPackedElements) \
+ V(NewArray) \
+ V(NormalizeElements) \
+ V(PrepareElementsForSort) \
+ V(TrySliceSimpleNonFastElements) \
+ V(TypedArrayGetBuffer) \
+ /* Errors */ \
+ V(NewTypeError) \
+ V(ReThrow) \
+ V(ThrowCalledNonCallable) \
+ V(ThrowInvalidStringLength) \
+ V(ThrowIteratorResultNotAnObject) \
+ V(ThrowReferenceError) \
+ V(ThrowSymbolIteratorInvalid) \
+ /* Strings */ \
+ V(RegExpInternalReplace) \
+ V(StringIncludes) \
+ V(StringIndexOf) \
+ V(StringReplaceOneCharWithString) \
+ V(StringSubstring) \
+ V(StringToNumber) \
+ V(StringTrim) \
+ /* BigInts */ \
+ V(BigIntEqualToBigInt) \
+ V(BigIntToBoolean) \
+ V(BigIntToNumber) \
+ /* Literals */ \
+ V(CreateArrayLiteral) \
+ V(CreateArrayLiteralWithoutAllocationSite) \
+ V(CreateObjectLiteral) \
+ V(CreateObjectLiteralWithoutAllocationSite) \
+ V(CreateRegExpLiteral) \
+ /* Called from builtins */ \
+ V(AllocateInNewSpace) \
+ V(AllocateInTargetSpace) \
+ V(AllocateSeqOneByteString) \
+ V(AllocateSeqTwoByteString) \
+ V(ArrayIncludes_Slow) \
+ V(ArrayIndexOf) \
+ V(ArrayIsArray) \
+ V(ClassOf) \
+ V(GenerateRandomNumbers) \
+ V(GetFunctionName) \
+ V(GetOwnPropertyDescriptor) \
+ V(GlobalPrint) \
+ V(HasProperty) \
+ V(ObjectCreate) \
+ V(ObjectEntries) \
+ V(ObjectEntriesSkipFastPath) \
+ V(ObjectHasOwnProperty) \
+ V(ObjectValues) \
+ V(ObjectValuesSkipFastPath) \
+ V(ObjectGetOwnPropertyNames) \
+ V(ObjectGetOwnPropertyNamesTryFast) \
+ V(RegExpInitializeAndCompile) \
+ V(StackGuard) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringEqual) \
+ V(StringIndexOfUnchecked) \
+ V(StringParseFloat) \
+ V(StringParseInt) \
+ V(SymbolDescriptiveString) \
+ V(ThrowRangeError) \
+ V(ThrowTypeError) \
+ V(ToName) \
+ V(TransitionElementsKind) \
+ /* Misc. */ \
+ V(Call) \
+ V(CompleteInobjectSlackTrackingForMap) \
+ V(HasInPrototypeChain) \
+ V(MaxSmi) \
+ V(NewObject) \
+ V(SmiLexicographicCompare) \
+ V(StringMaxLength) \
+ V(StringToArray) \
+ /* Test */ \
+ V(GetOptimizationStatus) \
+ V(OptimizeFunctionOnNextCall) \
+ V(OptimizeOsr) \
V(UnblockConcurrentRecompilation)
#define CASE(Name) \
@@ -553,12 +555,14 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayPrototypeValues:
case Builtins::kArrayIncludes:
case Builtins::kArrayPrototypeEntries:
+ case Builtins::kArrayPrototypeFill:
case Builtins::kArrayPrototypeFind:
case Builtins::kArrayPrototypeFindIndex:
case Builtins::kArrayPrototypeFlat:
case Builtins::kArrayPrototypeFlatMap:
case Builtins::kArrayPrototypeKeys:
case Builtins::kArrayPrototypeSlice:
+ case Builtins::kArrayPrototypeSort:
case Builtins::kArrayForEach:
case Builtins::kArrayEvery:
case Builtins::kArraySome:
@@ -813,6 +817,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayIteratorPrototypeNext:
case Builtins::kArrayPrototypePop:
case Builtins::kArrayPrototypePush:
+ case Builtins::kArrayPrototypeReverse:
case Builtins::kArrayPrototypeShift:
case Builtins::kArraySplice:
case Builtins::kArrayUnshift:
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index dbade081cb..e71c1c07b3 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -4,11 +4,12 @@
#include "src/debug/debug-scope-iterator.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
#include "src/isolate.h"
+#include "src/objects/js-generator-inl.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 8c6fae1d9f..01cd017eb2 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -12,6 +12,7 @@
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/isolate-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/module.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index bf1e1b623b..14d2850b69 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -4,7 +4,7 @@
#include "src/debug/debug-stack-trace-iterator.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-scope-iterator.h"
#include "src/debug/debug.h"
@@ -173,8 +173,7 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::Evaluate(
v8::Local<v8::String> source, bool throw_on_side_effect) {
DCHECK(!Done());
Handle<Object> value;
- i::SafeForInterruptsScope safe_for_interrupt_scope(
- isolate_, i::StackGuard::TERMINATE_EXECUTION);
+ i::SafeForInterruptsScope safe_for_interrupt_scope(isolate_);
if (!DebugEvaluate::Local(isolate_, iterator_.frame()->id(),
inlined_frame_index_, Utils::OpenHandle(*source),
throw_on_side_effect)
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 3877f156ef..a7114b1434 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -7,7 +7,7 @@
#include <memory>
#include <unordered_set>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/arguments.h"
#include "src/assembler-inl.h"
#include "src/base/platform/mutex.h"
@@ -30,6 +30,7 @@
#include "src/log.h"
#include "src/messages.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
@@ -322,6 +323,7 @@ BreakLocation BreakIterator::GetBreakLocation() {
generator_object_reg_index);
}
+Isolate* BreakIterator::isolate() { return debug_info_->GetIsolate(); }
void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
uint32_t mask = 1 << feature;
@@ -334,8 +336,6 @@ void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
// Threading support.
void Debug::ThreadInit() {
- thread_local_.break_count_ = 0;
- thread_local_.break_id_ = 0;
thread_local_.break_frame_id_ = StackFrame::NO_ID;
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = kNoSourcePosition;
@@ -1583,11 +1583,10 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
prev->set_next(node->next());
}
- // Pack function_identifier back into the
- // SFI::function_identifier_or_debug_info field.
+ // Pack script back into the
+ // SFI::script_or_debug_info field.
Handle<DebugInfo> debug_info(node->debug_info());
- debug_info->shared()->set_function_identifier_or_debug_info(
- debug_info->function_identifier());
+ debug_info->shared()->set_script_or_debug_info(debug_info->script());
delete node;
}
@@ -1632,12 +1631,12 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kDebugger);
Factory* factory = isolate_->factory();
- if (!factory->script_list()->IsFixedArrayOfWeakCells()) {
+ if (!factory->script_list()->IsWeakArrayList()) {
return factory->empty_fixed_array();
}
- Handle<FixedArrayOfWeakCells> array =
- Handle<FixedArrayOfWeakCells>::cast(factory->script_list());
- Handle<FixedArray> results = factory->NewFixedArray(array->Length());
+ Handle<WeakArrayList> array =
+ Handle<WeakArrayList>::cast(factory->script_list());
+ Handle<FixedArray> results = factory->NewFixedArray(array->length());
int length = 0;
{
Script::Iterator iterator(isolate_);
@@ -1742,7 +1741,6 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
DebugScope debug_scope(this);
HandleScope scope(isolate_);
- PostponeInterruptsScope postpone(isolate_);
DisableBreak no_recursive_break(this);
Handle<Context> native_context(isolate_->native_context());
@@ -1870,7 +1868,6 @@ void Debug::ProcessCompileEvent(bool has_compile_error, Handle<Script> script) {
SuppressDebug while_processing(this);
DebugScope debug_scope(this);
HandleScope scope(isolate_);
- PostponeInterruptsScope postpone(isolate_);
DisableBreak no_recursive_break(this);
AllowJavascriptExecution allow_script(isolate_);
debug_delegate_->ScriptCompiled(ToApiHandle<debug::Script>(script),
@@ -2011,15 +2008,14 @@ void Debug::PrintBreakLocation() {
DebugScope::DebugScope(Debug* debug)
: debug_(debug),
- prev_(debug->debugger_entry()),
- no_termination_exceptons_(debug_->isolate_,
- StackGuard::TERMINATE_EXECUTION) {
+ prev_(reinterpret_cast<DebugScope*>(
+ base::Relaxed_Load(&debug->thread_local_.current_debug_scope_))),
+ no_interrupts_(debug_->isolate_) {
// Link recursive debugger entry.
base::Relaxed_Store(&debug_->thread_local_.current_debug_scope_,
reinterpret_cast<base::AtomicWord>(this));
- // Store the previous break id, frame id and return value.
- break_id_ = debug_->break_id();
+ // Store the previous frame id and return value.
break_frame_id_ = debug_->break_frame_id();
// Create the new break info. If there is no proper frames there is no break
@@ -2028,7 +2024,6 @@ DebugScope::DebugScope(Debug* debug)
bool has_frames = !it.done();
debug_->thread_local_.break_frame_id_ =
has_frames ? it.frame()->id() : StackFrame::NO_ID;
- debug_->SetNextBreakId();
debug_->UpdateState();
}
@@ -2041,7 +2036,6 @@ DebugScope::~DebugScope() {
// Restore to the previous break state.
debug_->thread_local_.break_frame_id_ = break_frame_id_;
- debug_->thread_local_.break_id_ = break_id_;
debug_->UpdateState();
}
@@ -2185,6 +2179,10 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
return false;
}
+Handle<Object> Debug::return_value_handle() {
+ return handle(thread_local_.return_value_, isolate_);
+}
+
bool Debug::PerformSideEffectCheckForCallback(Handle<Object> callback_info) {
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
if (!callback_info.is_null() && callback_info->IsCallHandlerInfo() &&
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 13844769c1..a6ad7bd4da 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -8,7 +8,6 @@
#include <vector>
#include "src/allocation.h"
-#include "src/assembler.h"
#include "src/base/atomicops.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
@@ -30,6 +29,7 @@ namespace internal {
// Forward declarations.
class DebugScope;
+class JSGeneratorObject;
// Step actions. NOTE: These values are in macros.py as well.
enum StepAction : int8_t {
@@ -152,7 +152,7 @@ class BreakIterator {
private:
int BreakIndexFromPosition(int position);
- Isolate* isolate() { return debug_info_->GetIsolate(); }
+ Isolate* isolate();
DebugBreakType GetDebugBreakType();
@@ -316,7 +316,7 @@ class Debug {
void Iterate(RootVisitor* v);
void InitThread(const ExecutionAccess& lock) { ThreadInit(); }
- bool CheckExecutionState() { return is_active() && break_id() != 0; }
+ bool CheckExecutionState() { return is_active(); }
void StartSideEffectCheckMode();
void StopSideEffectCheckMode();
@@ -331,11 +331,6 @@ class Debug {
bool PerformSideEffectCheckForObject(Handle<Object> object);
// Flags and states.
- DebugScope* debugger_entry() {
- return reinterpret_cast<DebugScope*>(
- base::Relaxed_Load(&thread_local_.current_debug_scope_));
- }
-
inline bool is_active() const { return is_active_; }
inline bool in_debug_scope() const {
return !!base::Relaxed_Load(&thread_local_.current_debug_scope_);
@@ -348,11 +343,8 @@ class Debug {
bool break_points_active() const { return break_points_active_; }
StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
- int break_id() { return thread_local_.break_id_; }
- Handle<Object> return_value_handle() {
- return handle(thread_local_.return_value_, isolate_);
- }
+ Handle<Object> return_value_handle();
Object* return_value() { return thread_local_.return_value_; }
void set_return_value(Object* value) { thread_local_.return_value_ = value; }
@@ -394,9 +386,6 @@ class Debug {
void UpdateState();
void UpdateHookOnFunctionCall();
void Unload();
- void SetNextBreakId() {
- thread_local_.break_id_ = ++thread_local_.break_count_;
- }
// Return the number of virtual frames below debugger entry.
int CurrentFrameCount();
@@ -504,12 +493,6 @@ class Debug {
// Top debugger entry.
base::AtomicWord current_debug_scope_;
- // Counter for generating next break id.
- int break_count_;
-
- // Current break id.
- int break_id_;
-
// Frame id for the frame of the current break.
StackFrame::Id break_frame_id_;
@@ -579,8 +562,7 @@ class DebugScope BASE_EMBEDDED {
Debug* debug_;
DebugScope* prev_; // Previous scope if entered recursively.
StackFrame::Id break_frame_id_; // Previous break frame id.
- int break_id_; // Previous break id.
- PostponeInterruptsScope no_termination_exceptons_;
+ PostponeInterruptsScope no_interrupts_;
};
// This scope is used to handle return values in nested debug break points.
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 7193d0abd1..371d1d5575 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -4,7 +4,7 @@
#include "src/debug/liveedit.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
@@ -17,6 +17,7 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/source-position-table.h"
@@ -542,9 +543,25 @@ struct SourcePositionEvent {
if (a.position != b.position) return a.position < b.position;
if (a.type != b.type) return a.type < b.type;
if (a.type == LITERAL_STARTS && b.type == LITERAL_STARTS) {
- return a.literal->end_position() < b.literal->end_position();
+ // If the literals start in the same position, we want the one with the
+ // furthest (i.e. largest) end position to be first.
+ if (a.literal->end_position() != b.literal->end_position()) {
+ return a.literal->end_position() > b.literal->end_position();
+ }
+ // If they also end in the same position, we want the first in order of
+ // literal ids to be first.
+ return a.literal->function_literal_id() <
+ b.literal->function_literal_id();
} else if (a.type == LITERAL_ENDS && b.type == LITERAL_ENDS) {
- return a.literal->start_position() > b.literal->start_position();
+ // If the literals end in the same position, we want the one with the
+ // nearest (i.e. largest) start position to be first.
+ if (a.literal->start_position() != b.literal->start_position()) {
+ return a.literal->start_position() > b.literal->start_position();
+ }
+      // If they also start in the same position, we want the last in order of
+ // literal ids to be first.
+ return a.literal->function_literal_id() >
+ b.literal->function_literal_id();
} else {
return a.pos_diff < b.pos_diff;
}
@@ -658,20 +675,33 @@ using LiteralMap = std::unordered_map<FunctionLiteral*, FunctionLiteral*>;
void MapLiterals(const FunctionLiteralChanges& changes,
const std::vector<FunctionLiteral*>& new_literals,
LiteralMap* unchanged, LiteralMap* changed) {
+ // Track the top-level script function separately as it can overlap fully with
+ // another function, e.g. the script "()=>42".
+ const std::pair<int, int> kTopLevelMarker = std::make_pair(-1, -1);
std::map<std::pair<int, int>, FunctionLiteral*> position_to_new_literal;
for (FunctionLiteral* literal : new_literals) {
DCHECK(literal->start_position() != kNoSourcePosition);
DCHECK(literal->end_position() != kNoSourcePosition);
- position_to_new_literal[std::make_pair(literal->start_position(),
- literal->end_position())] = literal;
+ std::pair<int, int> key =
+ literal->function_literal_id() == FunctionLiteral::kIdTypeTopLevel
+ ? kTopLevelMarker
+ : std::make_pair(literal->start_position(),
+ literal->end_position());
+ // Make sure there are no duplicate keys.
+ DCHECK_EQ(position_to_new_literal.find(key), position_to_new_literal.end());
+ position_to_new_literal[key] = literal;
}
LiteralMap mappings;
std::unordered_map<FunctionLiteral*, ChangeState> change_state;
for (const auto& change_pair : changes) {
FunctionLiteral* literal = change_pair.first;
const FunctionLiteralChange& change = change_pair.second;
- auto it = position_to_new_literal.find(
- std::make_pair(change.new_start_position, change.new_end_position));
+ std::pair<int, int> key =
+ literal->function_literal_id() == FunctionLiteral::kIdTypeTopLevel
+ ? kTopLevelMarker
+ : std::make_pair(change.new_start_position,
+ change.new_end_position);
+ auto it = position_to_new_literal.find(key);
if (it == position_to_new_literal.end() ||
HasChangedScope(literal, it->second)) {
change_state[literal] = ChangeState::DAMAGED;
@@ -775,22 +805,22 @@ class FunctionDataMap : public ThreadVisitor {
public:
void AddInterestingLiteral(int script_id, FunctionLiteral* literal,
bool should_restart) {
- map_.emplace(std::make_pair(script_id, literal->function_literal_id()),
+ map_.emplace(GetFuncId(script_id, literal),
FunctionData{literal, should_restart});
}
- bool Lookup(Isolate* isolate, SharedFunctionInfo* sfi, FunctionData** data) {
- int function_literal_id = sfi->FunctionLiteralId(isolate);
- if (!sfi->script()->IsScript() || function_literal_id == -1) {
+ bool Lookup(SharedFunctionInfo* sfi, FunctionData** data) {
+ int start_position = sfi->StartPosition();
+ if (!sfi->script()->IsScript() || start_position == -1) {
return false;
}
Script* script = Script::cast(sfi->script());
- return Lookup(script->id(), function_literal_id, data);
+ return Lookup(GetFuncId(script->id(), sfi), data);
}
bool Lookup(Handle<Script> script, FunctionLiteral* literal,
FunctionData** data) {
- return Lookup(script->id(), literal->function_literal_id(), data);
+ return Lookup(GetFuncId(script->id(), literal), data);
}
void Fill(Isolate* isolate, Address* restart_frame_fp) {
@@ -800,20 +830,20 @@ class FunctionDataMap : public ThreadVisitor {
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
FunctionData* data = nullptr;
- if (!Lookup(isolate, sfi, &data)) continue;
+ if (!Lookup(sfi, &data)) continue;
data->shared = handle(sfi, isolate);
} else if (obj->IsJSFunction()) {
JSFunction* js_function = JSFunction::cast(obj);
SharedFunctionInfo* sfi = js_function->shared();
FunctionData* data = nullptr;
- if (!Lookup(isolate, sfi, &data)) continue;
+ if (!Lookup(sfi, &data)) continue;
data->js_functions.emplace_back(js_function, isolate);
} else if (obj->IsJSGeneratorObject()) {
JSGeneratorObject* gen = JSGeneratorObject::cast(obj);
if (gen->is_closed()) continue;
SharedFunctionInfo* sfi = gen->function()->shared();
FunctionData* data = nullptr;
- if (!Lookup(isolate, sfi, &data)) continue;
+ if (!Lookup(sfi, &data)) continue;
data->running_generators.emplace_back(gen, isolate);
}
}
@@ -843,7 +873,7 @@ class FunctionDataMap : public ThreadVisitor {
stack_position = FunctionData::BELOW_NON_DROPPABLE_FRAME;
}
FunctionData* data = nullptr;
- if (!Lookup(isolate, *sfi, &data)) continue;
+ if (!Lookup(*sfi, &data)) continue;
if (!data->should_restart) continue;
data->stack_position = stack_position;
*restart_frame_fp = frame->fp();
@@ -854,8 +884,36 @@ class FunctionDataMap : public ThreadVisitor {
}
private:
- bool Lookup(int script_id, int function_literal_id, FunctionData** data) {
- auto it = map_.find(std::make_pair(script_id, function_literal_id));
+ // Unique id for a function: script_id + start_position, where start_position
+ // is special cased to -1 for top-level so that it does not overlap with a
+ // function whose start position is 0.
+ using FuncId = std::pair<int, int>;
+
+ FuncId GetFuncId(int script_id, FunctionLiteral* literal) {
+ int start_position = literal->start_position();
+ if (literal->function_literal_id() == 0) {
+ // This is the top-level script function literal, so special case its
+ // start position
+ DCHECK_EQ(start_position, 0);
+ start_position = -1;
+ }
+ return FuncId(script_id, start_position);
+ }
+
+ FuncId GetFuncId(int script_id, SharedFunctionInfo* sfi) {
+ DCHECK_EQ(script_id, Script::cast(sfi->script())->id());
+ int start_position = sfi->StartPosition();
+ DCHECK_NE(start_position, -1);
+ if (sfi->is_toplevel()) {
+ // This is the top-level function, so special case its start position
+ DCHECK_EQ(start_position, 0);
+ start_position = -1;
+ }
+ return FuncId(script_id, start_position);
+ }
+
+ bool Lookup(FuncId id, FunctionData** data) {
+ auto it = map_.find(id);
if (it == map_.end()) return false;
*data = &it->second;
return true;
@@ -867,14 +925,13 @@ class FunctionDataMap : public ThreadVisitor {
it.frame()->GetFunctions(&sfis);
for (auto& sfi : sfis) {
FunctionData* data = nullptr;
- if (!Lookup(isolate, *sfi, &data)) continue;
+ if (!Lookup(*sfi, &data)) continue;
data->stack_position = FunctionData::ARCHIVED_THREAD;
}
}
}
- using UniqueLiteralId = std::pair<int, int>; // script_id + literal_id
- std::map<UniqueLiteralId, FunctionData> map_;
+ std::map<FuncId, FunctionData> map_;
};
bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
@@ -932,7 +989,7 @@ bool CanRestartFrame(Isolate* isolate, Address fp,
JavaScriptFrame::cast(restart_frame)->GetFunctions(&sfis);
for (auto& sfi : sfis) {
FunctionData* data = nullptr;
- if (!function_data_map.Lookup(isolate, *sfi, &data)) continue;
+ if (!function_data_map.Lookup(*sfi, &data)) continue;
auto new_literal_it = changed.find(data->literal);
if (new_literal_it == changed.end()) continue;
if (new_literal_it->second->scope()->new_target_var()) {
@@ -944,9 +1001,8 @@ bool CanRestartFrame(Isolate* isolate, Address fp,
return true;
}
-void TranslateSourcePositionTable(Handle<BytecodeArray> code,
+void TranslateSourcePositionTable(Isolate* isolate, Handle<BytecodeArray> code,
const std::vector<SourceChangeRange>& diffs) {
- Isolate* isolate = code->GetIsolate();
SourcePositionTableBuilder builder;
Handle<ByteArray> source_position_table(code->SourcePositionTable(), isolate);
@@ -979,8 +1035,8 @@ void UpdatePositions(Isolate* isolate, Handle<SharedFunctionInfo> sfi,
sfi->SetFunctionTokenPosition(new_function_token_position,
new_start_position);
if (sfi->HasBytecodeArray()) {
- TranslateSourcePositionTable(handle(sfi->GetBytecodeArray(), isolate),
- diffs);
+ TranslateSourcePositionTable(
+ isolate, handle(sfi->GetBytecodeArray(), isolate), diffs);
}
}
} // anonymous namespace
@@ -1042,6 +1098,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
return;
}
+ std::map<int, int> start_position_to_unchanged_id;
for (const auto& mapping : unchanged) {
FunctionData* data = nullptr;
if (!function_data_map.Lookup(script, mapping.first, &data)) continue;
@@ -1057,10 +1114,6 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
}
UpdatePositions(isolate, sfi, diffs);
- MaybeObject* weak_redundant_new_sfi =
- new_script->shared_function_infos()->Get(
- mapping.second->function_literal_id());
-
sfi->set_script(*new_script);
if (sfi->HasUncompiledData()) {
sfi->uncompiled_data()->set_function_literal_id(
@@ -1071,26 +1124,10 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
DCHECK_EQ(sfi->FunctionLiteralId(isolate),
mapping.second->function_literal_id());
- // Swap the now-redundant, newly compiled SFI into the old script, so that
- // we can look up the old function_literal_id using the new SFI when
- // processing changed functions.
- HeapObject* redundant_new_sfi_obj;
- if (weak_redundant_new_sfi->ToStrongOrWeakHeapObject(
- &redundant_new_sfi_obj)) {
- SharedFunctionInfo* redundant_new_sfi =
- SharedFunctionInfo::cast(redundant_new_sfi_obj);
-
- redundant_new_sfi->set_script(*script);
- if (redundant_new_sfi->HasUncompiledData()) {
- redundant_new_sfi->uncompiled_data()->set_function_literal_id(
- mapping.first->function_literal_id());
- }
- script->shared_function_infos()->Set(
- mapping.first->function_literal_id(),
- HeapObjectReference::Weak(redundant_new_sfi));
- DCHECK_EQ(redundant_new_sfi->FunctionLiteralId(isolate),
- mapping.first->function_literal_id());
- }
+ // Save the new start_position -> id mapping, so that we can recover it when
+ // iterating over changed functions' constant pools.
+ start_position_to_unchanged_id[mapping.second->start_position()] =
+ mapping.second->function_literal_id();
if (sfi->HasUncompiledDataWithPreParsedScope()) {
sfi->ClearPreParsedScopeData();
@@ -1107,8 +1144,8 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
for (int i = 0; i < constants->length(); ++i) {
if (!constants->get(i)->IsSharedFunctionInfo()) continue;
FunctionData* data = nullptr;
- if (!function_data_map.Lookup(
- isolate, SharedFunctionInfo::cast(constants->get(i)), &data)) {
+ if (!function_data_map.Lookup(SharedFunctionInfo::cast(constants->get(i)),
+ &data)) {
continue;
}
auto change_it = changed.find(data->literal);
@@ -1148,44 +1185,48 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
if (!constants->get(i)->IsSharedFunctionInfo()) continue;
SharedFunctionInfo* inner_sfi =
SharedFunctionInfo::cast(constants->get(i));
- if (inner_sfi->script() != *script) continue;
-
- // If the inner SFI's script is the old script, then this is actually a
- // redundant new_script SFI where the old script SFI was unchanged, so we
- // swapped their scripts in the unchanged iteration. This means that we
- // have to update this changed SFI's inner SFI constant to point at the
- // old inner SFI, which has already been patched to be on the new script.
- //
- // So, we look up FunctionData using the current, newly compiled
- // inner_sfi, but the resulting FunctionData will still be referring to
- // the old, unchanged SFI.
- FunctionData* data = nullptr;
- if (!function_data_map.Lookup(isolate, inner_sfi, &data)) continue;
- Handle<SharedFunctionInfo> old_unchanged_inner_sfi =
- data->shared.ToHandleChecked();
+
+      // See if there is a mapping from this function's start position to an
+      // unchanged function's id.
+ auto unchanged_it =
+ start_position_to_unchanged_id.find(inner_sfi->StartPosition());
+ if (unchanged_it == start_position_to_unchanged_id.end()) continue;
+
+ // Grab that function id from the new script's SFI list, which should have
+      // already been updated in the unchanged pass.
+ SharedFunctionInfo* old_unchanged_inner_sfi =
+ SharedFunctionInfo::cast(new_script->shared_function_infos()
+ ->Get(unchanged_it->second)
+ ->GetHeapObject());
// Now some sanity checks. Make sure that this inner_sfi is not the
// unchanged SFI yet...
- DCHECK_NE(*old_unchanged_inner_sfi, inner_sfi);
- // ... that the unchanged SFI has already been processed and patched to be
- // on the new script ...
+ DCHECK_NE(old_unchanged_inner_sfi, inner_sfi);
+ // ... and that the unchanged SFI has already been processed and patched
+ // to be on the new script ...
DCHECK_EQ(old_unchanged_inner_sfi->script(), *new_script);
- // ... and that the id of the unchanged SFI matches the unchanged target
- // literal's id.
- DCHECK_EQ(old_unchanged_inner_sfi->FunctionLiteralId(isolate),
- unchanged[data->literal]->function_literal_id());
- constants->set(i, *old_unchanged_inner_sfi);
+
+ constants->set(i, old_unchanged_inner_sfi);
}
}
#ifdef DEBUG
{
- // Check that all the functions in the new script are valid and that their
- // function literals match what is expected.
+ // Check that all the functions in the new script are valid, that their
+ // function literals match what is expected, and that start positions are
+ // unique.
DisallowHeapAllocation no_gc;
SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
+ std::set<int> start_positions;
while (SharedFunctionInfo* sfi = it.Next()) {
DCHECK_EQ(sfi->script(), *new_script);
DCHECK_EQ(sfi->FunctionLiteralId(isolate), it.CurrentIndex());
+ // Don't check the start position of the top-level function, as it can
+ // overlap with a function in the script.
+ if (sfi->is_toplevel()) {
+ DCHECK_EQ(start_positions.find(sfi->StartPosition()),
+ start_positions.end());
+ start_positions.insert(sfi->StartPosition());
+ }
if (!sfi->HasBytecodeArray()) continue;
// Check that all the functions in this function's constant pool are also
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/debug/mips/OWNERS
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/debug/mips64/OWNERS
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 8106abea60..a2e59caf15 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -702,14 +702,14 @@ void Deoptimizer::DoComputeOutputFrames() {
caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();
Address fp_address = input_->GetFramePointerAddress();
- caller_fp_ = Memory::intptr_at(fp_address);
+ caller_fp_ = Memory<intptr_t>(fp_address);
caller_pc_ =
- Memory::intptr_at(fp_address + CommonFrameConstants::kCallerPCOffset);
- input_frame_context_ = Memory::intptr_at(
+ Memory<intptr_t>(fp_address + CommonFrameConstants::kCallerPCOffset);
+ input_frame_context_ = Memory<intptr_t>(
fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);
if (FLAG_enable_embedded_constant_pool) {
- caller_constant_pool_ = Memory::intptr_at(
+ caller_constant_pool_ = Memory<intptr_t>(
fp_address + CommonFrameConstants::kConstantPoolOffset);
}
}
@@ -1830,7 +1830,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, kind);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
// Allocate the code as immovable since the entry addresses will be used
// directly and there is no support for relocating them.
@@ -2596,9 +2596,9 @@ int TranslatedValue::GetChildrenCount() const {
uint32_t TranslatedState::GetUInt32Slot(Address fp, int slot_offset) {
Address address = fp + slot_offset;
#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
- return Memory::uint32_at(address + kIntSize);
+ return Memory<uint32_t>(address + kIntSize);
#else
- return Memory::uint32_at(address);
+ return Memory<uint32_t>(address);
#endif
}
@@ -2606,12 +2606,12 @@ Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) {
#if !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
return Float32::FromBits(GetUInt32Slot(fp, slot_offset));
#else
- return Float32::FromBits(Memory::uint32_at(fp + slot_offset));
+ return Float32::FromBits(Memory<uint32_t>(fp + slot_offset));
#endif
}
Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
- return Float64::FromBits(Memory::uint64_at(fp + slot_offset));
+ return Float64::FromBits(Memory<uint64_t>(fp + slot_offset));
}
void TranslatedValue::Handlify() {
@@ -2851,7 +2851,7 @@ Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
int* length) {
Address parent_frame_pointer = *reinterpret_cast<Address*>(
input_frame_pointer + StandardFrameConstants::kCallerFPOffset);
- intptr_t parent_frame_type = Memory::intptr_at(
+ intptr_t parent_frame_type = Memory<intptr_t>(
parent_frame_pointer + CommonFrameConstants::kContextOrFrameTypeOffset);
Address arguments_frame;
@@ -3719,8 +3719,8 @@ void TranslatedState::InitializeJSObjectAt(
Handle<Object> properties = GetValueAndAdvance(frame, value_index);
WRITE_FIELD(*object_storage, JSObject::kPropertiesOrHashOffset,
*properties);
- WRITE_BARRIER(isolate()->heap(), *object_storage,
- JSObject::kPropertiesOrHashOffset, *properties);
+ WRITE_BARRIER(*object_storage, JSObject::kPropertiesOrHashOffset,
+ *properties);
}
// For all the other fields we first look at the fixed array and check the
@@ -3747,11 +3747,11 @@ void TranslatedState::InitializeJSObjectAt(
} else if (marker == kStoreMutableHeapNumber) {
CHECK(field_value->IsMutableHeapNumber());
WRITE_FIELD(*object_storage, offset, *field_value);
- WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ WRITE_BARRIER(*object_storage, offset, *field_value);
} else {
CHECK_EQ(kStoreTagged, marker);
WRITE_FIELD(*object_storage, offset, *field_value);
- WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ WRITE_BARRIER(*object_storage, offset, *field_value);
}
}
object_storage->synchronized_set_map(*map);
@@ -3787,7 +3787,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
}
WRITE_FIELD(*object_storage, offset, *field_value);
- WRITE_BARRIER(isolate()->heap(), *object_storage, offset, *field_value);
+ WRITE_BARRIER(*object_storage, offset, *field_value);
}
object_storage->synchronized_set_map(*map);
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 1f20dbdac1..d981a86253 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -18,6 +18,7 @@
#include "src/globals.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
+#include "src/objects/shared-function-info.h"
#include "src/source-position.h"
#include "src/zone/zone-chunk-list.h"
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/disasm.h
index 00e0e29546..f8ef304d2c 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/disasm.h
@@ -24,6 +24,12 @@ class NameConverter {
virtual const char* NameOfConstant(byte* addr) const;
virtual const char* NameInCode(byte* addr) const;
+ // Given a root-relative offset, returns either a name or nullptr if none is
+ // found.
+ // TODO(jgruber,v8:7989): This is a temporary solution until we can preserve
+ // code comments through snapshotting.
+ virtual const char* RootRelativeName(int offset) const { UNREACHABLE(); }
+
protected:
v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
};
@@ -32,32 +38,39 @@ class NameConverter {
// A generic Disassembler interface
class Disassembler {
public:
+ enum UnimplementedOpcodeAction : int8_t {
+ kContinueOnUnimplementedOpcode,
+ kAbortOnUnimplementedOpcode
+ };
+
// Caller deallocates converter.
- explicit Disassembler(const NameConverter& converter);
+ explicit Disassembler(const NameConverter& converter,
+ UnimplementedOpcodeAction unimplemented_opcode_action =
+ kAbortOnUnimplementedOpcode)
+ : converter_(converter),
+ unimplemented_opcode_action_(unimplemented_opcode_action) {}
- virtual ~Disassembler();
+ UnimplementedOpcodeAction unimplemented_opcode_action() const {
+ return unimplemented_opcode_action_;
+ }
// Writes one disassembled instruction into 'buffer' (0-terminated).
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
- // Disassemblers on ia32/x64 need a separate method for testing, as
- // instruction decode method above continues on unimplemented opcodes, and
- // does not test the disassemblers. Basic functionality of the method remains
- // the same.
- int InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
- byte* instruction);
-
// Returns -1 if instruction does not mark the beginning of a constant pool,
// or the number of entries in the constant pool beginning here.
int ConstantPoolSizeAt(byte* instruction);
// Write disassembly into specified file 'f' using specified NameConverter
// (see constructor).
- static void Disassemble(FILE* f, byte* begin, byte* end);
+ static void Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action =
+ kAbortOnUnimplementedOpcode);
private:
const NameConverter& converter_;
+ const UnimplementedOpcodeAction unimplemented_opcode_action_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Disassembler);
};
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 6a9d8deee0..4ccddc289c 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -14,6 +14,7 @@
#include "src/deoptimizer.h"
#include "src/disasm.h"
#include "src/ic/ic.h"
+#include "src/instruction-stream.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/snapshot/serializer-common.h"
@@ -30,8 +31,10 @@ class V8NameConverter: public disasm::NameConverter {
public:
explicit V8NameConverter(Isolate* isolate, CodeReference code = {})
: isolate_(isolate), code_(code) {}
- virtual const char* NameOfAddress(byte* pc) const;
- virtual const char* NameInCode(byte* addr) const;
+ const char* NameOfAddress(byte* pc) const override;
+ const char* NameInCode(byte* addr) const override;
+ const char* RootRelativeName(int offset) const override;
+
const CodeReference& code() const { return code_; }
private:
@@ -82,6 +85,51 @@ const char* V8NameConverter::NameInCode(byte* addr) const {
return code_.is_null() ? "" : reinterpret_cast<const char*>(addr);
}
+const char* V8NameConverter::RootRelativeName(int offset) const {
+ if (isolate_ == nullptr) return nullptr;
+
+ const int kRootsStart = 0;
+ const int kRootsEnd = Heap::roots_to_external_reference_table_offset();
+ const int kExtRefsStart = Heap::roots_to_external_reference_table_offset();
+ const int kExtRefsEnd = Heap::roots_to_builtins_offset();
+
+ if (kRootsStart <= offset && offset < kRootsEnd) {
+ uint32_t offset_in_roots_table = offset - kRootsStart;
+
+ // Fail safe in the unlikely case of an arbitrary root-relative offset.
+ if (offset_in_roots_table % kPointerSize != 0) return nullptr;
+
+ Heap::RootListIndex root_index =
+ static_cast<Heap::RootListIndex>(offset_in_roots_table / kPointerSize);
+
+ HeapStringAllocator allocator;
+ StringStream accumulator(&allocator);
+ isolate_->heap()->root(root_index)->ShortPrint(&accumulator);
+ std::unique_ptr<char[]> obj_name = accumulator.ToCString();
+
+ SNPrintF(v8_buffer_, "root (%s)", obj_name.get());
+ return v8_buffer_.start();
+ } else if (kExtRefsStart <= offset && offset < kExtRefsEnd) {
+ uint32_t offset_in_extref_table = offset - kExtRefsStart;
+
+ // Fail safe in the unlikely case of an arbitrary root-relative offset.
+ if (offset_in_extref_table % ExternalReferenceTable::EntrySize() != 0) {
+ return nullptr;
+ }
+
+ // Likewise if the external reference table is uninitialized.
+ if (!isolate_->heap()->external_reference_table()->is_initialized()) {
+ return nullptr;
+ }
+
+ SNPrintF(v8_buffer_, "external reference (%s)",
+ isolate_->heap()->external_reference_table()->NameFromOffset(
+ offset_in_extref_table));
+ return v8_buffer_.start();
+ } else {
+ return nullptr;
+ }
+}
static void DumpBuffer(std::ostream* os, StringBuilder* out) {
(*os) << out->Finalize() << std::endl;
@@ -169,16 +217,18 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
}
static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
- std::ostream* os, const V8NameConverter& converter,
- byte* begin, byte* end, Address current_pc) {
+ std::ostream* os, CodeReference code,
+ const V8NameConverter& converter, byte* begin, byte* end,
+ Address current_pc) {
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
StringBuilder out(out_buffer.start(), out_buffer.length());
byte* pc = begin;
- disasm::Disassembler d(converter);
+ disasm::Disassembler d(converter,
+ disasm::Disassembler::kContinueOnUnimplementedOpcode);
RelocIterator* it = nullptr;
- if (!converter.code().is_null()) {
- it = new RelocIterator(converter.code());
+ if (!code.is_null()) {
+ it = new RelocIterator(code);
} else {
// No relocation information when printing code stubs.
}
@@ -257,7 +307,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// Print all the reloc info for this instruction which are not comments.
for (size_t i = 0; i < pcs.size(); i++) {
// Put together the reloc info
- const CodeReference& host = converter.code();
+ const CodeReference& host = code;
Address constant_pool =
host.is_null() ? kNullAddress : host.constant_pool();
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], nullptr, constant_pool);
@@ -270,13 +320,13 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// If this is a constant pool load and we haven't found any RelocInfo
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
- if (pcs.empty() && !converter.code().is_null()) {
+ if (pcs.empty() && !code.is_null()) {
RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc), RelocInfo::NONE,
0, nullptr);
if (dummy_rinfo.IsInConstantPool()) {
Address constant_pool_entry_address =
dummy_rinfo.constant_pool_entry_address();
- RelocIterator reloc_it(converter.code());
+ RelocIterator reloc_it(code);
while (!reloc_it.done()) {
if (reloc_it.rinfo()->IsInConstantPool() &&
(reloc_it.rinfo()->constant_pool_entry_address() ==
@@ -314,16 +364,19 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
byte* end, CodeReference code, Address current_pc) {
V8NameConverter v8NameConverter(isolate, code);
+ bool decode_off_heap = isolate && InstructionStream::PcIsOffHeap(
+ isolate, bit_cast<Address>(begin));
+ CodeReference code_ref = decode_off_heap ? CodeReference() : code;
if (isolate) {
// We have an isolate, so support external reference names.
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder(isolate);
- return DecodeIt(isolate, &ref_encoder, os, v8NameConverter, begin, end,
- current_pc);
+ return DecodeIt(isolate, &ref_encoder, os, code_ref, v8NameConverter, begin,
+ end, current_pc);
} else {
// No isolate => isolate-independent code. No external reference names.
- return DecodeIt(nullptr, nullptr, os, v8NameConverter, begin, end,
+ return DecodeIt(nullptr, nullptr, os, code_ref, v8NameConverter, begin, end,
current_pc);
}
}
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h
index deb98ca140..5315d5598f 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/disassembler.h
@@ -16,6 +16,8 @@ class Disassembler : public AllStatic {
// Decode instructions in the interval [begin, end) and print the
// code into os. Returns the number of bytes disassembled or 1 if no
// instruction could be decoded.
+ // Does not abort on unimplemented opcodes, but prints them as 'Unimplemented
+ // Instruction'.
// the code object is used for name resolution and may be null.
// TODO(titzer): accept a {WasmCodeManager*} if {isolate} is null
static int Decode(Isolate* isolate, std::ostream* os, byte* begin, byte* end,
diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h
index b0a2ecf05e..65964045e2 100644
--- a/deps/v8/src/double.h
+++ b/deps/v8/src/double.h
@@ -5,6 +5,7 @@
#ifndef V8_DOUBLE_H_
#define V8_DOUBLE_H_
+#include "src/base/macros.h"
#include "src/diy-fp.h"
namespace v8 {
@@ -17,13 +18,16 @@ inline double uint64_to_double(uint64_t d64) { return bit_cast<double>(d64); }
// Helper functions for doubles.
class Double {
public:
- static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
- static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
- static const uint64_t kSignificandMask =
+ static constexpr uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
+ static constexpr uint64_t kExponentMask =
+ V8_2PART_UINT64_C(0x7FF00000, 00000000);
+ static constexpr uint64_t kSignificandMask =
V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
- static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);
- static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
- static const int kSignificandSize = 53;
+ static constexpr uint64_t kHiddenBit =
+ V8_2PART_UINT64_C(0x00100000, 00000000);
+ static constexpr int kPhysicalSignificandSize =
+ 52; // Excludes the hidden bit.
+ static constexpr int kSignificandSize = 53;
Double() : d64_(0) {}
explicit Double(double d) : d64_(double_to_uint64(d)) {}
@@ -169,10 +173,10 @@ class Double {
}
private:
- static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
- static const int kDenormalExponent = -kExponentBias + 1;
- static const int kMaxExponent = 0x7FF - kExponentBias;
- static const uint64_t kInfinity = V8_2PART_UINT64_C(0x7FF00000, 00000000);
+ static constexpr int kExponentBias = 0x3FF + kPhysicalSignificandSize;
+ static constexpr int kDenormalExponent = -kExponentBias + 1;
+ static constexpr int kMaxExponent = 0x7FF - kExponentBias;
+ static constexpr uint64_t kInfinity = V8_2PART_UINT64_C(0x7FF00000, 00000000);
// The field d64_ is not marked as const to permit the usage of the copy
// constructor.
diff --git a/deps/v8/src/elements-inl.h b/deps/v8/src/elements-inl.h
new file mode 100644
index 0000000000..68099d2f2c
--- /dev/null
+++ b/deps/v8/src/elements-inl.h
@@ -0,0 +1,38 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ELEMENTS_INL_H_
+#define V8_ELEMENTS_INL_H_
+
+#include "src/elements.h"
+
+#include "src/handles-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline void ElementsAccessor::CollectElementIndices(Handle<JSObject> object,
+ KeyAccumulator* keys) {
+ CollectElementIndices(object, handle(object->elements(), keys->isolate()),
+ keys);
+}
+
+inline MaybeHandle<FixedArray> ElementsAccessor::PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter) {
+ return PrependElementIndices(object,
+ handle(object->elements(), object->GetIsolate()),
+ keys, convert, filter);
+}
+
+inline bool ElementsAccessor::HasElement(JSObject* holder, uint32_t index,
+ PropertyFilter filter) {
+ return HasElement(holder, index, holder->elements(), filter);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ELEMENTS_INL_H_
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 95bfc2e93d..eedf74e49c 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -4,7 +4,6 @@
#include "src/elements-kind.h"
-#include "src/api.h"
#include "src/base/lazy-instance.h"
#include "src/elements.h"
#include "src/objects-inl.h"
@@ -131,31 +130,27 @@ static inline bool IsFastTransitionTarget(ElementsKind elements_kind) {
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
- if (IsFixedTypedArrayElementsKind(from_kind) ||
- IsFixedTypedArrayElementsKind(to_kind)) {
- return false;
- }
- if (IsFastElementsKind(from_kind) && IsFastTransitionTarget(to_kind)) {
- switch (from_kind) {
- case PACKED_SMI_ELEMENTS:
- return to_kind != PACKED_SMI_ELEMENTS;
- case HOLEY_SMI_ELEMENTS:
- return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS;
- case PACKED_DOUBLE_ELEMENTS:
- return to_kind != PACKED_SMI_ELEMENTS &&
- to_kind != HOLEY_SMI_ELEMENTS &&
- to_kind != PACKED_DOUBLE_ELEMENTS;
- case HOLEY_DOUBLE_ELEMENTS:
- return to_kind == PACKED_ELEMENTS || to_kind == HOLEY_ELEMENTS;
- case PACKED_ELEMENTS:
- return to_kind == HOLEY_ELEMENTS;
- case HOLEY_ELEMENTS:
- return false;
- default:
- return false;
- }
+ if (!IsFastElementsKind(from_kind)) return false;
+ if (!IsFastTransitionTarget(to_kind)) return false;
+ DCHECK(!IsFixedTypedArrayElementsKind(from_kind));
+ DCHECK(!IsFixedTypedArrayElementsKind(to_kind));
+ switch (from_kind) {
+ case PACKED_SMI_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS;
+ case HOLEY_SMI_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS;
+ case PACKED_DOUBLE_ELEMENTS:
+ return to_kind != PACKED_SMI_ELEMENTS && to_kind != HOLEY_SMI_ELEMENTS &&
+ to_kind != PACKED_DOUBLE_ELEMENTS;
+ case HOLEY_DOUBLE_ELEMENTS:
+ return to_kind == PACKED_ELEMENTS || to_kind == HOLEY_ELEMENTS;
+ case PACKED_ELEMENTS:
+ return to_kind == HOLEY_ELEMENTS;
+ case HOLEY_ELEMENTS:
+ return false;
+ default:
+ return false;
}
- return false;
}
bool UnionElementsKindUptoSize(ElementsKind* a_out, ElementsKind b) {
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 88c5350b4d..e5d55c246a 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -11,6 +11,20 @@
namespace v8 {
namespace internal {
+// V has parameters (Type, type, TYPE, C type)
+#define TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t) \
+ V(Int8, int8, INT8, int8_t) \
+ V(Uint16, uint16, UINT16, uint16_t) \
+ V(Int16, int16, INT16, int16_t) \
+ V(Uint32, uint32, UINT32, uint32_t) \
+ V(Int32, int32, INT32, int32_t) \
+ V(Float32, float32, FLOAT32, float) \
+ V(Float64, float64, FLOAT64, double) \
+ V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t) \
+ V(BigUint64, biguint64, BIGUINT64, uint64_t) \
+ V(BigInt64, bigint64, BIGINT64, int64_t)
+
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
@@ -39,18 +53,10 @@ enum ElementsKind {
FAST_STRING_WRAPPER_ELEMENTS,
SLOW_STRING_WRAPPER_ELEMENTS,
- // Fixed typed arrays.
- UINT8_ELEMENTS,
- INT8_ELEMENTS,
- UINT16_ELEMENTS,
- INT16_ELEMENTS,
- UINT32_ELEMENTS,
- INT32_ELEMENTS,
- FLOAT32_ELEMENTS,
- FLOAT64_ELEMENTS,
- UINT8_CLAMPED_ELEMENTS,
- BIGUINT64_ELEMENTS,
- BIGINT64_ELEMENTS,
+// Fixed typed arrays.
+#define TYPED_ARRAY_ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_ELEMENTS_KIND)
+#undef TYPED_ARRAY_ELEMENTS_KIND
// Sentinel ElementsKind for objects with no elements.
NO_ELEMENTS,
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 575ccde2a7..6c4222385c 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -8,11 +8,14 @@
#include "src/conversions.h"
#include "src/frames.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/utils.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
@@ -600,13 +603,12 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
- if (!IsHoleyOrDictionaryElementsKind(kind())) return;
+ if (!IsHoleyElementsKind(kind())) return;
Handle<FixedArrayBase> backing_store(array->elements(),
array->GetIsolate());
int length = Smi::ToInt(array->length());
- if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) {
- return;
- }
+ if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) return;
+
ElementsKind packed_kind = GetPackedElementsKind(kind());
Handle<Map> new_map =
JSObject::GetElementsTransitionMap(array, packed_kind);
@@ -936,8 +938,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Handle<FixedArrayBase> elements =
ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
- if (IsHoleyOrDictionaryElementsKind(from_kind))
+ if (IsHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
+ }
Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, to_kind);
JSObject::SetMapAndElements(object, new_map, elements);
@@ -1170,11 +1173,15 @@ class ElementsAccessorBase : public InternalElementsAccessor {
PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
uint32_t insertion_index = 0) {
uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
+ uint32_t const kMaxStringTableEntries =
+ isolate->heap()->MaxNumberToStringCacheSize();
for (uint32_t i = 0; i < length; i++) {
if (Subclass::HasElementImpl(isolate, *object, i, *backing_store,
filter)) {
if (convert == GetKeysConversion::kConvertToString) {
- Handle<String> index_string = isolate->factory()->Uint32ToString(i);
+ bool use_cache = i < kMaxStringTableEntries;
+ Handle<String> index_string =
+ isolate->factory()->Uint32ToString(i, use_cache);
list->set(insertion_index, *index_string);
} else {
list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER);
@@ -1285,15 +1292,14 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::GetCapacityImpl(holder, backing_store);
}
- static Object* FillImpl(Isolate* isolate, Handle<JSObject> receiver,
- Handle<Object> obj_value, uint32_t start,
- uint32_t end) {
+ static Object* FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
UNREACHABLE();
}
- Object* Fill(Isolate* isolate, Handle<JSObject> receiver,
- Handle<Object> obj_value, uint32_t start, uint32_t end) {
- return Subclass::FillImpl(isolate, receiver, obj_value, start, end);
+ Object* Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
+ return Subclass::FillImpl(receiver, obj_value, start, end);
}
static Maybe<bool> IncludesValueImpl(Isolate* isolate,
@@ -1324,17 +1330,16 @@ class ElementsAccessorBase : public InternalElementsAccessor {
length);
}
- static Maybe<int64_t> LastIndexOfValueImpl(Isolate* isolate,
- Handle<JSObject> receiver,
+ static Maybe<int64_t> LastIndexOfValueImpl(Handle<JSObject> receiver,
Handle<Object> value,
uint32_t start_from) {
UNREACHABLE();
}
- Maybe<int64_t> LastIndexOfValue(Isolate* isolate, Handle<JSObject> receiver,
+ Maybe<int64_t> LastIndexOfValue(Handle<JSObject> receiver,
Handle<Object> value,
uint32_t start_from) final {
- return Subclass::LastIndexOfValueImpl(isolate, receiver, value, start_from);
+ return Subclass::LastIndexOfValueImpl(receiver, value, start_from);
}
static void ReverseImpl(JSObject* receiver) { UNREACHABLE(); }
@@ -1349,8 +1354,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index, PropertyFilter filter) {
+ DCHECK(IsFastElementsKind(kind()));
uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
- if (IsHoleyOrDictionaryElementsKind(kind())) {
+ if (IsHoleyElementsKind(kind())) {
return index < length &&
!BackingStore::cast(backing_store)
->is_the_hole(isolate, index)
@@ -1960,7 +1966,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
int j = 0;
int max_number_key = -1;
for (int i = 0; j < capacity; i++) {
- if (IsHoleyOrDictionaryElementsKind(kind)) {
+ if (IsHoleyElementsKind(kind)) {
if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
}
max_number_key = i;
@@ -2305,6 +2311,32 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
+ static Object* FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
+ // Ensure indexes are within array bounds
+ DCHECK_LE(0, start);
+ DCHECK_LE(start, end);
+
+ // Make sure COW arrays are copied.
+ if (IsSmiOrObjectElementsKind(Subclass::kind())) {
+ JSObject::EnsureWritableFastElements(receiver);
+ }
+
+ // Make sure we have enough space.
+ uint32_t capacity =
+ Subclass::GetCapacityImpl(*receiver, receiver->elements());
+ if (end > capacity) {
+ Subclass::GrowCapacityAndConvertImpl(receiver, end);
+ CHECK_EQ(Subclass::kind(), receiver->GetElementsKind());
+ }
+ DCHECK_LE(end, Subclass::GetCapacityImpl(*receiver, receiver->elements()));
+
+ for (uint32_t index = start; index < end; ++index) {
+ Subclass::SetImpl(receiver, index, *obj_value);
+ }
+ return *receiver;
+ }
+
static Maybe<bool> IncludesValueImpl(Isolate* isolate,
Handle<JSObject> receiver,
Handle<Object> search_value,
@@ -2536,7 +2568,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
- if (IsHoleyOrDictionaryElementsKind(kind) && result->IsTheHole(isolate)) {
+ if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
}
return result;
@@ -2656,8 +2688,8 @@ class FastSmiOrObjectElementsAccessor
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// This function is currently only used for JSArrays with non-zero
// length.
@@ -2828,8 +2860,8 @@ class FastDoubleElementsAccessor
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
case NO_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// This function is currently only used for JSArrays with non-zero
// length.
@@ -2945,7 +2977,7 @@ class TypedElementsAccessor
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
uint32_t entry) {
- return BackingStore::get(BackingStore::cast(backing_store), entry);
+ return BackingStore::get(isolate, BackingStore::cast(backing_store), entry);
}
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
@@ -3041,9 +3073,8 @@ class TypedElementsAccessor
return Just(true);
}
- static Object* FillImpl(Isolate* isolate, Handle<JSObject> receiver,
- Handle<Object> obj_value, uint32_t start,
- uint32_t end) {
+ static Object* FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) {
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
DCHECK(!array->WasNeutered());
DCHECK(obj_value->IsNumeric());
@@ -3051,9 +3082,9 @@ class TypedElementsAccessor
ctype value = BackingStore::FromHandle(obj_value);
// Ensure indexes are within array bounds
- DCHECK_LE(0, start);
- DCHECK_LE(start, end);
- DCHECK_LE(end, array->length_value());
+ CHECK_LE(0, start);
+ CHECK_LE(start, end);
+ CHECK_LE(end, array->length_value());
DisallowHeapAllocation no_gc;
BackingStore* elements = BackingStore::cast(receiver->elements());
@@ -3175,8 +3206,7 @@ class TypedElementsAccessor
return Just<int64_t>(-1);
}
- static Maybe<int64_t> LastIndexOfValueImpl(Isolate* isolate,
- Handle<JSObject> receiver,
+ static Maybe<int64_t> LastIndexOfValueImpl(Handle<JSObject> receiver,
Handle<Object> value,
uint32_t start_from) {
DisallowHeapAllocation no_gc;
@@ -3285,7 +3315,7 @@ class TypedElementsAccessor
}
switch (source->GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
CopyBetweenBackingStores<Type##ArrayTraits>(source_data, dest_elements, \
count, 0); \
@@ -3371,7 +3401,7 @@ class TypedElementsAccessor
}
switch (source->GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
CopyBetweenBackingStores<Type##ArrayTraits>( \
source_data, destination_elements, length, offset); \
@@ -3590,8 +3620,8 @@ class TypedElementsAccessor
}
};
-#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
- typedef TypedElementsAccessor<TYPE##_ELEMENTS, ctype> \
+#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype) \
+ typedef TypedElementsAccessor<TYPE##_ELEMENTS, ctype> \
Fixed##Type##ElementsAccessor;
TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
@@ -3607,8 +3637,7 @@ class SloppyArgumentsElementsAccessor
}
static void ConvertArgumentsStoreResult(
- Isolate* isolate, Handle<SloppyArgumentsElements> elements,
- Handle<Object> result) {
+ Handle<SloppyArgumentsElements> elements, Handle<Object> result) {
UNREACHABLE();
}
@@ -4545,7 +4574,7 @@ void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
DCHECK(destination->IsJSTypedArray());
switch (destination->GetElementsKind()) {
-#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
CHECK(Fixed##Type##ElementsAccessor::TryCopyElementsFastNumber( \
context, source, destination, length, static_cast<uint32_t>(offset))); \
@@ -4561,7 +4590,7 @@ void CopyTypedArrayElementsToTypedArray(JSTypedArray* source,
JSTypedArray* destination,
uintptr_t length, uintptr_t offset) {
switch (destination->GetElementsKind()) {
-#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
Fixed##Type##ElementsAccessor::CopyElementsFromTypedArray( \
source, destination, length, static_cast<uint32_t>(offset)); \
@@ -4655,5 +4684,7 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
}
ElementsAccessor** ElementsAccessor::elements_accessors_ = nullptr;
+
+#undef ELEMENTS_LIST
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index a7dab7daa5..b0aa911f32 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -48,9 +48,7 @@ class ElementsAccessor {
PropertyFilter filter = ALL_PROPERTIES) = 0;
inline bool HasElement(JSObject* holder, uint32_t index,
- PropertyFilter filter = ALL_PROPERTIES) {
- return HasElement(holder, index, holder->elements(), filter);
- }
+ PropertyFilter filter = ALL_PROPERTIES);
// Note: this is currently not implemented for string wrapper and
// typed array elements.
@@ -88,10 +86,7 @@ class ElementsAccessor {
KeyAccumulator* keys) = 0;
inline void CollectElementIndices(Handle<JSObject> object,
- KeyAccumulator* keys) {
- CollectElementIndices(object, handle(object->elements(), keys->isolate()),
- keys);
- }
+ KeyAccumulator* keys);
virtual Maybe<bool> CollectValuesOrEntries(
Isolate* isolate, Handle<JSObject> object,
@@ -105,11 +100,7 @@ class ElementsAccessor {
inline MaybeHandle<FixedArray> PrependElementIndices(
Handle<JSObject> object, Handle<FixedArray> keys,
- GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES) {
- return PrependElementIndices(
- object, handle(object->elements(), object->GetIsolate()), keys, convert,
- filter);
- }
+ GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES);
virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
@@ -157,9 +148,8 @@ class ElementsAccessor {
virtual uint32_t GetCapacity(JSObject* holder,
FixedArrayBase* backing_store) = 0;
- virtual Object* Fill(Isolate* isolate, Handle<JSObject> receiver,
- Handle<Object> obj_value, uint32_t start,
- uint32_t end) = 0;
+ virtual Object* Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ uint32_t start, uint32_t end) = 0;
// Check an Object's own elements for an element (using SameValueZero
// semantics)
@@ -174,8 +164,7 @@ class ElementsAccessor {
Handle<Object> value, uint32_t start,
uint32_t length) = 0;
- virtual Maybe<int64_t> LastIndexOfValue(Isolate* isolate,
- Handle<JSObject> receiver,
+ virtual Maybe<int64_t> LastIndexOfValue(Handle<JSObject> receiver,
Handle<Object> value,
uint32_t start) = 0;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index dc8f4fbb2c..792d20ee58 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -4,7 +4,7 @@
#include "src/execution.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/bootstrapper.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 363c0c593d..de1530ba27 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -4,7 +4,7 @@
#include "src/extensions/externalize-string-extension.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -85,11 +85,12 @@ void ExternalizeStringExtension::Externalize(
}
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
- if (string->IsExternalString()) {
+ if (!string->SupportsExternalization()) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(),
- "externalizeString() can't externalize twice.",
- NewStringType::kNormal).ToLocalChecked());
+ "string does not support externalization.",
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
if (string->IsOneByteRepresentation() && !force_two_byte) {
@@ -97,14 +98,14 @@ void ExternalizeStringExtension::Externalize(
String::WriteToFlat(*string, data, 0, string->length());
SimpleOneByteStringResource* resource = new SimpleOneByteStringResource(
reinterpret_cast<char*>(data), string->length());
- result = string->MakeExternal(resource);
+ result = Utils::ToLocal(string)->MakeExternal(resource);
if (!result) delete resource;
} else {
uc16* data = new uc16[string->length()];
String::WriteToFlat(*string, data, 0, string->length());
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
- result = string->MakeExternal(resource);
+ result = Utils::ToLocal(string)->MakeExternal(resource);
if (!result) delete resource;
}
if (!result) {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index e9c4221f9a..4d555e1829 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -30,10 +30,10 @@ void ExternalReferenceTable::Init(Isolate* isolate) {
// kNullAddress is preserved through serialization/deserialization.
Add(kNullAddress, "nullptr", &index);
AddReferences(isolate, &index);
- AddBuiltins(isolate, &index);
- AddRuntimeFunctions(isolate, &index);
+ AddBuiltins(&index);
+ AddRuntimeFunctions(&index);
AddIsolateAddresses(isolate, &index);
- AddAccessors(isolate, &index);
+ AddAccessors(&index);
AddStubCache(isolate, &index);
is_initialized_ = static_cast<uint32_t>(true);
USE(unused_padding_);
@@ -75,7 +75,7 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate, int* index) {
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
}
-void ExternalReferenceTable::AddBuiltins(Isolate* isolate, int* index) {
+void ExternalReferenceTable::AddBuiltins(int* index) {
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
struct CBuiltinEntry {
@@ -97,7 +97,7 @@ void ExternalReferenceTable::AddBuiltins(Isolate* isolate, int* index) {
*index);
}
-void ExternalReferenceTable::AddRuntimeFunctions(Isolate* isolate, int* index) {
+void ExternalReferenceTable::AddRuntimeFunctions(int* index) {
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
kBuiltinsReferenceCount,
*index);
@@ -146,7 +146,7 @@ void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate, int* index) {
*index);
}
-void ExternalReferenceTable::AddAccessors(Isolate* isolate, int* index) {
+void ExternalReferenceTable::AddAccessors(int* index) {
CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
kBuiltinsReferenceCount + kRuntimeReferenceCount +
kIsolateAddressReferenceCount,
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index c880aa8c40..38acac2b7a 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -50,10 +50,21 @@ class ExternalReferenceTable {
static const char* ResolveSymbol(void* address);
+ static constexpr uint32_t EntrySize() {
+ return sizeof(ExternalReferenceEntry);
+ }
+
static constexpr uint32_t OffsetOfEntry(uint32_t i) {
// Used in CodeAssembler::LookupExternalReference.
STATIC_ASSERT(offsetof(ExternalReferenceEntry, address) == 0);
- return i * sizeof(ExternalReferenceEntry);
+ return i * EntrySize();
+ }
+
+ const char* NameFromOffset(uint32_t offset) {
+ DCHECK_EQ(offset % EntrySize(), 0);
+ DCHECK_LT(offset, SizeInBytes());
+ int index = offset / EntrySize();
+ return name(index);
}
static constexpr uint32_t SizeInBytes() {
@@ -78,10 +89,10 @@ class ExternalReferenceTable {
void Add(Address address, const char* name, int* index);
void AddReferences(Isolate* isolate, int* index);
- void AddBuiltins(Isolate* isolate, int* index);
- void AddRuntimeFunctions(Isolate* isolate, int* index);
+ void AddBuiltins(int* index);
+ void AddRuntimeFunctions(int* index);
void AddIsolateAddresses(Isolate* isolate, int* index);
- void AddAccessors(Isolate* isolate, int* index);
+ void AddAccessors(int* index);
void AddStubCache(Isolate* isolate, int* index);
ExternalReferenceEntry refs_[kSize];
diff --git a/deps/v8/src/external-reference.cc b/deps/v8/src/external-reference.cc
index dedf76b6d2..8f6bf22d38 100644
--- a/deps/v8/src/external-reference.cc
+++ b/deps/v8/src/external-reference.cc
@@ -180,7 +180,7 @@ ExternalReference::incremental_marking_record_write_function() {
ExternalReference ExternalReference::store_buffer_overflow_function() {
return ExternalReference(
- Redirect(FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
+ Redirect(Heap::store_buffer_overflow_function_address()));
}
ExternalReference ExternalReference::delete_handle_scope_extensions() {
@@ -364,16 +364,6 @@ ExternalReference ExternalReference::wasm_float64_pow() {
return ExternalReference(Redirect(FUNCTION_ADDR(wasm::float64_pow_wrapper)));
}
-ExternalReference ExternalReference::wasm_set_thread_in_wasm_flag() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::set_thread_in_wasm_flag)));
-}
-
-ExternalReference ExternalReference::wasm_clear_thread_in_wasm_flag() {
- return ExternalReference(
- Redirect(FUNCTION_ADDR(wasm::clear_thread_in_wasm_flag)));
-}
-
static void f64_mod_wrapper(Address data) {
double dividend = ReadUnalignedValue<double>(data);
double divisor = ReadUnalignedValue<double>(data + sizeof(dividend));
@@ -469,10 +459,18 @@ ExternalReference ExternalReference::address_of_pending_message_obj(
return ExternalReference(isolate->pending_message_obj_address());
}
+ExternalReference ExternalReference::abort_with_reason() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(i::abort_with_reason)));
+}
+
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<Address>(&double_min_int_constant));
}
+ExternalReference ExternalReference::address_of_runtime_stats_flag() {
+ return ExternalReference(&FLAG_runtime_stats);
+}
+
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(
reinterpret_cast<Address>(&double_one_half_constant));
@@ -931,6 +929,12 @@ ExternalReference ExternalReference::debug_restart_fp_address(
return ExternalReference(isolate->debug()->restart_fp_address());
}
+ExternalReference ExternalReference::wasm_thread_in_wasm_flag_address_address(
+ Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<Address>(
+ &isolate->thread_local_top()->thread_in_wasm_flag_address_));
+}
+
ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
return ExternalReference(reinterpret_cast<void*>(
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
@@ -955,5 +959,16 @@ std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
return os;
}
+void abort_with_reason(int reason) {
+ if (IsValidAbortReason(reason)) {
+ const char* message = GetAbortReason(static_cast<AbortReason>(reason));
+ base::OS::PrintError("abort: %s\n", message);
+ } else {
+ base::OS::PrintError("abort: <unknown reason: %d>\n", reason);
+ }
+ base::OS::Abort();
+ UNREACHABLE();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/external-reference.h b/deps/v8/src/external-reference.h
index a22ca0157e..2f77946982 100644
--- a/deps/v8/src/external-reference.h
+++ b/deps/v8/src/external-reference.h
@@ -66,15 +66,19 @@ class StatsCounter;
V(debug_suspended_generator_address, \
"Debug::step_suspended_generator_address()") \
V(debug_restart_fp_address, "Debug::restart_fp_address()") \
+ V(wasm_thread_in_wasm_flag_address_address, \
+ "&Isolate::thread_in_wasm_flag_address") \
EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V)
#define EXTERNAL_REFERENCE_LIST(V) \
+ V(abort_with_reason, "abort_with_reason") \
V(address_of_double_abs_constant, "double_absolute_constant") \
V(address_of_double_neg_constant, "double_negate_constant") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
V(address_of_min_int, "LDoubleConstant::min_int") \
V(address_of_one_half, "LDoubleConstant::one_half") \
+ V(address_of_runtime_stats_flag, "FLAG_runtime_stats") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
@@ -137,7 +141,6 @@ class StatsCounter;
V(try_internalize_string_function, "try_internalize_string_function") \
V(wasm_call_trap_callback_for_testing, \
"wasm::call_trap_callback_for_testing") \
- V(wasm_clear_thread_in_wasm_flag, "wasm::clear_thread_in_wasm_flag") \
V(wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
V(wasm_f32_floor, "wasm::f32_floor_wrapper") \
V(wasm_f32_nearest_int, "wasm::f32_nearest_int_wrapper") \
@@ -155,7 +158,6 @@ class StatsCounter;
V(wasm_int64_mod, "wasm::int64_mod") \
V(wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
V(wasm_int64_to_float64, "wasm::int64_to_float64_wrapper") \
- V(wasm_set_thread_in_wasm_flag, "wasm::set_thread_in_wasm_flag") \
V(wasm_uint64_div, "wasm::uint64_div") \
V(wasm_uint64_mod, "wasm::uint64_mod") \
V(wasm_uint64_to_float32, "wasm::uint64_to_float32_wrapper") \
@@ -301,6 +303,8 @@ size_t hash_value(ExternalReference);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
+void abort_with_reason(int reason);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index c3c0c8c63d..d539eef57b 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -67,6 +67,7 @@ int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
return 1;
case FeedbackSlotKind::kCall:
+ case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadGlobalInsideTypeof:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
@@ -166,7 +167,7 @@ void FeedbackVector::set(int index, MaybeObject* value, WriteBarrierMode mode) {
DCHECK_LT(index, this->length());
int offset = kFeedbackSlotsOffset + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
}
void FeedbackVector::Set(FeedbackSlot slot, Object* value,
@@ -337,6 +338,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
}
case FeedbackSlotKind::kCreateClosure:
case FeedbackSlotKind::kLiteral:
+ case FeedbackSlotKind::kCloneObject:
break;
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index ba3b711b1e..90ae08b0ba 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -7,6 +7,7 @@
#include "src/feedback-vector-inl.h"
#include "src/ic/ic-inl.h"
#include "src/objects.h"
+#include "src/objects/data-handler-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/object-macros.h"
@@ -170,6 +171,8 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "ForIn";
case FeedbackSlotKind::kInstanceOf:
return "InstanceOf";
+ case FeedbackSlotKind::kCloneObject:
+ return "CloneObject";
case FeedbackSlotKind::kKindsNumber:
break;
}
@@ -254,6 +257,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
vector->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
extra_value = Smi::kZero;
break;
+ case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kStoreNamedSloppy:
@@ -373,7 +377,6 @@ void FeedbackVector::AssertNoLegacyTypes(MaybeObject* object) {
// Instead of FixedArray, the Feedback and the Extra should contain
// WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
DCHECK_IMPLIES(heap_object->IsFixedArray(), heap_object->IsHashTable());
- DCHECK(!heap_object->IsWeakCell());
}
#endif
}
@@ -416,6 +419,7 @@ void FeedbackNexus::ConfigureUninitialized() {
SKIP_WRITE_BARRIER);
break;
}
+ case FeedbackSlotKind::kCloneObject:
case FeedbackSlotKind::kCall: {
SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
@@ -427,6 +431,14 @@ void FeedbackNexus::ConfigureUninitialized() {
SKIP_WRITE_BARRIER);
break;
}
+ case FeedbackSlotKind::kStoreNamedSloppy:
+ case FeedbackSlotKind::kStoreNamedStrict:
+ case FeedbackSlotKind::kStoreKeyedSloppy:
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kLoadProperty:
+ case FeedbackSlotKind::kLoadKeyed:
case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
@@ -467,12 +479,6 @@ bool FeedbackNexus::Clear() {
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
- if (!IsCleared()) {
- ConfigurePremonomorphic();
- feedback_updated = true;
- }
- break;
-
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
@@ -480,6 +486,7 @@ bool FeedbackNexus::Clear() {
case FeedbackSlotKind::kCall:
case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+ case FeedbackSlotKind::kCloneObject:
if (!IsCleared()) {
ConfigureUninitialized();
feedback_updated = true;
@@ -494,11 +501,24 @@ bool FeedbackNexus::Clear() {
return feedback_updated;
}
-void FeedbackNexus::ConfigurePremonomorphic() {
+void FeedbackNexus::ConfigurePremonomorphic(Handle<Map> receiver_map) {
SetFeedback(*FeedbackVector::PremonomorphicSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
- SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(HeapObjectReference::Weak(*receiver_map));
+}
+
+bool FeedbackNexus::ConfigureMegamorphic() {
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = GetIsolate();
+ MaybeObject* sentinel =
+ MaybeObject::FromObject(*FeedbackVector::MegamorphicSentinel(isolate));
+ if (GetFeedback() != sentinel) {
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(HeapObjectReference::ClearedValue());
+ return true;
+ }
+
+ return false;
}
bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
@@ -661,6 +681,23 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
return MONOMORPHIC;
}
+ case FeedbackSlotKind::kCloneObject: {
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::UninitializedSentinel(isolate))) {
+ return UNINITIALIZED;
+ }
+ if (feedback == MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate))) {
+ return MEGAMORPHIC;
+ }
+ if (feedback->IsWeakOrClearedHeapObject()) {
+ return MONOMORPHIC;
+ }
+
+ DCHECK(feedback->ToStrongHeapObject()->IsWeakFixedArray());
+ return POLYMORPHIC;
+ }
+
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
@@ -703,6 +740,81 @@ void FeedbackNexus::ConfigureHandlerMode(const MaybeObjectHandle& handler) {
SetFeedbackExtra(*handler);
}
+void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
+ Handle<Map> result_map) {
+ Isolate* isolate = GetIsolate();
+ MaybeObject* maybe_feedback = GetFeedback();
+ Handle<HeapObject> feedback(maybe_feedback->IsStrongOrWeakHeapObject()
+ ? maybe_feedback->GetHeapObject()
+ : nullptr,
+ isolate);
+ switch (ic_state()) {
+ case UNINITIALIZED:
+ // Cache the first map seen which meets the fast case requirements.
+ SetFeedback(HeapObjectReference::Weak(*source_map));
+ SetFeedbackExtra(*result_map);
+ break;
+ case MONOMORPHIC:
+ if (maybe_feedback->IsClearedWeakHeapObject() ||
+ feedback.is_identical_to(source_map) ||
+ Map::cast(*feedback)->is_deprecated()) {
+ // Remain in MONOMORPHIC state if previous feedback has been collected.
+ SetFeedback(HeapObjectReference::Weak(*source_map));
+ SetFeedbackExtra(*result_map);
+ } else {
+ // Transition to POLYMORPHIC.
+ Handle<WeakFixedArray> array =
+ EnsureArrayOfSize(2 * kCloneObjectPolymorphicEntrySize);
+ array->Set(0, maybe_feedback);
+ array->Set(1, GetFeedbackExtra());
+ array->Set(2, HeapObjectReference::Weak(*source_map));
+ array->Set(3, MaybeObject::FromObject(*result_map));
+ SetFeedbackExtra(HeapObjectReference::ClearedValue());
+ }
+ break;
+ case POLYMORPHIC: {
+ static constexpr int kMaxElements =
+ IC::kMaxPolymorphicMapCount * kCloneObjectPolymorphicEntrySize;
+ Handle<WeakFixedArray> array = Handle<WeakFixedArray>::cast(feedback);
+ int i = 0;
+ for (; i < array->length(); i += kCloneObjectPolymorphicEntrySize) {
+ MaybeObject* feedback = array->Get(i);
+ if (feedback->IsClearedWeakHeapObject()) break;
+ Handle<Map> cached_map(Map::cast(feedback->GetHeapObject()), isolate);
+ if (cached_map.is_identical_to(source_map) ||
+ cached_map->is_deprecated())
+ break;
+ }
+
+ if (i >= array->length()) {
+ if (i == kMaxElements) {
+ // Transition to MEGAMORPHIC.
+ MaybeObject* sentinel = MaybeObject::FromObject(
+ *FeedbackVector::MegamorphicSentinel(isolate));
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(HeapObjectReference::ClearedValue());
+ break;
+ }
+
+ // Grow polymorphic feedback array.
+ Handle<WeakFixedArray> new_array = EnsureArrayOfSize(
+ array->length() + kCloneObjectPolymorphicEntrySize);
+ for (int j = 0; j < array->length(); ++j) {
+ new_array->Set(j, array->Get(j));
+ }
+ array = new_array;
+ }
+
+ array->Set(i, HeapObjectReference::Weak(*source_map));
+ array->Set(i + 1, MaybeObject::FromObject(*result_map));
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
int FeedbackNexus::GetCallCount() {
DCHECK(IsCallICKind(kind()));
@@ -823,6 +935,14 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
Map* map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
return 1;
+ } else if (feedback->ToStrongHeapObject(&heap_object) &&
+ heap_object ==
+ heap_object->GetReadOnlyRoots().premonomorphic_symbol()) {
+ if (GetFeedbackExtra()->ToWeakHeapObject(&heap_object)) {
+ Map* map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ return 1;
+ }
}
return 0;
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 880e4713d4..3721ffec9a 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -51,6 +51,7 @@ enum class FeedbackSlotKind {
kLiteral,
kForIn,
kInstanceOf,
+ kCloneObject,
kKindsNumber // Last value indicating number of kinds.
};
@@ -107,6 +108,10 @@ inline bool IsTypeProfileKind(FeedbackSlotKind kind) {
return kind == FeedbackSlotKind::kTypeProfile;
}
+inline bool IsCloneObjectKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kCloneObject;
+}
+
inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
DCHECK(IsLoadGlobalICKind(kind));
return (kind == FeedbackSlotKind::kLoadGlobalInsideTypeof)
@@ -398,6 +403,10 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
FeedbackSlot AddTypeProfileSlot();
+ FeedbackSlot AddCloneObjectSlot() {
+ return AddSlot(FeedbackSlotKind::kCloneObject);
+ }
+
#ifdef OBJECT_PRINT
// For gdb debugging.
void Print();
@@ -492,14 +501,6 @@ class FeedbackMetadata : public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackMetadata);
};
-// The following asserts protect an optimization in type feedback vector
-// code that looks into the contents of a slot assuming to find a String,
-// a Symbol, an AllocationSite, a WeakCell, or a FixedArray.
-STATIC_ASSERT(WeakCell::kSize >= 2 * kPointerSize);
-STATIC_ASSERT(WeakCell::kValueOffset ==
- AllocationSite::kTransitionInfoOrBoilerplateOffset);
-STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
-STATIC_ASSERT(WeakCell::kValueOffset == Name::kHashFieldSlot);
// Verify that an empty hash field looks like a tagged object, but can't
// possibly be confused with a pointer.
STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
@@ -601,7 +602,10 @@ class FeedbackNexus final {
// Clear() returns true if the state of the underlying vector was changed.
bool Clear();
void ConfigureUninitialized();
- void ConfigurePremonomorphic();
+ void ConfigurePremonomorphic(Handle<Map> receiver_map);
+ // ConfigureMegamorphic() returns true if the state of the underlying vector
+ // was changed. Extra feedback is cleared if the 0 parameter version is used.
+ bool ConfigureMegamorphic();
bool ConfigureMegamorphic(IcCheckType property_type);
inline MaybeObject* GetFeedback() const;
@@ -654,6 +658,10 @@ class FeedbackNexus final {
int context_slot_index);
void ConfigureHandlerMode(const MaybeObjectHandle& handler);
+ // For CloneObject ICs
+ static constexpr int kCloneObjectPolymorphicEntrySize = 2;
+ void ConfigureCloneObject(Handle<Map> source_map, Handle<Map> result_map);
+
// Bit positions in a smi that encodes lexical environment variable access.
#define LEXICAL_MODE_BIT_FIELDS(V, _) \
V(ContextIndexBits, unsigned, 12, _) \
@@ -676,7 +684,6 @@ class FeedbackNexus final {
std::vector<int> GetSourcePositions() const;
std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
- protected:
inline void SetFeedback(Object* feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void SetFeedback(MaybeObject* feedback,
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index 41fddb6e0b..abc3166b56 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -41,32 +41,6 @@ inline FieldIndex FieldIndex::ForPropertyIndex(const Map* map,
first_inobject_offset);
}
-// Takes an index as computed by GetLoadByFieldIndex and reconstructs a
-// FieldIndex object from it.
-inline FieldIndex FieldIndex::ForLoadByFieldIndex(const Map* map,
- int orig_index) {
- int field_index = orig_index;
- bool is_inobject = true;
- int first_inobject_offset = 0;
- Encoding encoding = field_index & 1 ? kDouble : kTagged;
- field_index >>= 1;
- int offset;
- if (field_index < 0) {
- first_inobject_offset = FixedArray::kHeaderSize;
- field_index = -(field_index + 1);
- is_inobject = false;
- offset = FixedArray::kHeaderSize + field_index * kPointerSize;
- } else {
- first_inobject_offset = map->GetInObjectPropertyOffset(0);
- offset = map->GetInObjectPropertyOffset(field_index);
- }
- FieldIndex result(is_inobject, offset, encoding, map->GetInObjectProperties(),
- first_inobject_offset);
- DCHECK_EQ(result.GetLoadByFieldIndex(), orig_index);
- return result;
-}
-
-
// Returns the index format accepted by the HLoadFieldByIndex instruction.
// (In-object: zero-based from (object start + JSObject::kHeaderSize),
// out-of-object: zero-based from FixedArray::kHeaderSize.)
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index a1552f050e..2135c5ef25 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -29,7 +29,6 @@ class FieldIndex final {
static FieldIndex ForInObjectOffset(int offset, Encoding encoding,
const Map* map = nullptr);
static FieldIndex ForDescriptor(const Map* map, int descriptor_index);
- static FieldIndex ForLoadByFieldIndex(const Map* map, int index);
int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
index 2eebebe3d6..c4d2080f78 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/field-type.cc
@@ -49,14 +49,13 @@ FieldType* FieldType::cast(Object* object) {
bool FieldType::IsClass() { return this->IsMap(); }
-Handle<i::Map> FieldType::AsClass() {
+Map* FieldType::AsClass() {
DCHECK(IsClass());
- i::Map* map = Map::cast(this);
- return handle(map, map->GetIsolate());
+ return Map::cast(this);
}
bool FieldType::NowStable() {
- return !this->IsClass() || this->AsClass()->is_stable();
+ return !this->IsClass() || AsClass()->is_stable();
}
bool FieldType::NowIs(FieldType* other) {
@@ -78,10 +77,16 @@ void FieldType::PrintTo(std::ostream& os) {
os << "None";
} else {
DCHECK(IsClass());
- HandleScope scope(Map::cast(this)->GetIsolate());
- os << "Class(" << static_cast<void*>(*AsClass()) << ")";
+ os << "Class(" << static_cast<void*>(AsClass()) << ")";
}
}
+bool FieldType::NowContains(Object* value) {
+ if (this == Any()) return true;
+ if (this == None()) return false;
+ if (!value->IsHeapObject()) return false;
+ return HeapObject::cast(value)->map() == Map::cast(this);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index 8eec7a5b58..d66d12c095 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -24,17 +24,12 @@ class FieldType : public Object {
static Handle<FieldType> Class(i::Handle<i::Map> map, Isolate* isolate);
static FieldType* cast(Object* object);
- bool NowContains(Object* value) {
- if (this == Any()) return true;
- if (this == None()) return false;
- if (!value->IsHeapObject()) return false;
- return HeapObject::cast(value)->map() == Map::cast(this);
- }
+ bool NowContains(Object* value);
bool NowContains(Handle<Object> value) { return NowContains(*value); }
bool IsClass();
- Handle<i::Map> AsClass();
+ Map* AsClass();
bool IsNone() { return this == None(); }
bool IsAny() { return this == Any(); }
bool NowStable();
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 4d5aa6f885..69ec7472bb 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -216,9 +216,10 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
V(harmony_await_optimization, "harmony await taking 1 tick")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_locale, "Intl.Locale") \
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_locale, "Intl.Locale") \
+ V(harmony_intl_list_format, "Intl.ListFormat") \
V(harmony_intl_relative_time_format, "Intl.RelativeTimeFormat")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
@@ -230,18 +231,19 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
V(harmony_private_fields, "harmony private fields in class literals") \
V(harmony_numeric_separator, "harmony numeric separator between digits") \
V(harmony_string_matchall, "harmony String.prototype.matchAll") \
- V(harmony_symbol_description, "harmony Symbol.prototype.description")
+ V(harmony_global, "harmony global")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_string_trimming, "harmony String.prototype.trim{Start,End}") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_function_tostring, "harmony Function.prototype.toString") \
- V(harmony_import_meta, "harmony import.meta property") \
- V(harmony_bigint, "harmony arbitrary precision integers") \
- V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_array_prototype_values, "harmony Array.prototype.values") \
- V(harmony_array_flat, "harmony Array.prototype.{flat,flatMap}")
+#define HARMONY_SHIPPING(V) \
+ V(harmony_string_trimming, "harmony String.prototype.trim{Start,End}") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_function_tostring, "harmony Function.prototype.toString") \
+ V(harmony_import_meta, "harmony import.meta property") \
+ V(harmony_bigint, "harmony arbitrary precision integers") \
+ V(harmony_dynamic_import, "harmony dynamic import") \
+ V(harmony_array_prototype_values, "harmony Array.prototype.values") \
+ V(harmony_array_flat, "harmony Array.prototype.{flat,flatMap}") \
+ V(harmony_symbol_description, "harmony Symbol.prototype.description")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -314,6 +316,11 @@ DEFINE_BOOL(optimize_for_size, false,
"Enables optimizations which favor memory size over execution "
"speed")
+// Flag for one shot optimizations.
+DEFINE_BOOL(enable_one_shot_optimization, true,
+ "Enable size optimizations for the code that will "
+ "only be executed once")
+
DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
// Flags for data representation optimizations
@@ -380,6 +387,11 @@ DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
DEFINE_BOOL(block_concurrent_recompilation, false,
"block queued jobs until released")
+DEFINE_BOOL(concurrent_compiler_frontend, false,
+ "run optimizing compiler's frontend phases on a separate thread")
+DEFINE_IMPLICATION(future, concurrent_compiler_frontend)
+DEFINE_BOOL(strict_heap_broker, false, "fail on incomplete serialization")
+DEFINE_BOOL(trace_heap_broker, false, "trace the heap broker")
// Flags for stress-testing the compiler.
DEFINE_INT(stress_runs, 0, "number of stress runs")
@@ -429,6 +441,8 @@ DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
DEFINE_STRING(csa_trap_on_node, nullptr,
"trigger break point when a node with given id is created in "
"given stub. The format is: StubName,NodeId")
+DEFINE_BOOL_READONLY(fixed_array_bounds_checks, DEBUG_BOOL,
+ "enable FixedArray bounds checks")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_stats_nvp, false,
"print TurboFan statistics in machine-readable format")
@@ -533,7 +547,7 @@ DEFINE_BOOL(wasm_async_compilation, true,
DEFINE_BOOL(wasm_test_streaming, false,
"use streaming compilation instead of async compilation for tests")
DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
- "maximum memory size of a wasm instance")
+ "maximum number of 64KiB memory pages of a wasm instance")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
// Enable Liftoff by default on ia32 and x64. More architectures will follow
@@ -586,22 +600,18 @@ DEFINE_DEBUG_BOOL(dump_wasm_module, false, "dump wasm module bytes")
DEFINE_STRING(dump_wasm_module_path, nullptr,
"directory to dump wasm modules to")
-DEFINE_BOOL(experimental_wasm_simd, false,
- "enable prototype simd opcodes for wasm")
-DEFINE_BOOL(experimental_wasm_eh, false,
- "enable prototype exception handling opcodes for wasm")
-DEFINE_BOOL(experimental_wasm_mv, false,
- "enable prototype multi-value support for wasm")
-DEFINE_BOOL(experimental_wasm_threads, false,
- "enable prototype threads for wasm")
-DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
- "enable non-trapping float-to-int conversions for wasm")
-DEFINE_BOOL(experimental_wasm_se, true,
- "enable prototype sign extension opcodes for wasm")
-DEFINE_BOOL(experimental_wasm_anyref, false,
- "enable prototype anyref support for wasm")
-DEFINE_BOOL(experimental_wasm_mut_global, true,
- "enable prototype import/export mutable global support for wasm")
+// Declare command-line flags for WASM features. Warning: avoid using these
+// flags directly in the implementation. Instead accept wasm::WasmFeatures
+// for configurability.
+#include "src/wasm/wasm-feature-flags.h"
+
+#define SPACE
+#define DECL_WASM_FLAG(feat, desc, val) \
+ DEFINE_BOOL(experimental_wasm_##feat, val, \
+ "enable prototype " desc " for wasm")
+FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG, SPACE)
+#undef DECL_WASM_FLAG
+#undef SPACE
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,
@@ -609,6 +619,12 @@ DEFINE_BOOL(wasm_no_bounds_checks, false,
DEFINE_BOOL(wasm_no_stack_checks, false,
"disable stack checks (performance testing only)")
+DEFINE_BOOL(wasm_shared_engine, true,
+ "shares one wasm engine between all isolates within a process")
+DEFINE_IMPLICATION(future, wasm_shared_engine)
+DEFINE_BOOL(wasm_shared_code, true,
+ "shares code underlying a wasm module when it is transferred")
+DEFINE_IMPLICATION(future, wasm_shared_code)
DEFINE_BOOL(wasm_trap_handler, true,
"use signal handlers to catch out of bounds memory access in wasm"
" (currently Linux x86_64 only)")
@@ -821,11 +837,11 @@ DEFINE_BOOL(enable_popcnt, true,
DEFINE_STRING(arm_arch, ARM_ARCH_DEFAULT,
"generate instructions for the selected ARM architecture if "
"available: armv6, armv7, armv7+sudiv or armv8")
-DEFINE_BOOL(enable_vldr_imm, false,
- "enable use of constant pools for double immediate (ARM only)")
DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS/PPC only)")
DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
+DEFINE_BOOL(partial_constant_pool, true,
+ "enable use of partial constant pools (X64 only)")
// Deprecated ARM flags (replaced by arm_arch).
DEFINE_MAYBE_BOOL(enable_armv7, "deprecated (use --arm_arch instead)")
@@ -963,7 +979,6 @@ DEFINE_INT(heap_snapshot_string_limit, 1024,
DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
"Use constant sample intervals to eliminate test flakiness")
-
// v8.cc
DEFINE_BOOL(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
@@ -1069,6 +1084,13 @@ DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
#endif
DEFINE_BOOL_READONLY(embedded_builtins, V8_EMBEDDED_BUILTINS_BOOL,
"Embed builtin code into the binary.")
+// TODO(jgruber,v8:6666): Remove once ia32 has full embedded builtin support.
+DEFINE_BOOL_READONLY(
+ ia32_verify_root_register, false,
+ "Check that the value of the root register was not clobbered.")
+// TODO(jgruber,v8:6666): Remove once ia32 has full embedded builtin support.
+DEFINE_BOOL(print_embedded_builtin_candidates, false,
+ "Prints builtins that are not yet embedded but could be.")
DEFINE_BOOL(lazy_deserialization, true,
"Deserialize code lazily from the snapshot.")
DEFINE_BOOL(lazy_handler_deserialization, true,
@@ -1130,6 +1152,8 @@ DEFINE_BOOL(use_external_strings, false, "Use external strings for source code")
DEFINE_STRING(map_counters, "", "Map counters to a file")
DEFINE_ARGS(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
+DEFINE_BOOL(mock_arraybuffer_allocator, false,
+ "Use a mock ArrayBuffer allocator for testing.")
//
// GDB JIT integration flags.
@@ -1243,7 +1267,7 @@ DEFINE_BOOL(log_function_events, false,
DEFINE_BOOL(prof, false,
"Log statistical profiling information (implies --log-code).")
-DEFINE_BOOL(detailed_line_info, true,
+DEFINE_BOOL(detailed_line_info, false,
"Always generate detailed line information for CPU profiling.")
#if defined(ANDROID)
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index a64404c8ed..419adbf73e 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -21,7 +21,7 @@ inline Address StackHandler::address() const {
inline StackHandler* StackHandler::next() const {
const int offset = StackHandlerConstants::kNextOffset;
- return FromAddress(Memory::Address_at(address() + offset));
+ return FromAddress(Memory<Address>(address() + offset));
}
@@ -82,19 +82,19 @@ inline Object* BuiltinExitFrame::receiver_slot_object() const {
const int receiverOffset =
BuiltinExitFrameConstants::kNewTargetOffset + (argc - 1) * kPointerSize;
- return Memory::Object_at(fp() + receiverOffset);
+ return Memory<Object*>(fp() + receiverOffset);
}
inline Object* BuiltinExitFrame::argc_slot_object() const {
- return Memory::Object_at(fp() + BuiltinExitFrameConstants::kArgcOffset);
+ return Memory<Object*>(fp() + BuiltinExitFrameConstants::kArgcOffset);
}
inline Object* BuiltinExitFrame::target_slot_object() const {
- return Memory::Object_at(fp() + BuiltinExitFrameConstants::kTargetOffset);
+ return Memory<Object*>(fp() + BuiltinExitFrameConstants::kTargetOffset);
}
inline Object* BuiltinExitFrame::new_target_slot_object() const {
- return Memory::Object_at(fp() + BuiltinExitFrameConstants::kNewTargetOffset);
+ return Memory<Object*>(fp() + BuiltinExitFrameConstants::kNewTargetOffset);
}
inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
@@ -103,22 +103,22 @@ inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
inline Object* StandardFrame::GetExpression(int index) const {
- return Memory::Object_at(GetExpressionAddress(index));
+ return Memory<Object*>(GetExpressionAddress(index));
}
inline void StandardFrame::SetExpression(int index, Object* value) {
- Memory::Object_at(GetExpressionAddress(index)) = value;
+ Memory<Object*>(GetExpressionAddress(index)) = value;
}
inline Address StandardFrame::caller_fp() const {
- return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
+ return Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
}
inline Address StandardFrame::caller_pc() const {
- return Memory::Address_at(ComputePCAddress(fp()));
+ return Memory<Address>(ComputePCAddress(fp()));
}
@@ -134,14 +134,14 @@ inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
intptr_t frame_type =
- Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
}
inline bool StandardFrame::IsConstructFrame(Address fp) {
intptr_t frame_type =
- Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
}
@@ -158,7 +158,7 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
}
inline void JavaScriptFrame::set_receiver(Object* value) {
- Memory::Object_at(GetParameterSlot(-1)) = value;
+ Memory<Object*>(GetParameterSlot(-1)) = value;
}
@@ -169,7 +169,7 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
+ return Memory<Object*>(fp() + offset);
}
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 96930e6854..e5751e2bb7 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -183,12 +183,12 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
interpreter_bytecode_dispatch->contains(pc)) {
return true;
} else if (FLAG_interpreted_frames_native_stack) {
- intptr_t marker = Memory::intptr_at(
+ intptr_t marker = Memory<intptr_t>(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
MSAN_MEMORY_IS_INITIALIZED(
state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
Object* maybe_function =
- Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
+ Memory<Object*>(state->fp + StandardFrameConstants::kFunctionOffset);
// There's no need to run a full ContainsSlow if we know the frame can't be
// an InterpretedFrame, so we do these fast checks first
if (StackFrame::IsTypeMarker(marker) || maybe_function->IsSmi()) {
@@ -205,7 +205,7 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
}
DISABLE_ASAN Address ReadMemoryAt(Address address) {
- return Memory::Address_at(address);
+ return Memory<Address>(address);
}
} // namespace
@@ -323,8 +323,8 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
// See EntryFrame::GetCallerState. It computes the caller FP address
// and calls ExitFrame::GetStateForFramePointer on it. We need to be
// sure that caller FP address is valid.
- Address caller_fp = Memory::Address_at(
- frame->fp() + EntryFrameConstants::kCallerFPOffset);
+ Address caller_fp =
+ Memory<Address>(frame->fp() + EntryFrameConstants::kCallerFPOffset);
if (!IsValidExitFrame(caller_fp)) return false;
} else if (frame->is_arguments_adaptor()) {
// See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
@@ -430,7 +430,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
MSAN_MEMORY_IS_INITIALIZED(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
kPointerSize);
- intptr_t marker = Memory::intptr_at(
+ intptr_t marker = Memory<intptr_t>(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
if (!iterator->can_access_heap_objects_) {
// TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
@@ -441,7 +441,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
MSAN_MEMORY_IS_INITIALIZED(
state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
Object* maybe_function =
- Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
+ Memory<Object*>(state->fp + StandardFrameConstants::kFunctionOffset);
if (!StackFrame::IsTypeMarker(marker)) {
if (maybe_function->IsSmi()) {
return NATIVE;
@@ -561,7 +561,7 @@ Address StackFrame::UnpaddedFP() const {
void NativeFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
- state->fp = Memory::Address_at(fp() + CommonFrameConstants::kCallerFPOffset);
+ state->fp = Memory<Address>(fp() + CommonFrameConstants::kCallerFPOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + CommonFrameConstants::kCallerPCOffset));
state->callee_pc_address = nullptr;
@@ -580,7 +580,7 @@ void EntryFrame::ComputeCallerState(State* state) const {
StackFrame::Type EntryFrame::GetCallerState(State* state) const {
const int offset = EntryFrameConstants::kCallerFPOffset;
- Address fp = Memory::Address_at(this->fp() + offset);
+ Address fp = Memory<Address>(this->fp() + offset);
return ExitFrame::GetStateForFramePointer(fp, state);
}
@@ -591,7 +591,7 @@ Code* ConstructEntryFrame::unchecked_code() const {
Object*& ExitFrame::code_slot() const {
const int offset = ExitFrameConstants::kCodeOffset;
- return Memory::Object_at(fp() + offset);
+ return Memory<Object*>(fp() + offset);
}
Code* ExitFrame::unchecked_code() const {
@@ -602,7 +602,7 @@ Code* ExitFrame::unchecked_code() const {
void ExitFrame::ComputeCallerState(State* state) const {
// Set up the caller state.
state->sp = caller_sp();
- state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
+ state->fp = Memory<Address>(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
state->callee_pc_address = nullptr;
@@ -639,7 +639,7 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
  // Distinguish between regular and builtin exit frames.
// Default to EXIT in all hairy cases (e.g., when called from profiler).
const int offset = ExitFrameConstants::kFrameTypeOffset;
- Object* marker = Memory::Object_at(fp + offset);
+ Object* marker = Memory<Object*>(fp + offset);
if (!marker->IsSmi()) {
return EXIT;
@@ -657,7 +657,7 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
Address ExitFrame::ComputeStackPointer(Address fp) {
MSAN_MEMORY_IS_INITIALIZED(fp + ExitFrameConstants::kSPOffset, kPointerSize);
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
+ return Memory<Address>(fp + ExitFrameConstants::kSPOffset);
}
void ExitFrame::FillState(Address fp, Address sp, State* state) {
@@ -687,7 +687,7 @@ Object* BuiltinExitFrame::GetParameter(int i) const {
DCHECK(i >= 0 && i < ComputeParametersCount());
int offset =
BuiltinExitFrameConstants::kFirstArgumentOffset + i * kPointerSize;
- return Memory::Object_at(fp() + offset);
+ return Memory<Object*>(fp() + offset);
}
int BuiltinExitFrame::ComputeParametersCount() const {
@@ -855,7 +855,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Determine the fixed header and spill slot area size.
int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
intptr_t marker =
- Memory::intptr_at(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
+ Memory<intptr_t>(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
if (StackFrame::IsTypeMarker(marker)) {
StackFrame::Type candidate = StackFrame::MarkerToType(marker);
switch (candidate) {
@@ -898,10 +898,10 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
slot_space -=
(frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
- Object** frame_header_base = &Memory::Object_at(fp() - frame_header_size);
+ Object** frame_header_base = &Memory<Object*>(fp() - frame_header_size);
Object** frame_header_limit =
- &Memory::Object_at(fp() - StandardFrameConstants::kCPSlotSize);
- Object** parameters_base = &Memory::Object_at(sp());
+ &Memory<Object*>(fp() - StandardFrameConstants::kCPSlotSize);
+ Object** parameters_base = &Memory<Object*>(sp());
Object** parameters_limit = frame_header_base - slot_space / kPointerSize;
// Visit the parameters that may be on top of the saved registers.
@@ -994,7 +994,7 @@ int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
void OptimizedFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
void JavaScriptFrame::SetParameterValue(int index, Object* value) const {
- Memory::Object_at(GetParameterSlot(index)) = value;
+ Memory<Object*>(GetParameterSlot(index)) = value;
}
@@ -1002,7 +1002,7 @@ bool JavaScriptFrame::IsConstructor() const {
Address fp = caller_fp();
if (has_adapted_arguments()) {
// Skip the arguments adaptor frame and look at the real caller.
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+ fp = Memory<Address>(fp + StandardFrameConstants::kCallerFPOffset);
}
return IsConstructFrame(fp);
}
@@ -1030,7 +1030,7 @@ int OptimizedFrame::GetNumberOfIncomingArguments() const {
Code* code = LookupCode();
if (code->kind() == Code::BUILTIN) {
return static_cast<int>(
- Memory::intptr_at(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
+ Memory<intptr_t>(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
} else {
return JavaScriptFrame::GetNumberOfIncomingArguments();
}
@@ -1085,7 +1085,7 @@ Object* JavaScriptFrame::receiver() const { return GetParameter(-1); }
Object* JavaScriptFrame::context() const {
const int offset = StandardFrameConstants::kContextOffset;
- Object* maybe_result = Memory::Object_at(fp() + offset);
+ Object* maybe_result = Memory<Object*>(fp() + offset);
DCHECK(!maybe_result->IsSmi());
return maybe_result;
}
@@ -1215,7 +1215,7 @@ void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
}
Object* JavaScriptFrame::GetParameter(int index) const {
- return Memory::Object_at(GetParameterSlot(index));
+ return Memory<Object*>(GetParameterSlot(index));
}
int JavaScriptFrame::ComputeParametersCount() const {
@@ -1228,7 +1228,7 @@ int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
DCHECK_EQ(RegisterConfiguration::Default()->GetAllocatableGeneralCode(0),
kJavaScriptCallArgCountRegister.code());
Object* argc_object =
- Memory::Object_at(fp() + BuiltinContinuationFrameConstants::kArgCOffset);
+ Memory<Object*>(fp() + BuiltinContinuationFrameConstants::kArgCOffset);
return Smi::ToInt(argc_object);
}
@@ -1240,7 +1240,7 @@ intptr_t JavaScriptBuiltinContinuationFrame::GetSPToFPDelta() const {
}
Object* JavaScriptBuiltinContinuationFrame::context() const {
- return Memory::Object_at(
+ return Memory<Object*>(
fp() + BuiltinContinuationFrameConstants::kBuiltinContextOffset);
}
@@ -1252,8 +1252,8 @@ void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
// Only allow setting exception if previous value was the hole.
CHECK_EQ(ReadOnlyRoots(isolate()).the_hole_value(),
- Memory::Object_at(exception_argument_slot));
- Memory::Object_at(exception_argument_slot) = exception;
+ Memory<Object*>(exception_argument_slot));
+ Memory<Object*>(exception_argument_slot) = exception;
}
FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
@@ -1643,7 +1643,7 @@ int OptimizedFrame::StackSlotOffsetRelativeToFp(int slot_index) {
Object* OptimizedFrame::StackSlotAt(int index) const {
- return Memory::Object_at(fp() + StackSlotOffsetRelativeToFp(index));
+ return Memory<Object*>(fp() + StackSlotOffsetRelativeToFp(index));
}
int InterpretedFrame::position() const {
@@ -1674,7 +1674,7 @@ int InterpretedFrame::GetBytecodeOffset(Address fp) {
InterpreterFrameConstants::kBytecodeOffsetFromFp,
InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
Address expression_offset = fp + offset - index * kPointerSize;
- int raw_offset = Smi::ToInt(Memory::Object_at(expression_offset));
+ int raw_offset = Smi::ToInt(Memory<Object*>(expression_offset));
return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
}
@@ -1801,7 +1801,7 @@ wasm::WasmCode* WasmCompiledFrame::wasm_code() const {
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
const int offset = WasmCompiledFrameConstants::kWasmInstanceOffset;
- Object* instance = Memory::Object_at(fp() + offset);
+ Object* instance = Memory<Object*>(fp() + offset);
return WasmInstanceObject::cast(instance);
}
@@ -1889,7 +1889,7 @@ Code* WasmInterpreterEntryFrame::unchecked_code() const { UNREACHABLE(); }
WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
const int offset = WasmCompiledFrameConstants::kWasmInstanceOffset;
- Object* instance = Memory::Object_at(fp() + offset);
+ Object* instance = Memory<Object*>(fp() + offset);
return WasmInstanceObject::cast(instance);
}
@@ -1923,13 +1923,13 @@ WasmInstanceObject* WasmCompileLazyFrame::wasm_instance() const {
Object** WasmCompileLazyFrame::wasm_instance_slot() const {
const int offset = WasmCompileLazyFrameConstants::kWasmInstanceOffset;
- return &Memory::Object_at(fp() + offset);
+ return &Memory<Object*>(fp() + offset);
}
void WasmCompileLazyFrame::Iterate(RootVisitor* v) const {
const int header_size = WasmCompileLazyFrameConstants::kFixedFrameSizeFromFp;
- Object** base = &Memory::Object_at(sp());
- Object** limit = &Memory::Object_at(fp() - header_size);
+ Object** base = &Memory<Object*>(sp());
+ Object** limit = &Memory<Object*>(fp() - header_size);
v->VisitRootPointers(Root::kTop, nullptr, base, limit);
v->VisitRootPointer(Root::kTop, nullptr, wasm_instance_slot());
}
@@ -2105,8 +2105,8 @@ void EntryFrame::Iterate(RootVisitor* v) const {
void StandardFrame::IterateExpressions(RootVisitor* v) const {
const int offset = StandardFrameConstants::kLastObjectOffset;
- Object** base = &Memory::Object_at(sp());
- Object** limit = &Memory::Object_at(fp() + offset) + 1;
+ Object** base = &Memory<Object*>(sp());
+ Object** limit = &Memory<Object*>(fp() + offset) + 1;
v->VisitRootPointers(Root::kTop, nullptr, base, limit);
}
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index 718673e697..c0704ee9a2 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -12,6 +12,7 @@
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
@@ -235,8 +236,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
return result;
}
-Object* FutexEmulation::Wake(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer, size_t addr,
+Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
uint32_t num_waiters_to_wake) {
DCHECK(addr < NumberToSize(array_buffer->byte_length()));
@@ -261,9 +261,7 @@ Object* FutexEmulation::Wake(Isolate* isolate,
return Smi::FromInt(waiters_woken);
}
-
-Object* FutexEmulation::NumWaitersForTesting(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
+Object* FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
size_t addr) {
DCHECK(addr < NumberToSize(array_buffer->byte_length()));
void* backing_store = array_buffer->backing_store();
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
index a1580099d6..80c1a6322b 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/futex-emulation.h
@@ -125,13 +125,12 @@ class FutexEmulation : public AllStatic {
// |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
// woken. The rest of the waiters will continue to wait. The return value is
// the number of woken waiters.
- static Object* Wake(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t addr, uint32_t num_waiters_to_wake);
+ static Object* Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
+ uint32_t num_waiters_to_wake);
// Return the number of threads waiting on |addr|. Should only be used for
// testing.
- static Object* NumWaitersForTesting(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
+ static Object* NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
size_t addr);
private:
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 4e2587dec2..4fa7ce0699 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -7,7 +7,7 @@
#include <memory>
#include <vector>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -19,6 +19,7 @@
#include "src/ostreams.h"
#include "src/snapshot/natives.h"
#include "src/splay-tree-inl.h"
+#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
@@ -473,11 +474,11 @@ void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
#if defined(__MACH_O)
class MachO BASE_EMBEDDED {
public:
- explicit MachO(Zone* zone) : zone_(zone), sections_(6, zone) { }
+ explicit MachO(Zone* zone) : sections_(zone) {}
- uint32_t AddSection(MachOSection* section) {
- sections_.Add(section, zone_);
- return sections_.length() - 1;
+ size_t AddSection(MachOSection* section) {
+ sections_.push_back(section);
+ return sections_.size() - 1;
}
void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
@@ -570,7 +571,7 @@ class MachO BASE_EMBEDDED {
cmd->maxprot = 7;
cmd->initprot = 7;
cmd->flags = 0;
- cmd->nsects = sections_.length();
+ cmd->nsects = static_cast<uint32_t>(sections_.size());
memset(cmd->segname, 0, 16);
cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
cmd->nsects;
@@ -583,19 +584,21 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> header,
uintptr_t load_command_start) {
Writer::Slot<MachOSection::Header> headers =
- w->CreateSlotsHere<MachOSection::Header>(sections_.length());
+ w->CreateSlotsHere<MachOSection::Header>(
+ static_cast<uint32_t>(sections_.size()));
cmd->fileoff = w->position();
header->sizeofcmds =
static_cast<uint32_t>(w->position() - load_command_start);
- for (int section = 0; section < sections_.length(); ++section) {
- sections_[section]->PopulateHeader(headers.at(section));
- sections_[section]->WriteBody(headers.at(section), w);
+ uint32_t index = 0;
+ for (MachOSection* section : sections_) {
+ section->PopulateHeader(headers.at(index));
+ section->WriteBody(headers.at(index), w);
+ index++;
}
cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
}
- Zone* zone_;
- ZoneList<MachOSection*> sections_;
+ ZoneChunkList<MachOSection*> sections_;
};
#endif // defined(__MACH_O)
@@ -603,9 +606,9 @@ class MachO BASE_EMBEDDED {
#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
- explicit ELF(Zone* zone) : zone_(zone), sections_(6, zone) {
- sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
- sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
+ explicit ELF(Zone* zone) : sections_(zone) {
+ sections_.push_back(new (zone) ELFSection("", ELFSection::TYPE_NULL, 0));
+ sections_.push_back(new (zone) ELFStringTable(".shstrtab"));
}
void Write(Writer* w) {
@@ -614,14 +617,12 @@ class ELF BASE_EMBEDDED {
WriteSections(w);
}
- ELFSection* SectionAt(uint32_t index) {
- return sections_[index];
- }
+ ELFSection* SectionAt(uint32_t index) { return *sections_.Find(index); }
- uint32_t AddSection(ELFSection* section) {
- sections_.Add(section, zone_);
- section->set_index(sections_.length() - 1);
- return sections_.length() - 1;
+ size_t AddSection(ELFSection* section) {
+ sections_.push_back(section);
+ section->set_index(sections_.size() - 1);
+ return sections_.size() - 1;
}
private:
@@ -704,7 +705,7 @@ class ELF BASE_EMBEDDED {
header->pht_entry_size = 0;
header->pht_entry_num = 0;
header->sht_entry_size = sizeof(ELFSection::Header);
- header->sht_entry_num = sections_.length();
+ header->sht_entry_num = sections_.size();
header->sht_strtab_index = 1;
}
@@ -713,15 +714,16 @@ class ELF BASE_EMBEDDED {
DCHECK(w->position() == sizeof(ELFHeader));
Writer::Slot<ELFSection::Header> headers =
- w->CreateSlotsHere<ELFSection::Header>(sections_.length());
+ w->CreateSlotsHere<ELFSection::Header>(
+ static_cast<uint32_t>(sections_.size()));
// String table for section table is the first section.
ELFStringTable* strtab = static_cast<ELFStringTable*>(SectionAt(1));
strtab->AttachWriter(w);
- for (int i = 0, length = sections_.length();
- i < length;
- i++) {
- sections_[i]->PopulateHeader(headers.at(i), strtab);
+ uint32_t index = 0;
+ for (ELFSection* section : sections_) {
+ section->PopulateHeader(headers.at(index), strtab);
+ index++;
}
strtab->DetachWriter();
}
@@ -734,15 +736,14 @@ class ELF BASE_EMBEDDED {
Writer::Slot<ELFSection::Header> headers =
w->SlotAt<ELFSection::Header>(sizeof(ELFHeader));
- for (int i = 0, length = sections_.length();
- i < length;
- i++) {
- sections_[i]->WriteBody(headers.at(i), w);
+ uint32_t index = 0;
+ for (ELFSection* section : sections_) {
+ section->WriteBody(headers.at(index), w);
+ index++;
}
}
- Zone* zone_;
- ZoneList<ELFSection*> sections_;
+ ZoneChunkList<ELFSection*> sections_;
};
@@ -834,7 +835,7 @@ class ELFSymbol BASE_EMBEDDED {
};
#endif
- void Write(Writer::Slot<SerializedLayout> s, ELFStringTable* t) {
+ void Write(Writer::Slot<SerializedLayout> s, ELFStringTable* t) const {
// Convert symbol names from strings to indexes in the string table.
s->name = static_cast<uint32_t>(t->Add(name));
s->value = value;
@@ -858,17 +859,17 @@ class ELFSymbolTable : public ELFSection {
public:
ELFSymbolTable(const char* name, Zone* zone)
: ELFSection(name, TYPE_SYMTAB, sizeof(uintptr_t)),
- locals_(1, zone),
- globals_(1, zone) {
- }
+ locals_(zone),
+ globals_(zone) {}
virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
w->Align(header->alignment);
- int total_symbols = locals_.length() + globals_.length() + 1;
+ size_t total_symbols = locals_.size() + globals_.size() + 1;
header->offset = w->position();
Writer::Slot<ELFSymbol::SerializedLayout> symbols =
- w->CreateSlotsHere<ELFSymbol::SerializedLayout>(total_symbols);
+ w->CreateSlotsHere<ELFSymbol::SerializedLayout>(
+ static_cast<uint32_t>(total_symbols));
header->size = w->position() - header->offset;
@@ -883,15 +884,17 @@ class ELFSymbolTable : public ELFSection {
ELFSymbol::TYPE_NOTYPE,
0));
WriteSymbolsList(&locals_, symbols.at(1), strtab);
- WriteSymbolsList(&globals_, symbols.at(locals_.length() + 1), strtab);
+ WriteSymbolsList(&globals_,
+ symbols.at(static_cast<uint32_t>(locals_.size() + 1)),
+ strtab);
strtab->DetachWriter();
}
- void Add(const ELFSymbol& symbol, Zone* zone) {
+ void Add(const ELFSymbol& symbol) {
if (symbol.binding() == ELFSymbol::BIND_LOCAL) {
- locals_.Add(symbol, zone);
+ locals_.push_back(symbol);
} else {
- globals_.Add(symbol, zone);
+ globals_.push_back(symbol);
}
}
@@ -900,23 +903,22 @@ class ELFSymbolTable : public ELFSection {
ELFSection::PopulateHeader(header);
     // We are assuming that the string table will follow the symbol table.
header->link = index() + 1;
- header->info = locals_.length() + 1;
+ header->info = static_cast<uint32_t>(locals_.size() + 1);
header->entry_size = sizeof(ELFSymbol::SerializedLayout);
}
private:
- void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
+ void WriteSymbolsList(const ZoneChunkList<ELFSymbol>* src,
Writer::Slot<ELFSymbol::SerializedLayout> dst,
ELFStringTable* strtab) {
- for (int i = 0, len = src->length();
- i < len;
- i++) {
- src->at(i).Write(dst.at(i), strtab);
+ int i = 0;
+ for (const ELFSymbol& symbol : *src) {
+ symbol.Write(dst.at(i++), strtab);
}
}
- ZoneList<ELFSymbol> locals_;
- ZoneList<ELFSymbol> globals_;
+ ZoneChunkList<ELFSymbol> locals_;
+ ZoneChunkList<ELFSymbol> globals_;
};
#endif // defined(__ELF)
@@ -1039,10 +1041,8 @@ class CodeDescription BASE_EMBEDDED {
};
#if defined(__ELF)
-static void CreateSymbolsTable(CodeDescription* desc,
- Zone* zone,
- ELF* elf,
- int text_section_index) {
+static void CreateSymbolsTable(CodeDescription* desc, Zone* zone, ELF* elf,
+ size_t text_section_index) {
ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");
@@ -1050,21 +1050,12 @@ static void CreateSymbolsTable(CodeDescription* desc,
elf->AddSection(symtab);
elf->AddSection(strtab);
- symtab->Add(ELFSymbol("V8 Code",
- 0,
- 0,
- ELFSymbol::BIND_LOCAL,
- ELFSymbol::TYPE_FILE,
- ELFSection::INDEX_ABSOLUTE),
- zone);
-
- symtab->Add(ELFSymbol(desc->name(),
- 0,
- desc->CodeSize(),
- ELFSymbol::BIND_GLOBAL,
- ELFSymbol::TYPE_FUNC,
- text_section_index),
- zone);
+ symtab->Add(ELFSymbol("V8 Code", 0, 0, ELFSymbol::BIND_LOCAL,
+ ELFSymbol::TYPE_FILE, ELFSection::INDEX_ABSOLUTE));
+
+ symtab->Add(ELFSymbol(desc->name(), 0, desc->CodeSize(),
+ ELFSymbol::BIND_GLOBAL, ELFSymbol::TYPE_FUNC,
+ text_section_index));
}
#endif // defined(__ELF)
@@ -1928,15 +1919,9 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
ELF elf(&zone);
Writer w(&elf);
- int text_section_index = elf.AddSection(
- new(&zone) FullHeaderELFSection(
- ".text",
- ELFSection::TYPE_NOBITS,
- kCodeAlignment,
- desc->CodeStart(),
- 0,
- desc->CodeSize(),
- ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
+ size_t text_section_index = elf.AddSection(new (&zone) FullHeaderELFSection(
+ ".text", ELFSection::TYPE_NOBITS, kCodeAlignment, desc->CodeStart(), 0,
+ desc->CodeSize(), ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC));
CreateSymbolsTable(desc, &zone, &elf, text_section_index);
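
The gdb-jit hunks above migrate ZoneList members to ZoneChunkList, which is iterated with a range-based for loop and reports its size as size_t, hence the explicit static_cast<uint32_t> narrowing where 32-bit ELF header fields are filled in. A minimal sketch of that loop shape follows, using std::vector plus invented Section and Header types as stand-ins for the V8 classes:

    #include <cstdint>
    #include <vector>

    struct Header { uint32_t offset = 0; };
    struct Section {
      virtual ~Section() = default;
      virtual void WriteBody(Header* header) = 0;
    };

    // Range-based iteration with an external counter, as in the patched
    // WriteBody loop: the destination headers are still addressed by
    // position, but the source container is no longer indexed directly.
    void WriteBodies(const std::vector<Section*>& sections,
                     std::vector<Header>* headers) {
      uint32_t index = 0;
      for (Section* section : sections) {
        section->WriteBody(&(*headers)[index]);
        index++;
      }
    }
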
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index c7e5e76b34..a3c146cc70 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -4,7 +4,7 @@
#include "src/global-handles.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/cancelable-task.h"
#include "src/objects-inl.h"
#include "src/v8.h"
@@ -191,12 +191,6 @@ class GlobalHandles::Node {
set_state(PENDING);
}
- // Independent flag accessors.
- void MarkIndependent() {
- DCHECK(IsInUse());
- set_independent(true);
- }
-
// Callback parameter accessors.
void set_parameter(void* parameter) {
DCHECK(IsInUse());
@@ -262,7 +256,7 @@ class GlobalHandles::Node {
}
void CollectPhantomCallbackData(
- Isolate* isolate,
+
std::vector<PendingPhantomCallback>* pending_phantom_callbacks) {
DCHECK(weakness_type() == PHANTOM_WEAK ||
weakness_type() == PHANTOM_WEAK_2_EMBEDDER_FIELDS);
@@ -600,14 +594,6 @@ void GlobalHandles::AnnotateStrongRetainer(Object** location,
Node::FromLocation(location)->AnnotateStrongRetainer(label);
}
-void GlobalHandles::MarkIndependent(Object** location) {
- Node::FromLocation(location)->MarkIndependent();
-}
-
-bool GlobalHandles::IsIndependent(Object** location) {
- return Node::FromLocation(location)->is_independent();
-}
-
bool GlobalHandles::IsNearDeath(Object** location) {
return Node::FromLocation(location)->IsNearDeath();
}
@@ -644,8 +630,7 @@ void GlobalHandles::IterateWeakRootsForPhantomHandles(
++number_of_phantom_handle_resets_;
} else if (node->IsPhantomCallback()) {
node->MarkPending();
- node->CollectPhantomCallbackData(isolate(),
- &pending_phantom_callbacks_);
+ node->CollectPhantomCallbackData(&pending_phantom_callbacks_);
}
}
}
@@ -743,8 +728,7 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
} else if (node->IsPhantomCallback()) {
node->MarkPending();
- node->CollectPhantomCallbackData(isolate(),
- &pending_phantom_callbacks_);
+ node->CollectPhantomCallbackData(&pending_phantom_callbacks_);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index a379a74d44..246dc0c469 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -99,11 +99,6 @@ class GlobalHandles {
// Clear the weakness of a global handle.
static void* ClearWeakness(Object** location);
- // Mark the reference to this object independent.
- static void MarkIndependent(Object** location);
-
- static bool IsIndependent(Object** location);
-
// Tells whether global handle is near death.
static bool IsNearDeath(Object** location);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 5fe6f232fe..e9142276e0 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -17,32 +17,6 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
-#ifdef V8_OS_WIN
-
-// Setup for Windows shared library export.
-#ifdef BUILDING_V8_SHARED
-#define V8_EXPORT_PRIVATE __declspec(dllexport)
-#elif USING_V8_SHARED
-#define V8_EXPORT_PRIVATE __declspec(dllimport)
-#else
-#define V8_EXPORT_PRIVATE
-#endif // BUILDING_V8_SHARED
-
-#else // V8_OS_WIN
-
-// Setup for Linux shared library export.
-#if V8_HAS_ATTRIBUTE_VISIBILITY
-#ifdef BUILDING_V8_SHARED
-#define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
-#else
-#define V8_EXPORT_PRIVATE
-#endif
-#else
-#define V8_EXPORT_PRIVATE
-#endif
-
-#endif // V8_OS_WIN
-
#define V8_INFINITY std::numeric_limits<double>::infinity()
namespace v8 {
@@ -183,7 +157,7 @@ constexpr int kDoubleSizeLog2 = 3;
// ARM64 only supports direct calls within a 128 MB range.
constexpr size_t kMaxWasmCodeMemory = 128 * MB;
#else
-constexpr size_t kMaxWasmCodeMemory = 512 * MB;
+constexpr size_t kMaxWasmCodeMemory = 1024 * MB;
#endif
#if V8_HOST_ARCH_64_BIT
@@ -488,6 +462,9 @@ constexpr uint32_t kFreeListZapValue = 0xfeed1eaf;
constexpr int kCodeZapValue = 0xbadc0de;
constexpr uint32_t kPhantomReferenceZap = 0xca11bac;
+// Page constants.
+static const intptr_t kPageAlignmentMask = (intptr_t{1} << kPageSizeBits) - 1;
+
// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
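
The hunk above introduces kPageAlignmentMask as (intptr_t{1} << kPageSizeBits) - 1, the usual low-bits mask for a power-of-two page size. A small sketch of how such a mask is typically used for rounding and alignment checks; the 18-bit page-size exponent here is a placeholder, not necessarily V8's value on any platform:

    #include <cstdint>

    constexpr int kPageSizeBits = 18;  // placeholder exponent
    constexpr intptr_t kPageAlignmentMask = (intptr_t{1} << kPageSizeBits) - 1;

    constexpr intptr_t RoundDownToPage(intptr_t address) {
      return address & ~kPageAlignmentMask;
    }
    constexpr bool IsPageAligned(intptr_t address) {
      return (address & kPageAlignmentMask) == 0;
    }

    static_assert(IsPageAligned(RoundDownToPage(0x54321)),
                  "masking off the low bits yields a page-aligned address");
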
@@ -1101,7 +1078,6 @@ enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
-// Serialized in PreparseData, so numeric values should not be changed.
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
enum FunctionKind : uint8_t {
@@ -1551,7 +1527,7 @@ V8_INLINE static bool HasWeakHeapObjectTag(const Object* value) {
kWeakHeapObjectTag);
}
-V8_INLINE static bool IsClearedWeakHeapObject(MaybeObject* value) {
+V8_INLINE static bool IsClearedWeakHeapObject(const MaybeObject* value) {
return reinterpret_cast<intptr_t>(value) == kClearedWeakHeapObject;
}
@@ -1594,6 +1570,7 @@ enum class LoadSensitivity {
#define FOREACH_WASM_TRAPREASON(V) \
V(TrapUnreachable) \
V(TrapMemOutOfBounds) \
+ V(TrapUnalignedAccess) \
V(TrapDivByZero) \
V(TrapDivUnrepresentable) \
V(TrapRemByZero) \
diff --git a/deps/v8/src/handler-table.cc b/deps/v8/src/handler-table.cc
index 57cf4e2648..dd246575ab 100644
--- a/deps/v8/src/handler-table.cc
+++ b/deps/v8/src/handler-table.cc
@@ -37,7 +37,7 @@ HandlerTable::HandlerTable(Address instruction_start,
#endif
raw_encoded_data_(instruction_start + handler_table_offset) {
if (handler_table_offset > 0) {
- number_of_entries_ = Memory::int32_at(raw_encoded_data_);
+ number_of_entries_ = Memory<int32_t>(raw_encoded_data_);
raw_encoded_data_ += sizeof(int32_t);
}
}
@@ -46,14 +46,14 @@ int HandlerTable::GetRangeStart(int index) const {
DCHECK_EQ(kRangeBasedEncoding, mode_);
DCHECK_LT(index, NumberOfRangeEntries());
int offset = index * kRangeEntrySize + kRangeStartIndex;
- return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+ return Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t));
}
int HandlerTable::GetRangeEnd(int index) const {
DCHECK_EQ(kRangeBasedEncoding, mode_);
DCHECK_LT(index, NumberOfRangeEntries());
int offset = index * kRangeEntrySize + kRangeEndIndex;
- return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+ return Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t));
}
int HandlerTable::GetRangeHandler(int index) const {
@@ -61,14 +61,14 @@ int HandlerTable::GetRangeHandler(int index) const {
DCHECK_LT(index, NumberOfRangeEntries());
int offset = index * kRangeEntrySize + kRangeHandlerIndex;
return HandlerOffsetField::decode(
- Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+ Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t)));
}
int HandlerTable::GetRangeData(int index) const {
DCHECK_EQ(kRangeBasedEncoding, mode_);
DCHECK_LT(index, NumberOfRangeEntries());
int offset = index * kRangeEntrySize + kRangeDataIndex;
- return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+ return Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t));
}
HandlerTable::CatchPrediction HandlerTable::GetRangePrediction(
@@ -77,14 +77,14 @@ HandlerTable::CatchPrediction HandlerTable::GetRangePrediction(
DCHECK_LT(index, NumberOfRangeEntries());
int offset = index * kRangeEntrySize + kRangeHandlerIndex;
return HandlerPredictionField::decode(
- Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+ Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t)));
}
int HandlerTable::GetReturnOffset(int index) const {
DCHECK_EQ(kReturnAddressBasedEncoding, mode_);
DCHECK_LT(index, NumberOfReturnEntries());
int offset = index * kReturnEntrySize + kReturnOffsetIndex;
- return Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t));
+ return Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t));
}
int HandlerTable::GetReturnHandler(int index) const {
@@ -92,17 +92,17 @@ int HandlerTable::GetReturnHandler(int index) const {
DCHECK_LT(index, NumberOfReturnEntries());
int offset = index * kReturnEntrySize + kReturnHandlerIndex;
return HandlerOffsetField::decode(
- Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)));
+ Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t)));
}
void HandlerTable::SetRangeStart(int index, int value) {
int offset = index * kRangeEntrySize + kRangeStartIndex;
- Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+ Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
}
void HandlerTable::SetRangeEnd(int index, int value) {
int offset = index * kRangeEntrySize + kRangeEndIndex;
- Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+ Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
}
void HandlerTable::SetRangeHandler(int index, int handler_offset,
@@ -110,12 +110,12 @@ void HandlerTable::SetRangeHandler(int index, int handler_offset,
int value = HandlerOffsetField::encode(handler_offset) |
HandlerPredictionField::encode(prediction);
int offset = index * kRangeEntrySize + kRangeHandlerIndex;
- Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+ Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
}
void HandlerTable::SetRangeData(int index, int value) {
int offset = index * kRangeEntrySize + kRangeDataIndex;
- Memory::int32_at(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
+ Memory<int32_t>(raw_encoded_data_ + offset * sizeof(int32_t)) = value;
}
// static
@@ -206,12 +206,12 @@ void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
}
void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
- os << " off hdlr\n";
+ os << " offset handler\n";
for (int i = 0; i < NumberOfReturnEntries(); ++i) {
int pc_offset = GetReturnOffset(i);
int handler_offset = GetReturnHandler(i);
- os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
- << handler_offset << "\n";
+ os << std::hex << " " << std::setw(4) << pc_offset << " -> "
+ << std::setw(4) << handler_offset << std::dec << "\n";
}
}
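
The handler-table changes swap the old Memory::int32_at helpers for the templated Memory<T> accessor. A rough, simplified stand-in for that accessor and the entry read/write pattern used above; Address and the unchecked flat layout are assumptions, not the real V8 declarations:

    #include <cstdint>

    using Address = uintptr_t;

    // Reinterpret the bytes at |addr| as a T and return a reference, so the
    // same expression serves both loads and stores.
    template <class T>
    T& Memory(Address addr) {
      return *reinterpret_cast<T*>(addr);
    }

    int32_t ReadTableEntry(Address table_start, int index) {
      return Memory<int32_t>(table_start + index * sizeof(int32_t));
    }

    void WriteTableEntry(Address table_start, int index, int32_t value) {
      Memory<int32_t>(table_start + index * sizeof(int32_t)) = value;
    }
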
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 536388476b..d8e195c6f9 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -8,8 +8,6 @@
#include "src/handles.h"
#include "src/isolate.h"
#include "src/msan.h"
-#include "src/objects-inl.h"
-#include "src/objects/maybe-object-inl.h"
namespace v8 {
namespace internal {
@@ -25,6 +23,12 @@ Handle<T> Handle<T>::New(T* object, Isolate* isolate) {
reinterpret_cast<T**>(HandleScope::CreateHandle(isolate, object)));
}
+template <typename T>
+template <typename S>
+const Handle<T> Handle<T>::cast(Handle<S> that) {
+ T::cast(*reinterpret_cast<T**>(that.location()));
+ return Handle<T>(reinterpret_cast<T**>(that.location_));
+}
HandleScope::HandleScope(Isolate* isolate) {
HandleScopeData* data = isolate->handle_scope_data();
@@ -38,68 +42,13 @@ template <typename T>
Handle<T>::Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
template <typename T>
-inline std::ostream& operator<<(std::ostream& os, Handle<T> handle) {
- return os << Brief(*handle);
-}
-
-MaybeObjectHandle::MaybeObjectHandle()
- : reference_type_(HeapObjectReferenceType::STRONG),
- handle_(Handle<Object>::null()) {}
-
-MaybeObjectHandle::MaybeObjectHandle(MaybeObject* object, Isolate* isolate) {
- HeapObject* heap_object;
- DCHECK(!object->IsClearedWeakHeapObject());
- if (object->ToWeakHeapObject(&heap_object)) {
- handle_ = handle(heap_object, isolate);
- reference_type_ = HeapObjectReferenceType::WEAK;
- } else {
- handle_ = handle(object->ToObject(), isolate);
- reference_type_ = HeapObjectReferenceType::STRONG;
- }
-}
-
-MaybeObjectHandle::MaybeObjectHandle(Handle<Object> object)
- : reference_type_(HeapObjectReferenceType::STRONG), handle_(object) {}
-
-MaybeObjectHandle::MaybeObjectHandle(Object* object, Isolate* isolate)
- : reference_type_(HeapObjectReferenceType::STRONG),
- handle_(object, isolate) {}
-
-MaybeObjectHandle::MaybeObjectHandle(Object* object,
- HeapObjectReferenceType reference_type,
- Isolate* isolate)
- : reference_type_(reference_type), handle_(handle(object, isolate)) {}
-
-MaybeObjectHandle::MaybeObjectHandle(Handle<Object> object,
- HeapObjectReferenceType reference_type)
- : reference_type_(reference_type), handle_(object) {}
-
-MaybeObjectHandle MaybeObjectHandle::Weak(Handle<Object> object) {
- return MaybeObjectHandle(object, HeapObjectReferenceType::WEAK);
-}
-
-MaybeObject* MaybeObjectHandle::operator*() const {
- if (reference_type_ == HeapObjectReferenceType::WEAK) {
- return HeapObjectReference::Weak(*handle_.ToHandleChecked());
- } else {
- return MaybeObject::FromObject(*handle_.ToHandleChecked());
- }
-}
-
-MaybeObject* MaybeObjectHandle::operator->() const {
- if (reference_type_ == HeapObjectReferenceType::WEAK) {
- return HeapObjectReference::Weak(*handle_.ToHandleChecked());
- } else {
- return MaybeObject::FromObject(*handle_.ToHandleChecked());
- }
-}
-
-Handle<Object> MaybeObjectHandle::object() const {
- return handle_.ToHandleChecked();
+V8_INLINE Handle<T> handle(T* object, Isolate* isolate) {
+ return Handle<T>(object, isolate);
}
-inline MaybeObjectHandle handle(MaybeObject* object, Isolate* isolate) {
- return MaybeObjectHandle(object, isolate);
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, Handle<T> handle) {
+ return os << Brief(*handle);
}
HandleScope::~HandleScope() {
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index ff4a5f88de..b0ffe6a13e 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -8,6 +8,7 @@
#include "src/api.h"
#include "src/base/logging.h"
#include "src/identity-map.h"
+#include "src/maybe-handles.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -26,9 +27,9 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
Object* object = *location_;
if (object->IsSmi()) return true;
HeapObject* heap_object = HeapObject::cast(object);
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(heap_object);
- if (chunk->owner()->identity() == RO_SPACE) return true;
- Heap* heap = chunk->heap();
+ Isolate* isolate;
+ if (!Isolate::FromWritableHeapObject(heap_object, &isolate)) return true;
+ Heap* heap = isolate->heap();
Object** roots_array_start = heap->roots_array_start();
if (roots_array_start <= location_ &&
location_ < roots_array_start + Heap::kStrongRootListLength &&
@@ -43,7 +44,7 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
if (heap_object->IsCell()) return true;
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
- return !heap->isolate()->IsDeferredHandle(location_);
+ return !isolate->IsDeferredHandle(location_);
}
return true;
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 714139869f..c0a7ac9420 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -21,6 +21,8 @@ namespace internal {
class DeferredHandles;
class HandleScopeImplementer;
class Isolate;
+template <typename T>
+class MaybeHandle;
class Object;
// ----------------------------------------------------------------------------
@@ -121,10 +123,7 @@ class Handle final : public HandleBase {
}
template <typename S>
- static const Handle<T> cast(Handle<S> that) {
- T::cast(*reinterpret_cast<T**>(that.location()));
- return Handle<T>(reinterpret_cast<T**>(that.location_));
- }
+ inline static const Handle<T> cast(Handle<S> that);
// TODO(yangguo): Values that contain empty handles should be declared as
// MaybeHandle to force validation before being used as handles.
@@ -159,114 +158,6 @@ class Handle final : public HandleBase {
template <typename T>
inline std::ostream& operator<<(std::ostream& os, Handle<T> handle);
-template <typename T>
-V8_INLINE Handle<T> handle(T* object, Isolate* isolate) {
- return Handle<T>(object, isolate);
-}
-
-// ----------------------------------------------------------------------------
-// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
-// into a Handle requires checking that it does not point to nullptr. This
-// ensures nullptr checks before use.
-//
-// Also note that Handles do not provide default equality comparison or hashing
-// operators on purpose. Such operators would be misleading, because intended
-// semantics is ambiguous between Handle location and object identity.
-template <typename T>
-class MaybeHandle final {
- public:
- V8_INLINE MaybeHandle() {}
-
- // Constructor for handling automatic up casting from Handle.
- // Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
- template <typename S, typename = typename std::enable_if<
- std::is_convertible<S*, T*>::value>::type>
- V8_INLINE MaybeHandle(Handle<S> handle)
- : location_(reinterpret_cast<T**>(handle.location_)) {}
-
- // Constructor for handling automatic up casting.
- // Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected.
- template <typename S, typename = typename std::enable_if<
- std::is_convertible<S*, T*>::value>::type>
- V8_INLINE MaybeHandle(MaybeHandle<S> maybe_handle)
- : location_(reinterpret_cast<T**>(maybe_handle.location_)) {}
-
- V8_INLINE MaybeHandle(T* object, Isolate* isolate)
- : MaybeHandle(handle(object, isolate)) {}
-
- V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
- V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
-
- V8_INLINE Handle<T> ToHandleChecked() const {
- Check();
- return Handle<T>(location_);
- }
-
- // Convert to a Handle with a type that can be upcasted to.
- template <typename S>
- V8_INLINE bool ToHandle(Handle<S>* out) const {
- if (location_ == nullptr) {
- *out = Handle<T>::null();
- return false;
- } else {
- *out = Handle<T>(location_);
- return true;
- }
- }
-
- // Returns the raw address where this handle is stored. This should only be
- // used for hashing handles; do not ever try to dereference it.
- V8_INLINE Address address() const { return bit_cast<Address>(location_); }
-
- bool is_null() const { return location_ == nullptr; }
-
- protected:
- T** location_ = nullptr;
-
- // MaybeHandles of different classes are allowed to access each
- // other's location_.
- template <typename>
- friend class MaybeHandle;
-};
-
-// A handle which contains a potentially weak pointer. Keeps it alive (strongly)
-// while the MaybeObjectHandle is alive.
-class MaybeObjectHandle {
- public:
- inline MaybeObjectHandle();
- inline MaybeObjectHandle(MaybeObject* object, Isolate* isolate);
- inline MaybeObjectHandle(Object* object, Isolate* isolate);
- inline explicit MaybeObjectHandle(Handle<Object> object);
-
- static inline MaybeObjectHandle Weak(Object* object, Isolate* isolate);
- static inline MaybeObjectHandle Weak(Handle<Object> object);
-
- inline MaybeObject* operator*() const;
- inline MaybeObject* operator->() const;
- inline Handle<Object> object() const;
-
- bool is_identical_to(const MaybeObjectHandle& other) const {
- Handle<Object> this_handle;
- Handle<Object> other_handle;
- return reference_type_ == other.reference_type_ &&
- handle_.ToHandle(&this_handle) ==
- other.handle_.ToHandle(&other_handle) &&
- this_handle.is_identical_to(other_handle);
- }
-
- bool is_null() const { return handle_.is_null(); }
-
- private:
- inline MaybeObjectHandle(Object* object,
- HeapObjectReferenceType reference_type,
- Isolate* isolate);
- inline MaybeObjectHandle(Handle<Object> object,
- HeapObjectReferenceType reference_type);
-
- HeapObjectReferenceType reference_type_;
- MaybeHandle<Object> handle_;
-};
-
// ----------------------------------------------------------------------------
// A stack-allocated class that governs a number of local handles.
// After a handle scope has been created, all local handles will be
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index 32ff8e3fd8..9f13253219 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -37,14 +37,17 @@
V(call_string, "call") \
V(callee_string, "callee") \
V(caller_string, "caller") \
+ V(caseFirst_string, "caseFirst") \
V(cell_value_string, "%cell_value") \
V(char_at_string, "CharAt") \
V(closure_string, "(closure)") \
+ V(collation_string, "collation") \
V(column_string, "column") \
V(CompileError_string, "CompileError") \
V(configurable_string, "configurable") \
V(construct_string, "construct") \
V(constructor_string, "constructor") \
+ V(conjunction_string, "conjunction") \
V(create_string, "create") \
V(currency_string, "currency") \
V(Date_string, "Date") \
@@ -56,6 +59,7 @@
V(defineProperty_string, "defineProperty") \
V(deleteProperty_string, "deleteProperty") \
V(did_handle_string, "didHandle") \
+ V(disjunction_string, "disjunction") \
V(display_name_string, "displayName") \
V(done_string, "done") \
V(dot_catch_string, ".catch") \
@@ -69,6 +73,7 @@
V(enqueue_string, "enqueue") \
V(entries_string, "entries") \
V(enumerable_string, "enumerable") \
+ V(element_string, "element") \
V(era_string, "era") \
V(Error_string, "Error") \
V(error_to_string, "[object Error]") \
@@ -91,11 +96,13 @@
V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
V(getPrototypeOf_string, "getPrototypeOf") \
V(global_string, "global") \
+ V(globalThis_string, "globalThis") \
V(group_string, "group") \
V(groups_string, "groups") \
V(has_string, "has") \
V(hour_string, "hour") \
V(ignoreCase_string, "ignoreCase") \
+ V(ignorePunctuation_string, "ignorePunctuation") \
V(illegal_access_string, "illegal access") \
V(illegal_argument_string, "illegal argument") \
V(index_string, "index") \
@@ -179,10 +186,10 @@
V(Script_string, "Script") \
V(script_string, "script") \
V(short_string, "short") \
- V(style_string, "style") \
V(second_string, "second") \
- V(set_space_string, "set ") \
V(Set_string, "Set") \
+ V(sensitivity_string, "sensitivity") \
+ V(set_space_string, "set ") \
V(set_string, "set") \
V(SetIterator_string, "Set Iterator") \
V(setPrototypeOf_string, "setPrototypeOf") \
@@ -196,6 +203,7 @@
V(String_string, "String") \
V(string_string, "string") \
V(string_to_string, "[object String]") \
+ V(style_string, "style") \
V(symbol_species_string, "[Symbol.species]") \
V(Symbol_string, "Symbol") \
V(symbol_string, "symbol") \
@@ -220,6 +228,7 @@
V(unicode_string, "unicode") \
V(unit_string, "unit") \
V(URIError_string, "URIError") \
+ V(usage_string, "usage") \
V(use_asm_string, "use asm") \
V(use_strict_string, "use strict") \
V(value_string, "value") \
@@ -324,7 +333,6 @@
F(MC_CLEAR_SLOTS_BUFFER) \
F(MC_CLEAR_STORE_BUFFER) \
F(MC_CLEAR_STRING_TABLE) \
- F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
F(MC_CLEAR_WEAK_REFERENCES) \
@@ -349,6 +357,7 @@
F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
F(MC_MARK_WEAK_CLOSURE_HARMONY) \
+ F(MC_MARK_WRAPPERS) \
F(MC_MARK_WRAPPER_EPILOGUE) \
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index bce22c39ba..2c28f46a85 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -6,6 +6,7 @@
#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
namespace v8 {
diff --git a/deps/v8/src/heap/array-buffer-collector.h b/deps/v8/src/heap/array-buffer-collector.h
index b44af2f2ad..74a28c3d06 100644
--- a/deps/v8/src/heap/array-buffer-collector.h
+++ b/deps/v8/src/heap/array-buffer-collector.h
@@ -8,7 +8,7 @@
#include <vector>
#include "src/base/platform/mutex.h"
-#include "src/objects/js-array.h"
+#include "src/objects/js-array-buffer.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 697d4405d8..e0d862aed7 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -10,6 +10,7 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/objects.h"
+#include "src/objects/js-array-buffer-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 4f92e7e17c..0a158e3543 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -45,24 +45,25 @@ void LocalArrayBufferTracker::Process(Callback callback) {
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
- const size_t size = NumberToSize(new_buffer->byte_length());
+ const size_t length = it->second.length;
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
DCHECK_EQ(it->first->is_wasm_memory(), it->second.is_wasm_memory);
old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, it->second.length);
- tracker->Add(new_buffer, size);
+ ExternalBackingStoreType::kArrayBuffer, length);
+ tracker->Add(new_buffer, length);
}
moved_memory += it->second.length;
} else if (result == kRemoveEntry) {
- freed_memory += it->second.length;
+ const size_t length = it->second.length;
+ freed_memory += length;
// We pass backing_store() and stored length to the collector for freeing
// the backing store. Wasm allocations will go through their own tracker
// based on the backing store.
backing_stores_to_free.push_back(it->second);
old_page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, it->second.length);
+ ExternalBackingStoreType::kArrayBuffer, length);
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 347260dde0..e60fe6c6c0 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -10,7 +10,7 @@
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
#include "src/globals.h"
-#include "src/objects/js-array.h"
+#include "src/objects/js-array-buffer.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index 1753e29afd..5d8c2ab527 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/heap/code-stats.h"
+
#include "src/objects-inl.h"
+#include "src/reloc-info.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 4d41df88af..f6eabbb021 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -310,7 +310,6 @@ class ConcurrentMarkingVisitor final
VisitPointer(map, HeapObject::RawMaybeWeakField(
map, Map::kTransitionsOrPrototypeInfoOffset));
VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
- VisitPointer(map, HeapObject::RawField(map, Map::kWeakCellCacheOffset));
bailout_.Push(map);
}
return 0;
@@ -333,26 +332,6 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitWeakCell(Map* map, WeakCell* object) {
- if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object->map_slot());
- if (!object->cleared()) {
- HeapObject* value = HeapObject::cast(object->value());
- if (marking_state_.IsBlackOrGrey(value)) {
- // Weak cells with live values are directly processed here to reduce
- // the processing time of weak cells during the main GC pause.
- Object** slot = HeapObject::RawField(object, WeakCell::kValueOffset);
- MarkCompactCollector::RecordSlot(object, slot, value);
- } else {
- // If we do not know about liveness of values of weak cells, we have to
- // process them when we know the liveness of the whole transitive
- // closure.
- weak_objects_->weak_cells.Push(task_id_, object);
- }
- }
- return WeakCell::BodyDescriptor::SizeOf(map, object);
- }
-
int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
return VisitJSObjectSubclass(map, object);
}
@@ -648,7 +627,6 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
bailout_->FlushToGlobal(task_id);
on_hold_->FlushToGlobal(task_id);
- weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
weak_objects_->current_ephemerons.FlushToGlobal(task_id);
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index d8659ec889..bf6d5f3b90 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -34,15 +34,17 @@ void LocalEmbedderHeapTracer::AbortTracing() {
void LocalEmbedderHeapTracer::EnterFinalPause() {
if (!InUse()) return;
- remote_tracer_->EnterFinalPause();
+ remote_tracer_->EnterFinalPause(embedder_stack_state_);
+ // Resetting to state unknown as there may be follow up garbage collections
+ // triggered from callbacks that have a different stack state.
+ embedder_stack_state_ = EmbedderHeapTracer::kUnknown;
}
-bool LocalEmbedderHeapTracer::Trace(
- double deadline, EmbedderHeapTracer::AdvanceTracingActions actions) {
- if (!InUse()) return false;
+bool LocalEmbedderHeapTracer::Trace(double deadline) {
+ if (!InUse()) return true;
DCHECK_EQ(0, NumberOfCachedWrappersToTrace());
- return remote_tracer_->AdvanceTracing(deadline, actions);
+ return remote_tracer_->AdvanceTracing(deadline);
}
bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
@@ -67,5 +69,12 @@ bool LocalEmbedderHeapTracer::RequiresImmediateWrapperProcessing() {
return cached_wrappers_to_trace_.size() > kTooManyWrappers;
}
+void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
+ if (!InUse()) return;
+
+ embedder_stack_state_ = stack_state;
+}
+
} // namespace internal
} // namespace v8
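
In embedder-tracing.cc above, Trace() drops the AdvanceTracingActions argument and now returns true when no remote tracer is installed, so "nothing to trace" counts as done rather than as failure. A sketch of that contract with invented TracerShim and Worker names; only the return-value convention mirrors the patch:

    // true from Trace() means tracing is finished for this step.
    class TracerShim {
     public:
      struct Worker {
        virtual ~Worker() = default;
        virtual bool AdvanceTracing(double deadline_ms) = 0;  // true == done
      };

      void set_worker(Worker* worker) { worker_ = worker; }

      bool Trace(double deadline_ms) {
        if (worker_ == nullptr) return true;  // not in use: trivially done
        return worker_->AdvanceTracing(deadline_ms);
      }

     private:
      Worker* worker_ = nullptr;
    };
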
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 994695942b..ab8a46bb53 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -18,10 +18,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
typedef std::pair<void*, void*> WrapperInfo;
- explicit LocalEmbedderHeapTracer(Isolate* isolate)
- : isolate_(isolate),
- remote_tracer_(nullptr),
- num_v8_marking_worklist_was_empty_(0) {}
+ explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
~LocalEmbedderHeapTracer() {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
@@ -35,14 +32,13 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
}
- bool InUse() { return remote_tracer_ != nullptr; }
+ bool InUse() const { return remote_tracer_ != nullptr; }
void TracePrologue();
void TraceEpilogue();
void AbortTracing();
void EnterFinalPause();
- bool Trace(double deadline,
- EmbedderHeapTracer::AdvanceTracingActions actions);
+ bool Trace(double deadline);
bool IsRemoteTracingDone();
size_t NumberOfCachedWrappersToTrace() {
@@ -68,13 +64,20 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
}
+ void SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
private:
typedef std::vector<WrapperInfo> WrapperCache;
Isolate* const isolate_;
- EmbedderHeapTracer* remote_tracer_;
WrapperCache cached_wrappers_to_trace_;
- size_t num_v8_marking_worklist_was_empty_;
+ EmbedderHeapTracer* remote_tracer_ = nullptr;
+ size_t num_v8_marking_worklist_was_empty_ = 0;
+ EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
+ EmbedderHeapTracer::kUnknown;
+
+ friend class EmbedderStackStateScope;
};
} // namespace internal
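
The header cleanup above replaces a hand-written constructor initializer list with in-class default member initializers, so the newly added stack-state field is initialized in exactly one place. The same idiom in a generic form, with made-up field names:

    #include <cstddef>

    class Tracker {
     public:
      explicit Tracker(void* isolate) : isolate_(isolate) {}

     private:
      void* const isolate_;
      // Default member initializers stand in for remote_tracer_(nullptr) and
      // idle_rounds_(0) entries in the constructor initializer list.
      void* remote_tracer_ = nullptr;
      size_t idle_rounds_ = 0;
    };
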
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 85f2679b3f..614c6ec174 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -7,6 +7,8 @@
#include "src/heap/factory.h"
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/string-hasher.h"
@@ -162,8 +164,14 @@ Handle<Object> Factory::NewURIError() {
MessageTemplate::kURIMalformed);
}
-Handle<String> Factory::Uint32ToString(uint32_t value) {
- Handle<String> result = NumberToString(NewNumberFromUint(value));
+Handle<String> Factory::Uint32ToString(uint32_t value, bool check_cache) {
+ Handle<String> result;
+ int32_t int32v = static_cast<int32_t>(value);
+ if (int32v >= 0 && Smi::IsValid(int32v)) {
+ result = NumberToString(Smi::FromInt(int32v), check_cache);
+ } else {
+ result = NumberToString(NewNumberFromUint(value), check_cache);
+ }
if (result->length() <= String::kMaxArrayIndexSize &&
result->hash_field() == String::kEmptyHashField) {
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index a04e2e734b..c8528f9fdb 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -21,7 +21,9 @@
#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
@@ -208,6 +210,7 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
result->set_prototype_users(*empty_weak_array_list());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_bit_field(0);
+ result->set_module_namespace(*undefined_value());
return result;
}
@@ -926,7 +929,7 @@ Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
isolate());
external_string->set_length(cast_string->length());
external_string->set_hash_field(cast_string->hash_field());
- external_string->set_resource(nullptr);
+ external_string->SetResource(isolate(), nullptr);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
}
@@ -1250,7 +1253,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
ExternalOneByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
+ external_string->SetResource(isolate(), resource);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
@@ -1283,7 +1286,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
ExternalTwoByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
+ external_string->SetResource(isolate(), resource);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
@@ -1299,7 +1302,7 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
ExternalOneByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
- external_string->set_resource(resource);
+ external_string->SetResource(isolate(), resource);
isolate()->heap()->RegisterExternalString(*external_string);
return external_string;
@@ -1350,23 +1353,19 @@ Handle<Symbol> Factory::NewPrivateFieldSymbol() {
return symbol;
}
-Handle<Context> Factory::NewNativeContext() {
- Handle<Context> context = NewFixedArrayWithMap<Context>(
+Handle<NativeContext> Factory::NewNativeContext() {
+ Handle<NativeContext> context = NewFixedArrayWithMap<NativeContext>(
Heap::kNativeContextMapRootIndex, Context::NATIVE_CONTEXT_SLOTS, TENURED);
context->set_native_context(*context);
context->set_errors_thrown(Smi::kZero);
context->set_math_random_index(Smi::kZero);
- Handle<WeakCell> weak_cell = NewWeakCell(context);
- context->set_self_weak_cell(*weak_cell);
context->set_serialized_objects(*empty_fixed_array());
- DCHECK(context->IsNativeContext());
return context;
}
-Handle<Context> Factory::NewScriptContext(Handle<Context> outer,
+Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
- DCHECK(outer->IsNativeContext());
Handle<Context> context = NewFixedArrayWithMap<Context>(
Heap::kScriptContextMapRootIndex, scope_info->ContextLength(), TENURED);
context->set_scope_info(*scope_info);
@@ -1387,7 +1386,7 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
}
Handle<Context> Factory::NewModuleContext(Handle<Module> module,
- Handle<Context> outer,
+ Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
Handle<Context> context = NewFixedArrayWithMap<Context>(
@@ -1482,7 +1481,7 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
return context;
}
-Handle<Context> Factory::NewBuiltinContext(Handle<Context> native_context,
+Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
int length) {
DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
Handle<Context> context =
@@ -1555,8 +1554,10 @@ Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id,
SKIP_WRITE_BARRIER);
script->set_flags(0);
script->set_host_defined_options(*empty_fixed_array());
- heap->set_script_list(
- *FixedArrayOfWeakCells::Add(isolate(), script_list(), script));
+ Handle<WeakArrayList> scripts = script_list();
+ scripts = WeakArrayList::AddToEnd(isolate(), scripts,
+ MaybeObjectHandle::Weak(script));
+ heap->set_script_list(*scripts);
LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
return script;
}
@@ -1581,8 +1582,10 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
new_script->set_eval_from_position(script->eval_from_position());
new_script->set_flags(script->flags());
new_script->set_host_defined_options(script->host_defined_options());
- heap->set_script_list(
- *FixedArrayOfWeakCells::Add(isolate(), script_list(), new_script));
+ Handle<WeakArrayList> scripts = script_list();
+ scripts = WeakArrayList::AddToEnd(isolate(), scripts,
+ MaybeObjectHandle::Weak(new_script));
+ heap->set_script_list(*scripts);
LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
return new_script;
}
@@ -1680,6 +1683,7 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
int length, ExternalArrayType array_type, void* external_pointer,
PretenureFlag pretenure) {
+ // TODO(7881): Smi length check
DCHECK(0 <= length && length <= Smi::kMaxValue);
int size = FixedTypedArrayBase::kHeaderSize;
HeapObject* result = AllocateRawWithImmortalMap(
@@ -1695,6 +1699,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
size_t length, size_t byte_length, ExternalArrayType array_type,
bool initialize, PretenureFlag pretenure) {
+ // TODO(7881): Smi length check
DCHECK(0 <= length && length <= Smi::kMaxValue);
CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
size_t size =
@@ -1761,7 +1766,7 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
HeapObject* result = AllocateRawWithImmortalMap(
PropertyCell::kSize, pretenure, *global_property_cell_map());
Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
- cell->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ cell->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_property_details(PropertyDetails(Smi::kZero));
cell->set_name(*name);
@@ -1769,19 +1774,6 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
return cell;
}
-Handle<WeakCell> Factory::NewWeakCell(Handle<HeapObject> value,
- PretenureFlag pretenure) {
- // It is safe to dereference the value because we are embedding it
- // in cell and not inspecting its fields.
- AllowDeferredHandleDereference convert_to_cell;
- STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject* result =
- AllocateRawWithImmortalMap(WeakCell::kSize, pretenure, *weak_cell_map());
- Handle<WeakCell> cell(WeakCell::cast(result), isolate());
- cell->initialize(*value);
- return cell;
-}
-
Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
@@ -1850,9 +1842,8 @@ Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
map->set_inobject_properties_start_or_constructor_function_index(0);
map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
}
- map->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ map->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- map->set_weak_cell_cache(Smi::kZero);
map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
map->SetInObjectUnusedPropertyFields(inobject_properties);
map->set_instance_descriptors(*empty_descriptor_array());
@@ -2311,7 +2302,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
DCHECK(!args.name_.is_null());
// Create the SharedFunctionInfo.
- Handle<Context> context(isolate()->native_context());
+ Handle<NativeContext> context(isolate()->native_context());
Handle<Map> map = args.GetMap(isolate());
Handle<SharedFunctionInfo> info =
NewSharedFunctionInfo(args.name_, args.maybe_exported_function_data_,
@@ -2392,8 +2383,8 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
- Handle<Context> native_context(function->context()->native_context(),
- isolate());
+ Handle<NativeContext> native_context(function->context()->native_context(),
+ isolate());
Handle<Map> new_map;
if (V8_UNLIKELY(IsAsyncGeneratorFunction(function->shared()->kind()))) {
new_map = handle(native_context->async_generator_object_prototype_map(),
@@ -2513,13 +2504,15 @@ Handle<PreParsedScopeData> Factory::NewPreParsedScopeData(int length) {
}
Handle<UncompiledDataWithoutPreParsedScope>
-Factory::NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
+Factory::NewUncompiledDataWithoutPreParsedScope(Handle<String> inferred_name,
+ int32_t start_position,
int32_t end_position,
int32_t function_literal_id) {
Handle<UncompiledDataWithoutPreParsedScope> result(
UncompiledDataWithoutPreParsedScope::cast(
New(uncompiled_data_without_pre_parsed_scope_map(), TENURED)),
isolate());
+ result->set_inferred_name(*inferred_name);
result->set_start_position(start_position);
result->set_end_position(end_position);
result->set_function_literal_id(function_literal_id);
@@ -2530,12 +2523,14 @@ Factory::NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
Handle<UncompiledDataWithPreParsedScope>
Factory::NewUncompiledDataWithPreParsedScope(
- int32_t start_position, int32_t end_position, int32_t function_literal_id,
+ Handle<String> inferred_name, int32_t start_position, int32_t end_position,
+ int32_t function_literal_id,
Handle<PreParsedScopeData> pre_parsed_scope_data) {
Handle<UncompiledDataWithPreParsedScope> result(
UncompiledDataWithPreParsedScope::cast(
New(uncompiled_data_with_pre_parsed_scope_map(), TENURED)),
isolate());
+ result->set_inferred_name(*inferred_name);
result->set_start_position(start_position);
result->set_end_position(end_position);
result->set_function_literal_id(function_literal_id);
@@ -2728,7 +2723,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
// allocation is on.
heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
// Record all references to embedded objects in the new code object.
- heap->RecordWritesIntoCode(*new_code);
+ WriteBarrierForCode(*new_code);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
@@ -2918,7 +2913,7 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure) {
- Context* native_context = isolate()->raw_native_context();
+ NativeContext* native_context = isolate()->raw_native_context();
Map* map = native_context->GetInitialJSArrayMap(elements_kind);
if (map == nullptr) {
JSFunction* array_function = native_context->array_function();
@@ -2985,7 +2980,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
}
Handle<JSWeakMap> Factory::NewJSWeakMap() {
- Context* native_context = isolate()->raw_native_context();
+ NativeContext* native_context = isolate()->raw_native_context();
Handle<Map> map(native_context->js_weak_map_fun()->initial_map(), isolate());
Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)),
isolate());
@@ -3124,10 +3119,10 @@ void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,
size_t* element_size) {
switch (kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- *array_type = kExternal##Type##Array; \
- *element_size = size; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ *array_type = kExternal##Type##Array; \
+ *element_size = sizeof(ctype); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -3143,10 +3138,10 @@ static void ForFixedTypedArray(ExternalArrayType array_type,
size_t* element_size,
ElementsKind* element_kind) {
switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- *element_size = size; \
- *element_kind = TYPE##_ELEMENTS; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ *element_size = sizeof(ctype); \
+ *element_kind = TYPE##_ELEMENTS; \
return;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -3156,10 +3151,10 @@ static void ForFixedTypedArray(ExternalArrayType array_type,
}
JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
- Context* native_context = isolate->context()->native_context();
+ NativeContext* native_context = isolate->context()->native_context();
switch (type) {
-#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
return native_context->type##_array_fun();
TYPED_ARRAYS(TYPED_ARRAY_FUN)
@@ -3169,10 +3164,10 @@ JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
}
JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
- Context* native_context = isolate->context()->native_context();
+ NativeContext* native_context = isolate->context()->native_context();
switch (elements_kind) {
-#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return native_context->type##_array_fun();
TYPED_ARRAYS(TYPED_ARRAY_FUN)
@@ -3239,6 +3234,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
CHECK_EQ(byte_offset % element_size, 0);
CHECK(length <= (std::numeric_limits<size_t>::max() / element_size));
+ // TODO(7881): Smi length check
CHECK(length <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = length * element_size;
SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length,
@@ -3271,6 +3267,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
CHECK(number_of_elements <=
(std::numeric_limits<size_t>::max() / element_size));
+ // TODO(7881): Smi length check
CHECK(number_of_elements <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = number_of_elements * element_size;
@@ -3509,9 +3506,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_raw_outer_scope_info_or_feedback_metadata(
*empty_feedback_metadata(), SKIP_WRITE_BARRIER);
}
- share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
- share->set_function_identifier_or_debug_info(*undefined_value(),
- SKIP_WRITE_BARRIER);
+ share->set_script_or_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
#if V8_SFI_HAS_UNIQUE_ID
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -3520,6 +3515,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_length(0);
share->set_internal_formal_parameter_count(0);
share->set_expected_nof_properties(0);
+ share->set_builtin_function_id(
+ BuiltinFunctionId::kInvalidBuiltinFunctionId);
share->set_raw_function_token_offset(0);
// All flags default to false or 0.
share->set_flags(0);
@@ -3529,9 +3526,10 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->clear_padding();
}
// Link into the list.
- Handle<Object> new_noscript_list = FixedArrayOfWeakCells::Add(
- isolate(), noscript_shared_function_infos(), share);
- isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
+ Handle<WeakArrayList> noscript_list = noscript_shared_function_infos();
+ noscript_list = WeakArrayList::AddToEnd(isolate(), noscript_list,
+ MaybeObjectHandle::Weak(share));
+ isolate()->heap()->set_noscript_shared_function_infos(*noscript_list);
#ifdef VERIFY_HEAP
share->SharedFunctionInfoVerify(isolate());
@@ -3539,68 +3537,90 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
return share;
}
-static inline int NumberCacheHash(Handle<FixedArray> cache,
- Handle<Object> number) {
+namespace {
+inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi* number) {
int mask = (cache->length() >> 1) - 1;
- if (number->IsSmi()) {
- return Handle<Smi>::cast(number)->value() & mask;
- } else {
- int64_t bits = bit_cast<int64_t>(number->Number());
- return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
+ return number->value() & mask;
+}
+inline int NumberToStringCacheHash(Handle<FixedArray> cache, double number) {
+ int mask = (cache->length() >> 1) - 1;
+ int64_t bits = bit_cast<int64_t>(number);
+ return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
+}
+} // namespace
+
+Handle<String> Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
+ const char* string,
+ bool check_cache) {
+ // We tenure the allocated string since it is referenced from the
+ // number-string cache which lives in the old space.
+ Handle<String> js_string =
+ NewStringFromAsciiChecked(string, check_cache ? TENURED : NOT_TENURED);
+ if (!check_cache) return js_string;
+
+ if (!number_string_cache()->get(hash * 2)->IsUndefined(isolate())) {
+ int full_size = isolate()->heap()->MaxNumberToStringCacheSize();
+ if (number_string_cache()->length() != full_size) {
+ Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
+ isolate()->heap()->set_number_string_cache(*new_cache);
+ return js_string;
+ }
}
+ number_string_cache()->set(hash * 2, *number);
+ number_string_cache()->set(hash * 2 + 1, *js_string);
+ return js_string;
}
-Handle<Object> Factory::GetNumberStringCache(Handle<Object> number) {
+Handle<Object> Factory::NumberToStringCacheGet(Object* number, int hash) {
DisallowHeapAllocation no_gc;
- int hash = NumberCacheHash(number_string_cache(), number);
Object* key = number_string_cache()->get(hash * 2);
- if (key == *number || (key->IsHeapNumber() && number->IsHeapNumber() &&
- key->Number() == number->Number())) {
+ if (key == number || (key->IsHeapNumber() && number->IsHeapNumber() &&
+ key->Number() == number->Number())) {
return Handle<String>(
String::cast(number_string_cache()->get(hash * 2 + 1)), isolate());
}
return undefined_value();
}
-void Factory::SetNumberStringCache(Handle<Object> number,
- Handle<String> string) {
- int hash = NumberCacheHash(number_string_cache(), number);
- if (number_string_cache()->get(hash * 2) != *undefined_value()) {
- int full_size = isolate()->heap()->FullSizeNumberStringCacheLength();
- if (number_string_cache()->length() != full_size) {
- Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
- isolate()->heap()->set_number_string_cache(*new_cache);
- return;
- }
+Handle<String> Factory::NumberToString(Handle<Object> number,
+ bool check_cache) {
+ if (number->IsSmi()) return NumberToString(Smi::cast(*number), check_cache);
+
+ double double_value = Handle<HeapNumber>::cast(number)->value();
+ // Try to canonicalize doubles.
+ int smi_value;
+ if (DoubleToSmiInteger(double_value, &smi_value)) {
+ return NumberToString(Smi::FromInt(smi_value), check_cache);
}
- number_string_cache()->set(hash * 2, *number);
- number_string_cache()->set(hash * 2 + 1, *string);
-}
-Handle<String> Factory::NumberToString(Handle<Object> number,
- bool check_number_string_cache) {
- isolate()->counters()->number_to_string_runtime()->Increment();
- if (check_number_string_cache) {
- Handle<Object> cached = GetNumberStringCache(number);
+ int hash = 0;
+ if (check_cache) {
+ hash = NumberToStringCacheHash(number_string_cache(), double_value);
+ Handle<Object> cached = NumberToStringCacheGet(*number, hash);
if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
}
char arr[100];
Vector<char> buffer(arr, arraysize(arr));
- const char* str;
- if (number->IsSmi()) {
- int num = Handle<Smi>::cast(number)->value();
- str = IntToCString(num, buffer);
- } else {
- double num = Handle<HeapNumber>::cast(number)->value();
- str = DoubleToCString(num, buffer);
+ const char* string = DoubleToCString(double_value, buffer);
+
+ return NumberToStringCacheSet(number, hash, string, check_cache);
+}
+
+Handle<String> Factory::NumberToString(Smi* number, bool check_cache) {
+ int hash = 0;
+ if (check_cache) {
+ hash = NumberToStringCacheHash(number_string_cache(), number);
+ Handle<Object> cached = NumberToStringCacheGet(number, hash);
+ if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
}
- // We tenure the allocated string since it is referenced from the
- // number-string cache which lives in the old space.
- Handle<String> js_string = NewStringFromAsciiChecked(str, TENURED);
- SetNumberStringCache(number, js_string);
- return js_string;
+ char arr[100];
+ Vector<char> buffer(arr, arraysize(arr));
+ const char* string = IntToCString(number->value(), buffer);
+
+ return NumberToStringCacheSet(handle(number, isolate()), hash, string,
+ check_cache);
}
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
@@ -3614,8 +3634,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
debug_info->set_debugger_hints(0);
DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info->debugging_id());
DCHECK(!shared->HasDebugInfo());
- debug_info->set_function_identifier(
- shared->function_identifier_or_debug_info());
+ debug_info->set_script(shared->script_or_debug_info());
debug_info->set_original_bytecode_array(
ReadOnlyRoots(heap).undefined_value());
debug_info->set_break_points(ReadOnlyRoots(heap).empty_fixed_array());
@@ -3708,50 +3727,48 @@ Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
return result;
}
-Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> native_context,
+Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
int number_of_properties) {
- DCHECK(native_context->IsNativeContext());
- const int kMapCacheSize = 128;
+ if (number_of_properties == 0) {
+ // Reuse the initial map of the Object function if the literal has no
+ // predeclared properties.
+ return handle(context->object_function()->initial_map(), isolate());
+ }
+
// We do not cache maps for too many properties or when running builtin code.
if (isolate()->bootstrapper()->IsActive()) {
return Map::Create(isolate(), number_of_properties);
}
+
// Use initial slow object proto map for too many properties.
+ const int kMapCacheSize = 128;
if (number_of_properties > kMapCacheSize) {
- return handle(native_context->slow_object_with_object_prototype_map(),
- isolate());
- }
- if (number_of_properties == 0) {
- // Reuse the initial map of the Object function if the literal has no
- // predeclared properties.
- return handle(native_context->object_function()->initial_map(), isolate());
+ return handle(context->slow_object_with_object_prototype_map(), isolate());
}
int cache_index = number_of_properties - 1;
- Handle<Object> maybe_cache(native_context->map_cache(), isolate());
+ Handle<Object> maybe_cache(context->map_cache(), isolate());
if (maybe_cache->IsUndefined(isolate())) {
// Allocate the new map cache for the native context.
- maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
- native_context->set_map_cache(*maybe_cache);
+ maybe_cache = NewWeakFixedArray(kMapCacheSize, TENURED);
+ context->set_map_cache(*maybe_cache);
} else {
// Check to see whether there is a matching element in the cache.
- Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
- Object* result = cache->get(cache_index);
- if (result->IsWeakCell()) {
- WeakCell* cell = WeakCell::cast(result);
- if (!cell->cleared()) {
- Map* map = Map::cast(cell->value());
- DCHECK(!map->is_dictionary_map());
- return handle(map, isolate());
- }
+ Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
+ MaybeObject* result = cache->Get(cache_index);
+ HeapObject* heap_object;
+ if (result->ToWeakHeapObject(&heap_object)) {
+ Map* map = Map::cast(heap_object);
+ DCHECK(!map->is_dictionary_map());
+ return handle(map, isolate());
}
}
+
// Create a new map and add it to the cache.
- Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
+ Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
Handle<Map> map = Map::Create(isolate(), number_of_properties);
DCHECK(!map->is_dictionary_map());
- Handle<WeakCell> cell = NewWeakCell(map);
- cache->set(cache_index, *cell);
+ cache->Set(cache_index, HeapObjectReference::Weak(*map));
return map;
}
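ObjectLiteralMapFromCache now keeps its per-property-count maps in a WeakFixedArray, so a cached map can be collected once nothing else references it and is simply recreated on the next miss. The following is a hedged sketch of the same pattern using std::weak_ptr; the class and names are illustrative and not part of V8.

#include <memory>
#include <vector>

// Illustrative types only; not V8's Map or WeakFixedArray.
struct MapSketch {
  int property_count;
};

// Index i weakly holds the cached map for i + 1 predeclared properties; a
// cleared reference just means the map is created again on the next request.
class ObjectLiteralMapCacheSketch {
 public:
  explicit ObjectLiteralMapCacheSketch(int capacity) : cache_(capacity) {}

  std::shared_ptr<MapSketch> Lookup(int number_of_properties) {
    int index = number_of_properties - 1;
    if (index < 0 || index >= static_cast<int>(cache_.size())) {
      // Too many properties (or zero): bypass the cache entirely.
      return std::make_shared<MapSketch>(MapSketch{number_of_properties});
    }
    if (auto map = cache_[index].lock()) return map;  // weak ref still alive
    auto map = std::make_shared<MapSketch>(MapSketch{number_of_properties});
    cache_[index] = map;  // store weakly, like HeapObjectReference::Weak
    return map;
  }

 private:
  std::vector<std::weak_ptr<MapSketch>> cache_;
};

int main() {
  ObjectLiteralMapCacheSketch cache(128);
  auto a = cache.Lookup(3);
  auto b = cache.Lookup(3);  // same object while 'a' keeps it alive
  return a == b ? 0 : 1;
}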
@@ -3911,8 +3928,8 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
if (IsFunctionModeWithName(function_mode)) {
// Add name field.
Handle<Name> name = isolate()->factory()->name_string();
- Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
+ roc_attribs, Representation::Tagged());
map->AppendDescriptor(&d);
} else {
@@ -3987,8 +4004,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
if (IsFunctionModeWithName(function_mode)) {
// Add name field.
Handle<Name> name = isolate()->factory()->name_string();
- Descriptor d = Descriptor::DataField(name, field_index++, roc_attribs,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
+ roc_attribs, Representation::Tagged());
map->AppendDescriptor(&d);
} else {
@@ -4002,8 +4019,8 @@ Handle<Map> Factory::CreateStrictFunctionMap(
if (IsFunctionModeWithHomeObject(function_mode)) {
// Add home object field.
Handle<Name> name = isolate()->factory()->home_object_symbol();
- Descriptor d = Descriptor::DataField(name, field_index++, DONT_ENUM,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(isolate(), name, field_index++,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index b73e8a922a..cd57b5bf87 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -5,14 +5,18 @@
#ifndef V8_HEAP_FACTORY_H_
#define V8_HEAP_FACTORY_H_
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
#include "src/builtins/builtins.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/heap.h"
+#include "src/maybe-handles.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
#include "src/objects/hash-table.h"
+#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
#include "src/objects/ordered-hash-table.h"
@@ -36,14 +40,17 @@ class DebugInfo;
class EnumCache;
class FreshlyAllocatedBigInt;
class Isolate;
+class JSGeneratorObject;
class JSMap;
class JSMapIterator;
class JSModuleNamespace;
+class JSProxy;
class JSSet;
class JSSetIterator;
class JSWeakMap;
class LoadHandler;
class ModuleInfo;
+class NativeContext;
class NewFunctionArgs;
class PreParsedScopeData;
class PromiseResolveThenableJobTask;
@@ -355,17 +362,18 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Symbol> NewPrivateFieldSymbol();
// Create a global (but otherwise uninitialized) context.
- Handle<Context> NewNativeContext();
+ Handle<NativeContext> NewNativeContext();
// Create a script context.
- Handle<Context> NewScriptContext(Handle<Context> outer,
+ Handle<Context> NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info);
// Create an empty script context table.
Handle<ScriptContextTable> NewScriptContextTable();
// Create a module context.
- Handle<Context> NewModuleContext(Handle<Module> module, Handle<Context> outer,
+ Handle<Context> NewModuleContext(Handle<Module> module,
+ Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info);
// Create a function or eval context.
@@ -397,7 +405,8 @@ class V8_EXPORT_PRIVATE Factory {
// These are similar to function context but don't have a previous
// context or any scope info. These are used to store spec defined
// context values.
- Handle<Context> NewBuiltinContext(Handle<Context> native_context, int length);
+ Handle<Context> NewBuiltinContext(Handle<NativeContext> native_context,
+ int length);
Handle<Struct> NewStruct(InstanceType type,
PretenureFlag pretenure = NOT_TENURED);
@@ -454,9 +463,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PropertyCell> NewPropertyCell(Handle<Name> name,
PretenureFlag pretenure = TENURED);
- Handle<WeakCell> NewWeakCell(Handle<HeapObject> value,
- PretenureFlag pretenure = TENURED);
-
Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
@@ -727,12 +733,14 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PreParsedScopeData> NewPreParsedScopeData(int length);
Handle<UncompiledDataWithoutPreParsedScope>
- NewUncompiledDataWithoutPreParsedScope(int32_t start_position,
+ NewUncompiledDataWithoutPreParsedScope(Handle<String> inferred_name,
+ int32_t start_position,
int32_t end_position,
int32_t function_literal_id);
Handle<UncompiledDataWithPreParsedScope> NewUncompiledDataWithPreParsedScope(
- int32_t start_position, int32_t end_position, int32_t function_literal_id,
+ Handle<String> inferred_name, int32_t start_position,
+ int32_t end_position, int32_t function_literal_id,
Handle<PreParsedScopeData>);
// Create an External object for V8's external API.
@@ -817,10 +825,11 @@ class V8_EXPORT_PRIVATE Factory {
DECLARE_ERROR(WasmRuntimeError)
#undef DECLARE_ERROR
- Handle<String> NumberToString(Handle<Object> number,
- bool check_number_string_cache = true);
+ Handle<String> NumberToString(Handle<Object> number, bool check_cache = true);
+ Handle<String> NumberToString(Smi* number, bool check_cache = true);
- inline Handle<String> Uint32ToString(uint32_t value);
+ inline Handle<String> Uint32ToString(uint32_t value,
+ bool check_cache = false);
#define ROOT_ACCESSOR(type, name, camel_name) inline Handle<type> name();
ROOT_LIST(ROOT_ACCESSOR)
@@ -908,7 +917,7 @@ class V8_EXPORT_PRIVATE Factory {
// Return a map for given number of properties using the map cache in the
// native context.
- Handle<Map> ObjectLiteralMapFromCache(Handle<Context> native_context,
+ Handle<Map> ObjectLiteralMapFromCache(Handle<NativeContext> native_context,
int number_of_properties);
Handle<LoadHandler> NewLoadHandler(int data_count);
@@ -995,10 +1004,11 @@ class V8_EXPORT_PRIVATE Factory {
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
- Handle<Object> GetNumberStringCache(Handle<Object> number);
+ Handle<Object> NumberToStringCacheGet(Object* number, int hash);
// Update the cache with a new number-string pair.
- void SetNumberStringCache(Handle<Object> number, Handle<String> string);
+ Handle<String> NumberToStringCacheSet(Handle<Object> number, int hash,
+ const char* string, bool check_cache);
// Create a JSArray with no elements and no length.
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 60a3b256c8..5ee7186c6a 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -665,7 +665,6 @@ void GCTracer::PrintNVP() const {
"clear.slots_buffer=%.1f "
"clear.store_buffer=%.1f "
"clear.string_table=%.1f "
- "clear.weak_cells=%.1f "
"clear.weak_collections=%.1f "
"clear.weak_lists=%.1f "
"clear.weak_references=%.1f "
@@ -762,7 +761,6 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
current_.scopes[Scope::MC_CLEAR_STORE_BUFFER],
current_.scopes[Scope::MC_CLEAR_STRING_TABLE],
- current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
current_.scopes[Scope::MC_CLEAR_WEAK_REFERENCES],
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 41f1a6bb3a..485b22902a 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -8,11 +8,6 @@
namespace v8 {
namespace internal {
-const double HeapController::kMinHeapGrowingFactor = 1.1;
-const double HeapController::kMaxHeapGrowingFactor = 4.0;
-const double HeapController::kConservativeHeapGrowingFactor = 1.3;
-const double HeapController::kTargetMutatorUtilization = 0.97;
-
// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
// (mutator speed), this function returns the heap growing factor that will
// achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
@@ -52,74 +47,71 @@ const double HeapController::kTargetMutatorUtilization = 0.97;
// F * (1 - MU / (R * (1 - MU))) = 1
// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
// F = R * (1 - MU) / (R * (1 - MU) - MU)
-double HeapController::HeapGrowingFactor(double gc_speed, double mutator_speed,
- double max_factor) {
- DCHECK_LE(kMinHeapGrowingFactor, max_factor);
- DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
+double MemoryController::GrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor) {
+ DCHECK_LE(kMinGrowingFactor, max_factor);
+ DCHECK_GE(kMaxGrowingFactor, max_factor);
if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
- const double mu = kTargetMutatorUtilization;
- const double a = speed_ratio * (1 - mu);
- const double b = speed_ratio * (1 - mu) - mu;
+ const double a = speed_ratio * (1 - kTargetMutatorUtilization);
+ const double b =
+ speed_ratio * (1 - kTargetMutatorUtilization) - kTargetMutatorUtilization;
// The factor is a / b, but we need to check for small b first.
double factor = (a < b * max_factor) ? a / b : max_factor;
factor = Min(factor, max_factor);
- factor = Max(factor, kMinHeapGrowingFactor);
+ factor = Max(factor, kMinGrowingFactor);
return factor;
}
-double HeapController::MaxHeapGrowingFactor(size_t max_old_generation_size) {
+double MemoryController::MaxGrowingFactor(size_t curr_max_size) {
const double min_small_factor = 1.3;
const double max_small_factor = 2.0;
const double high_factor = 4.0;
- size_t max_old_generation_size_in_mb = max_old_generation_size / MB;
- max_old_generation_size_in_mb =
- Max(max_old_generation_size_in_mb,
- static_cast<size_t>(kMinOldGenerationSize));
+ size_t max_size_in_mb = curr_max_size / MB;
+ max_size_in_mb = Max(max_size_in_mb, kMinSize);
// If we are on a device with lots of memory, we allow a high heap
// growing factor.
- if (max_old_generation_size_in_mb >= kMaxOldGenerationSize) {
+ if (max_size_in_mb >= kMaxSize) {
return high_factor;
}
- DCHECK_GE(max_old_generation_size_in_mb, kMinOldGenerationSize);
- DCHECK_LT(max_old_generation_size_in_mb, kMaxOldGenerationSize);
+ DCHECK_GE(max_size_in_mb, kMinSize);
+ DCHECK_LT(max_size_in_mb, kMaxSize);
// On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_old_generation_size_in_mb - kMinOldGenerationSize) *
+ double factor = (max_size_in_mb - kMinSize) *
(max_small_factor - min_small_factor) /
- (kMaxOldGenerationSize - kMinOldGenerationSize) +
+ (kMaxSize - kMinSize) +
min_small_factor;
return factor;
}
-size_t HeapController::CalculateOldGenerationAllocationLimit(
- size_t old_gen_size, size_t max_old_generation_size, double gc_speed,
- double mutator_speed, size_t new_space_capacity,
- Heap::HeapGrowingMode growing_mode) {
- double max_factor = MaxHeapGrowingFactor(max_old_generation_size);
- double factor = HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
+size_t MemoryController::CalculateAllocationLimit(
+ size_t curr_size, size_t max_size, double gc_speed, double mutator_speed,
+ size_t new_space_capacity, Heap::HeapGrowingMode growing_mode) {
+ double max_factor = MaxGrowingFactor(max_size);
+ double factor = GrowingFactor(gc_speed, mutator_speed, max_factor);
if (FLAG_trace_gc_verbose) {
heap_->isolate()->PrintWithTimestamp(
- "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+ "%s factor %.1f based on mu=%.3f, speed_ratio=%.f "
"(gc=%.f, mutator=%.f)\n",
- factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
- mutator_speed);
+ ControllerName(), factor, kTargetMutatorUtilization,
+ gc_speed / mutator_speed, gc_speed, mutator_speed);
}
if (growing_mode == Heap::HeapGrowingMode::kConservative ||
growing_mode == Heap::HeapGrowingMode::kSlow) {
- factor = Min(factor, kConservativeHeapGrowingFactor);
+ factor = Min(factor, kConservativeGrowingFactor);
}
if (growing_mode == Heap::HeapGrowingMode::kMinimal) {
- factor = kMinHeapGrowingFactor;
+ factor = kMinGrowingFactor;
}
if (FLAG_heap_growing_percent > 0) {
@@ -127,26 +119,25 @@ size_t HeapController::CalculateOldGenerationAllocationLimit(
}
CHECK_LT(1.0, factor);
- CHECK_LT(0, old_gen_size);
- uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
- limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
+ CHECK_LT(0, curr_size);
+ uint64_t limit = static_cast<uint64_t>(curr_size * factor);
+ limit = Max(limit, static_cast<uint64_t>(curr_size) +
MinimumAllocationLimitGrowingStep(growing_mode));
limit += new_space_capacity;
uint64_t halfway_to_the_max =
- (static_cast<uint64_t>(old_gen_size) + max_old_generation_size) / 2;
+ (static_cast<uint64_t>(curr_size) + max_size) / 2;
size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
if (FLAG_trace_gc_verbose) {
heap_->isolate()->PrintWithTimestamp(
- "Heap Controller Limit: old size: %" PRIuS " KB, new limit: %" PRIuS
- " KB (%.1f)\n",
- old_gen_size / KB, result / KB, factor);
+ "%s Limit: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
+ ControllerName(), curr_size / KB, result / KB, factor);
}
return result;
}
-size_t HeapController::MinimumAllocationLimitGrowingStep(
+size_t MemoryController::MinimumAllocationLimitGrowingStep(
Heap::HeapGrowingMode growing_mode) {
const size_t kRegularAllocationLimitGrowingStep = 8;
const size_t kLowMemoryAllocationLimitGrowingStep = 2;
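To make the formula above concrete: with target mutator utilization MU = 0.97 and speed ratio R = gc_speed / mutator_speed, the growing factor is F = R * (1 - MU) / (R * (1 - MU) - MU), clamped into [kMinGrowingFactor, max_factor]. A small stand-alone sketch that reproduces the clamping, assuming the HeapController defaults of 1.1 and 0.97:

#include <algorithm>
#include <cstdio>

// Stand-alone re-derivation of GrowingFactor with assumed default constants.
double GrowingFactorSketch(double gc_speed, double mutator_speed,
                           double max_factor) {
  const double kMinFactor = 1.1;
  const double kMU = 0.97;
  if (gc_speed == 0 || mutator_speed == 0) return max_factor;
  const double r = gc_speed / mutator_speed;
  const double a = r * (1 - kMU);
  const double b = r * (1 - kMU) - kMU;
  // a / b only makes sense when b is large enough; otherwise use max_factor.
  double factor = (a < b * max_factor) ? a / b : max_factor;
  factor = std::min(factor, max_factor);
  return std::max(factor, kMinFactor);
}

int main() {
  // GC only 10x faster than the mutator: R = 10, so b = 0.3 - 0.97 < 0 and
  // the guard falls back to max_factor (4.0).
  std::printf("%.2f\n", GrowingFactorSketch(1000.0, 100.0, 4.0));
  // GC 100x faster: F = 3.0 / (3.0 - 0.97), roughly 1.48.
  std::printf("%.2f\n", GrowingFactorSketch(10000.0, 100.0, 4.0));
  return 0;
}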
diff --git a/deps/v8/src/heap/heap-controller.h b/deps/v8/src/heap/heap-controller.h
index 717c97a5b8..8aae46c279 100644
--- a/deps/v8/src/heap/heap-controller.h
+++ b/deps/v8/src/heap/heap-controller.h
@@ -13,40 +13,65 @@
namespace v8 {
namespace internal {
-class HeapController {
+class V8_EXPORT_PRIVATE MemoryController {
public:
- explicit HeapController(Heap* heap) : heap_(heap) {}
+ MemoryController(Heap* heap, double min_growing_factor,
+ double max_growing_factor,
+ double conservative_growing_factor,
+ double target_mutator_utilization, size_t min_size,
+ size_t max_size)
+ : heap_(heap),
+ kMinGrowingFactor(min_growing_factor),
+ kMaxGrowingFactor(max_growing_factor),
+ kConservativeGrowingFactor(conservative_growing_factor),
+ kTargetMutatorUtilization(target_mutator_utilization),
+ kMinSize(min_size),
+ kMaxSize(max_size) {}
+ virtual ~MemoryController() {}
- // Computes the allocation limit to trigger the next full garbage collection.
- V8_EXPORT_PRIVATE size_t CalculateOldGenerationAllocationLimit(
- size_t old_gen_size, size_t max_old_generation_size, double gc_speed,
- double mutator_speed, size_t new_space_capacity,
- Heap::HeapGrowingMode growing_mode);
+ // Computes the allocation limit to trigger the next garbage collection.
+ size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
+ double gc_speed, double mutator_speed,
+ size_t new_space_capacity,
+ Heap::HeapGrowingMode growing_mode);
+ // Computes the growing step when the limit increases.
size_t MinimumAllocationLimitGrowingStep(Heap::HeapGrowingMode growing_mode);
- // The old space size has to be a multiple of Page::kPageSize.
+ protected:
+ double GrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor);
+ double MaxGrowingFactor(size_t curr_max_size);
+ virtual const char* ControllerName() = 0;
+
+ Heap* const heap_;
+
+ const double kMinGrowingFactor;
+ const double kMaxGrowingFactor;
+ const double kConservativeGrowingFactor;
+ const double kTargetMutatorUtilization;
// Sizes are in MB.
- static const size_t kMinOldGenerationSize = 128 * Heap::kPointerMultiplier;
- static const size_t kMaxOldGenerationSize = 1024 * Heap::kPointerMultiplier;
+ const size_t kMinSize;
+ const size_t kMaxSize;
- private:
- FRIEND_TEST(HeapController, HeapGrowingFactor);
- FRIEND_TEST(HeapController, MaxHeapGrowingFactor);
+ FRIEND_TEST(HeapControllerTest, HeapGrowingFactor);
+ FRIEND_TEST(HeapControllerTest, MaxHeapGrowingFactor);
+ FRIEND_TEST(HeapControllerTest, MaxOldGenerationSize);
FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
+};
- V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
- V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
- V8_EXPORT_PRIVATE static const double kConservativeHeapGrowingFactor;
- V8_EXPORT_PRIVATE static double MaxHeapGrowingFactor(
- size_t max_old_generation_size);
- V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
- double mutator_speed,
- double max_factor);
+class HeapController : public MemoryController {
+ public:
+ explicit HeapController(Heap* heap)
+ : MemoryController(heap, 1.1, 4.0, 1.3, 0.97, kMinHeapSize,
+ kMaxHeapSize) {}
- static const double kTargetMutatorUtilization;
+ // Sizes are in MB.
+ static const size_t kMinHeapSize = 128 * Heap::kPointerMultiplier;
+ static const size_t kMaxHeapSize = 1024 * Heap::kPointerMultiplier;
- Heap* heap_;
+ protected:
+ const char* ControllerName() { return "HeapController"; }
};
} // namespace internal
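The header above splits the old HeapController into a reusable MemoryController base that owns the tuning constants and a thin subclass that only supplies concrete values and a name. A minimal stand-alone sketch of that base/derived split, with simplified types that are not the V8 classes:

#include <cstdio>

// The base owns the tuning knobs; a subclass only picks values and a name.
class MemoryControllerSketch {
 public:
  MemoryControllerSketch(double min_factor, double max_factor)
      : kMinGrowingFactor(min_factor), kMaxGrowingFactor(max_factor) {}
  virtual ~MemoryControllerSketch() = default;

  void Describe() const {
    std::printf("%s: growing factor in [%.1f, %.1f]\n", ControllerName(),
                kMinGrowingFactor, kMaxGrowingFactor);
  }

 protected:
  virtual const char* ControllerName() const = 0;

  const double kMinGrowingFactor;
  const double kMaxGrowingFactor;
};

class HeapControllerSketch : public MemoryControllerSketch {
 public:
  HeapControllerSketch() : MemoryControllerSketch(1.1, 4.0) {}

 protected:
  const char* ControllerName() const override { return "HeapController"; }
};

int main() {
  HeapControllerSketch heap_controller;
  heap_controller.Describe();  // HeapController: growing factor in [1.1, 4.0]
  return 0;
}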
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 5ad1a1bdd6..62f07ea322 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -8,17 +8,18 @@
#include <cmath>
// Clients of this interface shouldn't depend on lots of heap internals.
-// Do not include anything from src/heap other than src/heap/heap.h here!
+// Do not include anything from src/heap other than src/heap/heap.h and its
+// write barrier here!
+#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/base/platform/platform.h"
#include "src/counters-inl.h"
#include "src/feedback-vector.h"
-// TODO(mstarzinger): There are 3 more includes to remove in order to no longer
+
+// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
-#include "src/heap/incremental-marking-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/store-buffer.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/msan.h"
@@ -32,6 +33,12 @@
#include "src/string-hasher.h"
#include "src/zone/zone-list-inl.h"
+// The following header includes the write barrier essentials that can also be
+// used stand-alone without including heap-inl.h.
+// TODO(mlippautz): Remove once users of object-macros.h include this file on
+// their own.
+#include "src/heap/heap-write-barrier-inl.h"
+
namespace v8 {
namespace internal {
@@ -279,12 +286,33 @@ void Heap::UpdateAllocationsHash(uint32_t value) {
void Heap::RegisterExternalString(String* string) {
+ DCHECK(string->IsExternalString());
+ DCHECK(!string->IsThinString());
external_string_table_.AddString(string);
}
+void Heap::UpdateExternalString(String* string, size_t old_payload,
+ size_t new_payload) {
+ DCHECK(string->IsExternalString());
+ Page* page = Page::FromHeapObject(string);
+
+ if (old_payload > new_payload)
+ page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, old_payload - new_payload);
+ else
+ page->IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, new_payload - old_payload);
+}
void Heap::FinalizeExternalString(String* string) {
DCHECK(string->IsExternalString());
+ Page* page = Page::FromHeapObject(string);
+ ExternalString* ext_string = ExternalString::cast(string);
+
+ page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString,
+ ext_string->ExternalPayloadSize());
+
v8::String::ExternalStringResourceBase** resource_addr =
reinterpret_cast<v8::String::ExternalStringResourceBase**>(
reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
@@ -399,40 +427,6 @@ bool Heap::ShouldBePromoted(Address old_address) {
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
-void Heap::RecordWrite(Object* object, Object** slot, Object* value) {
- DCHECK(!HasWeakHeapObjectTag(*slot));
- DCHECK(!HasWeakHeapObjectTag(value));
- DCHECK(object->IsHeapObject()); // Can't write to slots of a Smi.
- if (!InNewSpace(value) || InNewSpace(HeapObject::cast(object))) return;
- store_buffer()->InsertEntry(reinterpret_cast<Address>(slot));
-}
-
-void Heap::RecordWrite(Object* object, MaybeObject** slot, MaybeObject* value) {
- if (!InNewSpace(value) || !object->IsHeapObject() || InNewSpace(object)) {
- return;
- }
- store_buffer()->InsertEntry(reinterpret_cast<Address>(slot));
-}
-
-void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
- if (InNewSpace(value)) {
- RecordWriteIntoCodeSlow(host, rinfo, value);
- }
-}
-
-void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
- if (InNewSpace(array)) return;
- for (int i = 0; i < length; i++) {
- if (!InNewSpace(array->get(offset + i))) continue;
- store_buffer()->InsertEntry(
- reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
- }
-}
-
-Address* Heap::store_buffer_top_address() {
- return store_buffer()->top_address();
-}
-
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
static_cast<size_t>(byte_size / kPointerSize));
@@ -530,6 +524,8 @@ Isolate* Heap::isolate() {
void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
+ DCHECK(!Contains(string));
+
if (InNewSpace(string)) {
new_space_strings_.push_back(string);
} else {
@@ -573,6 +569,18 @@ int Heap::GetNextTemplateSerialNumber() {
return next_serial_number;
}
+int Heap::MaxNumberToStringCacheSize() const {
+ // Compute the size of the number string cache based on the max newspace size.
+ // The number string cache has a minimum size based on twice the initial cache
+ // size to ensure that it is bigger after being made 'full size'.
+ size_t number_string_cache_size = max_semi_space_size_ / 512;
+ number_string_cache_size =
+ Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
+ Min<size_t>(0x4000u, number_string_cache_size));
+ // There is a string and a number per entry so the length is twice the number
+ // of entries.
+ return static_cast<int>(number_string_cache_size * 2);
+}
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
heap_->always_allocate_scope_count_++;
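Working through the MaxNumberToStringCacheSize arithmetic added above: with a 16 MB max semi-space, 16 MB / 512 gives 32768 candidate entries, the clamp keeps the value between twice the initial cache size and 0x4000 (16384), and the returned FixedArray length is twice that because each entry stores a number and a string, i.e. 32768. A small sketch, assuming kInitialNumberStringCacheSize is 256 (that constant is not shown in this hunk):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Sketch of the sizing rule above; the initial cache size is an assumption.
int MaxNumberToStringCacheSizeSketch(size_t max_semi_space_size) {
  const size_t kInitialNumberStringCacheSize = 256;  // assumed
  size_t entries = max_semi_space_size / 512;
  entries = std::max(kInitialNumberStringCacheSize * 2,
                     std::min<size_t>(0x4000u, entries));
  return static_cast<int>(entries * 2);  // one number + one string per entry
}

int main() {
  // 16 MB semi-space: 32768 candidate entries, clamped to 16384, length 32768.
  std::printf("%d\n", MaxNumberToStringCacheSizeSketch(16u * 1024 * 1024));
  // 512 KB semi-space: 1024 entries survive the clamp, length 2048.
  std::printf("%d\n", MaxNumberToStringCacheSizeSketch(512u * 1024));
  return 0;
}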
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
new file mode 100644
index 0000000000..1e4550679c
--- /dev/null
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -0,0 +1,157 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
+#define V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
+
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
+
+#include "src/heap/heap-write-barrier.h"
+
+#include "src/globals.h"
+#include "src/objects-inl.h"
+#include "src/objects/maybe-object-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Do not use these internal details anywhere outside of this file. These
+// internals are only intended to shortcut write barrier checks.
+namespace heap_internals {
+
+struct MemoryChunk {
+ static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+ static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
+ static constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 3;
+ static constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 4;
+
+ V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
+ HeapObject* object) {
+ return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(object) &
+ ~kPageAlignmentMask);
+ }
+
+ V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }
+
+ V8_INLINE bool InNewSpace() const {
+ constexpr uintptr_t kNewSpaceMask = kFromSpaceBit | kToSpaceBit;
+ return GetFlags() & kNewSpaceMask;
+ }
+
+ V8_INLINE uintptr_t GetFlags() const {
+ return *reinterpret_cast<const uintptr_t*>(
+ reinterpret_cast<const uint8_t*>(this) + kFlagsOffset);
+ }
+};
+
+inline void GenerationalBarrierInternal(HeapObject* object, Address slot,
+ HeapObject* value) {
+ DCHECK(Heap::PageFlagsAreConsistent(object));
+ heap_internals::MemoryChunk* value_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(value);
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+
+ if (!value_chunk->InNewSpace() || object_chunk->InNewSpace()) return;
+
+ Heap::GenerationalBarrierSlow(object, slot, value);
+}
+
+inline void MarkingBarrierInternal(HeapObject* object, Address slot,
+ HeapObject* value) {
+ DCHECK(Heap::PageFlagsAreConsistent(object));
+ heap_internals::MemoryChunk* value_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(value);
+
+ if (!value_chunk->IsMarking()) return;
+
+ Heap::MarkingBarrierSlow(object, slot, value);
+}
+
+} // namespace heap_internals
+
+inline void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value) {
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(value);
+ GenerationalBarrierForCode(host, rinfo, object);
+ MarkingBarrierForCode(host, rinfo, object);
+}
+
+inline void WriteBarrierForCode(Code* host) {
+ Heap::WriteBarrierForCodeSlow(host);
+}
+
+inline void GenerationalBarrier(HeapObject* object, Object** slot,
+ Object* value) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ heap_internals::GenerationalBarrierInternal(
+ object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+}
+
+inline void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
+ MaybeObject* value) {
+ HeapObject* value_heap_object;
+ if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+ heap_internals::GenerationalBarrierInternal(
+ object, reinterpret_cast<Address>(slot), value_heap_object);
+}
+
+inline void GenerationalBarrierForElements(Heap* heap, FixedArray* array,
+ int offset, int length) {
+ heap_internals::MemoryChunk* array_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(array);
+ if (array_chunk->InNewSpace()) return;
+
+ Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
+}
+
+inline void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (!object_chunk->InNewSpace()) return;
+ Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
+}
+
+inline void MarkingBarrier(HeapObject* object, Object** slot, Object* value) {
+ DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ heap_internals::MarkingBarrierInternal(
+ object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+}
+
+inline void MarkingBarrier(HeapObject* object, MaybeObject** slot,
+ MaybeObject* value) {
+ HeapObject* value_heap_object;
+ if (!value->ToStrongOrWeakHeapObject(&value_heap_object)) return;
+ heap_internals::MarkingBarrierInternal(
+ object, reinterpret_cast<Address>(slot), value_heap_object);
+}
+
+inline void MarkingBarrierForElements(Heap* heap, HeapObject* object) {
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (!object_chunk->IsMarking()) return;
+
+ Heap::MarkingBarrierForElementsSlow(heap, object);
+}
+
+inline void MarkingBarrierForCode(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ DCHECK(!HasWeakHeapObjectTag(object));
+ heap_internals::MemoryChunk* object_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (!object_chunk->IsMarking()) return;
+ Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_WRITE_BARRIER_INL_H_
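The fast paths in this new header avoid touching Heap at all: they mask the object or value address down to its page start and read a single flags word to decide whether the slow barrier is needed. Below is a self-contained sketch of that page-flag trick; the page size, header layout, and bit value are illustrative and do not reflect V8's actual MemoryChunk layout.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <new>

// Pages are aligned to their size, so masking any object address yields the
// page header that stores per-page GC state bits.
constexpr uintptr_t kPageSize = uintptr_t{1} << 19;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;

struct PageHeaderSketch {
  size_t size;      // the flags word sits right after this field
  uintptr_t flags;  // per-page GC state bits
};

bool IsMarking(const void* object) {
  auto addr = reinterpret_cast<uintptr_t>(object);
  auto* page =
      reinterpret_cast<const PageHeaderSketch*>(addr & ~kPageAlignmentMask);
  return (page->flags & kMarkingBit) != 0;
}

int main() {
  // Allocate one page-aligned "page" and mark it as being incrementally marked.
  void* page = std::aligned_alloc(kPageSize, kPageSize);
  if (page == nullptr) return 1;
  new (page) PageHeaderSketch{0, kMarkingBit};
  // Any object address inside the page can be masked back to the header.
  void* object = static_cast<char*>(page) + 128;
  std::printf("marking barrier needed: %d\n", IsMarking(object) ? 1 : 0);
  std::free(page);
  return 0;
}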
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
new file mode 100644
index 0000000000..4eaeaae8a4
--- /dev/null
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -0,0 +1,51 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_WRITE_BARRIER_H_
+#define V8_HEAP_HEAP_WRITE_BARRIER_H_
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class FixedArray;
+class Heap;
+class HeapObject;
+class MaybeObject;
+class Object;
+class RelocInfo;
+
+// Note: In general it is preferred to use the macros defined in
+// object-macros.h.
+
+// Write barrier for FixedArray elements.
+#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
+ do { \
+ GenerationalBarrierForElements(heap, array, start, length); \
+ MarkingBarrierForElements(heap, array); \
+ } while (false)
+
+// Combined write barriers.
+void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value);
+void WriteBarrierForCode(Code* host);
+
+// Generational write barrier.
+void GenerationalBarrier(HeapObject* object, Object** slot, Object* value);
+void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
+ MaybeObject* value);
+void GenerationalBarrierForElements(Heap* heap, FixedArray* array, int offset,
+ int length);
+void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
+ HeapObject* object);
+
+// Marking write barrier.
+void MarkingBarrier(HeapObject* object, Object** slot, Object* value);
+void MarkingBarrier(HeapObject* object, MaybeObject** slot, MaybeObject* value);
+void MarkingBarrierForElements(Heap* heap, HeapObject* object);
+void MarkingBarrierForCode(Code* host, RelocInfo* rinfo, HeapObject* object);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_WRITE_BARRIER_H_
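A hedged example of how a call site might use the range barrier declared above: elements are written with per-slot barriers skipped, then a single FIXED_ARRAY_ELEMENTS_WRITE_BARRIER covers both the remembered set and incremental marking for the whole range. This snippet is illustrative only, is not taken from this patch, and assumes the usual FixedArray::set(index, value, SKIP_WRITE_BARRIER) overload; it relies on the surrounding V8 headers rather than being a stand-alone program.

// Hypothetical call site: bulk-fill a range, then emit one range barrier.
void FillRangeSketch(Heap* heap, FixedArray* array, int start, int length,
                     Object* value) {
  for (int i = 0; i < length; i++) {
    array->set(start + i, value, SKIP_WRITE_BARRIER);
  }
  // One call covers the generational and the marking barrier for the range.
  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length);
}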
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 6fd93f659f..2ec30635be 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -8,7 +8,7 @@
#include <unordered_set>
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
@@ -31,6 +31,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
@@ -258,8 +259,8 @@ size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
old_space_physical_memory_factor *
kPointerMultiplier);
- return Max(Min(computed_size, HeapController::kMaxOldGenerationSize),
- HeapController::kMinOldGenerationSize);
+ return Max(Min(computed_size, HeapController::kMaxHeapSize),
+ HeapController::kMinHeapSize);
}
size_t Heap::Capacity() {
@@ -513,22 +514,25 @@ void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
if (!FLAG_track_retaining_path) {
PrintF("Retaining path tracking requires --track-retaining-path\n");
} else {
- int index = 0;
- Handle<FixedArrayOfWeakCells> array = FixedArrayOfWeakCells::Add(
- isolate(), handle(retaining_path_targets(), isolate()), object, &index);
+ Handle<WeakArrayList> array(retaining_path_targets(), isolate());
+ int index = array->length();
+ array = WeakArrayList::AddToEnd(isolate(), array,
+ MaybeObjectHandle::Weak(object));
set_retaining_path_targets(*array);
+ DCHECK_EQ(array->length(), index + 1);
retaining_path_target_option_[index] = option;
}
}
bool Heap::IsRetainingPathTarget(HeapObject* object,
RetainingPathOption* option) {
- if (!retaining_path_targets()->IsFixedArrayOfWeakCells()) return false;
- FixedArrayOfWeakCells* targets =
- FixedArrayOfWeakCells::cast(retaining_path_targets());
- int length = targets->Length();
+ WeakArrayList* targets = retaining_path_targets();
+ int length = targets->length();
+ MaybeObject* object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
- if (targets->Get(i) == object) {
+ MaybeObject* target = targets->Get(i);
+ DCHECK(target->IsWeakOrClearedHeapObject());
+ if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
*option = retaining_path_target_option_[i];
return true;
@@ -1038,29 +1042,6 @@ void Heap::GarbageCollectionEpilogue() {
}
}
-
-void Heap::PreprocessStackTraces() {
- FixedArrayOfWeakCells::Iterator iterator(weak_stack_trace_list());
- FixedArray* elements;
- while ((elements = iterator.Next<FixedArray>()) != nullptr) {
- for (int j = 1; j < elements->length(); j += 4) {
- Object* maybe_code = elements->get(j + 2);
- // If GC happens while adding a stack trace to the weak fixed array,
- // which has been copied into a larger backing store, we may run into
- // a stack trace that has already been preprocessed. Guard against this.
- if (!maybe_code->IsAbstractCode()) break;
- AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
- int offset = Smi::ToInt(elements->get(j + 3));
- int pos = abstract_code->SourcePosition(offset);
- elements->set(j + 2, Smi::FromInt(pos));
- }
- }
- // We must not compact the weak fixed list here, as we may be in the middle
- // of writing to it, when the GC triggered. Instead, we reset the root value.
- set_weak_stack_trace_list(Smi::kZero);
-}
-
-
class GCCallbacksScope {
public:
explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
@@ -1175,8 +1156,7 @@ intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
return 0;
}
-void ReportDuplicates(Isolate* isolate, int size,
- std::vector<HeapObject*>& objects) {
+void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
if (objects.size() == 0) return;
sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
@@ -1274,7 +1254,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
++it) {
- ReportDuplicates(isolate(), it->first, it->second);
+ ReportDuplicates(it->first, it->second);
}
}
}
@@ -1805,7 +1785,7 @@ bool Heap::PerformGarbageCollection(
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
- size_t new_limit = heap_controller()->CalculateOldGenerationAllocationLimit(
+ size_t new_limit = heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
new_space()->Capacity(), CurrentHeapGrowingMode());
old_generation_allocation_limit_ = new_limit;
@@ -1814,7 +1794,7 @@ bool Heap::PerformGarbageCollection(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
- size_t new_limit = heap_controller()->CalculateOldGenerationAllocationLimit(
+ size_t new_limit = heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
new_space()->Capacity(), CurrentHeapGrowingMode());
if (new_limit < old_generation_allocation_limit_) {
@@ -1925,7 +1905,6 @@ void Heap::MarkCompactEpilogue() {
incremental_marking()->Epilogue();
- PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
}
@@ -2296,6 +2275,25 @@ void Heap::ProtectUnprotectedMemoryChunks() {
unprotected_memory_chunks_.clear();
}
+bool Heap::ExternalStringTable::Contains(HeapObject* obj) {
+ for (size_t i = 0; i < new_space_strings_.size(); ++i) {
+ if (new_space_strings_[i] == obj) return true;
+ }
+ for (size_t i = 0; i < old_space_strings_.size(); ++i) {
+ if (old_space_strings_[i] == obj) return true;
+ }
+ return false;
+}
+
+void Heap::ProcessMovedExternalString(Page* old_page, Page* new_page,
+ ExternalString* string) {
+ size_t size = string->ExternalPayloadSize();
+ new_page->IncrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, size);
+ old_page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString, size);
+}
+
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -2313,24 +2311,70 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
}
// String is still reachable.
- String* string = String::cast(first_word.ToForwardingAddress());
- if (string->IsThinString()) string = ThinString::cast(string)->actual();
+ String* new_string = String::cast(first_word.ToForwardingAddress());
+ String* original_string = reinterpret_cast<String*>(*p);
+ // The length of the original string is used to disambiguate between the
+ // scenario of a ThinString being forwarded to an ExternalString (which
+ // already exists in the OLD space), and an ExternalString being forwarded
+ // to its promoted copy. See Scavenger::EvacuateThinString.
+ if (new_string->IsThinString() || original_string->length() == 0) {
+ // Filtering Thin strings out of the external string table.
+ return nullptr;
+ } else if (new_string->IsExternalString()) {
+ heap->ProcessMovedExternalString(
+ Page::FromAddress(reinterpret_cast<Address>(*p)),
+ Page::FromHeapObject(new_string), ExternalString::cast(new_string));
+ return new_string;
+ }
+
// Internalization can replace external strings with non-external strings.
- return string->IsExternalString() ? string : nullptr;
+ return new_string->IsExternalString() ? new_string : nullptr;
}
-void Heap::ExternalStringTable::Verify() {
+void Heap::ExternalStringTable::VerifyNewSpace() {
#ifdef DEBUG
+ std::set<String*> visited_map;
+ std::map<MemoryChunk*, size_t> size_map;
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- Object* obj = Object::cast(new_space_strings_[i]);
- DCHECK(InNewSpace(obj));
+ String* obj = String::cast(new_space_strings_[i]);
+ MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
+ DCHECK(mc->InNewSpace());
+ DCHECK(heap_->InNewSpace(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
+ DCHECK(obj->IsExternalString());
+ // Note: we can have repeated elements in the table.
+ DCHECK_EQ(0, visited_map.count(obj));
+ visited_map.insert(obj);
+ size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
+ }
+ for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
+ it != size_map.end(); it++)
+ DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
+#endif
+}
+
+void Heap::ExternalStringTable::Verify() {
+#ifdef DEBUG
+ std::set<String*> visited_map;
+ std::map<MemoryChunk*, size_t> size_map;
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+ VerifyNewSpace();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- Object* obj = Object::cast(old_space_strings_[i]);
- DCHECK(!InNewSpace(obj));
+ String* obj = String::cast(old_space_strings_[i]);
+ MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
+ DCHECK(!mc->InNewSpace());
+ DCHECK(!heap_->InNewSpace(obj));
DCHECK(!obj->IsTheHole(heap_->isolate()));
- }
+ DCHECK(obj->IsExternalString());
+ // Note: we can have repeated elements in the table.
+ DCHECK_EQ(0, visited_map.count(obj));
+ visited_map.insert(obj);
+ size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
+ }
+ for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
+ it != size_map.end(); it++)
+ DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
#endif
}
@@ -2363,7 +2407,7 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
new_space_strings_.resize(static_cast<size_t>(last - start));
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- Verify();
+ VerifyNewSpace();
}
#endif
}
@@ -2661,7 +2705,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kRetainingPathTargetsRootIndex:
case kFeedbackVectorsForProfilingToolsRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
- case kWeakStackTraceListRootIndex:
case kSerializedObjectsRootIndex:
case kSerializedGlobalProxySizesRootIndex:
case kPublicSymbolTableRootIndex:
@@ -2691,19 +2734,6 @@ bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
return can_be;
}
-int Heap::FullSizeNumberStringCacheLength() {
- // Compute the size of the number string cache based on the max newspace size.
- // The number string cache has a minimum size based on twice the initial cache
- // size to ensure that it is bigger after being made 'full size'.
- size_t number_string_cache_size = max_semi_space_size_ / 512;
- number_string_cache_size =
- Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
- Min<size_t>(0x4000u, number_string_cache_size));
- // There is a string and a number per entry so the length is twice the number
- // of entries.
- return static_cast<int>(number_string_cache_size * 2);
-}
-
void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
@@ -2717,8 +2747,8 @@ namespace {
Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
return Heap::kFixed##Type##ArrayMapRootIndex;
TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
@@ -2729,8 +2759,8 @@ Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return Heap::kFixed##Type##ArrayMapRootIndex;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -2742,8 +2772,8 @@ Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
ElementsKind elements_kind) {
switch (elements_kind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return Heap::kEmptyFixed##Type##ArrayRootIndex;
TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
@@ -2782,7 +2812,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
SKIP_WRITE_BARRIER);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
- Memory::Address_at(addr + kPointerSize) =
+ Memory<Address>(addr + kPointerSize) =
static_cast<Address>(kClearedFreeMemoryValue);
}
} else {
@@ -2850,8 +2880,23 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
} // namespace
#endif // ENABLE_SLOW_DCHECKS
+namespace {
+bool MayContainRecordedSlots(HeapObject* object) {
+ // New space objects do not have recorded slots.
+ if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
+ // Whitelist objects that definitely do not have pointers.
+ if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
+ // Conservatively return true for other objects.
+ return true;
+}
+} // namespace
+
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
+ if (elements_to_trim == 0) {
+ // This simplifies reasoning in the rest of the function.
+ return object;
+ }
CHECK_NOT_NULL(object);
DCHECK(CanMoveObjectStart(object));
// Add custom visitor to concurrent marker if new left-trimmable type
@@ -2886,7 +2931,8 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+ HeapObject* filler =
+ CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
@@ -2903,6 +2949,23 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
ClearRecordedSlot(new_object, HeapObject::RawField(
new_object, FixedArrayBase::kLengthOffset));
+ // Handle invalidated old-to-old slots.
+ if (incremental_marking()->IsCompacting() &&
+ MayContainRecordedSlots(new_object)) {
+ // If the array was right-trimmed before, then it is registered in
+ // the invalidated_slots.
+ MemoryChunk::FromHeapObject(new_object)
+ ->MoveObjectWithInvalidatedSlots(filler, new_object);
+ // We have to clear slots in the free space to avoid stale old-to-old slots.
+ // Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
+ // we need pointer granularity writes to avoid race with the concurrent
+ // marking.
+ if (filler->Size() > FreeSpace::kSize) {
+ MemsetPointer(HeapObject::RawField(filler, FreeSpace::kSize),
+ ReadOnlyRoots(this).undefined_value(),
+ (filler->Size() - FreeSpace::kSize) / kPointerSize);
+ }
+ }
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object->Size());
@@ -2967,9 +3030,23 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
}
// Calculate location of new array end.
- Address old_end = object->address() + object->Size();
+ int old_size = object->Size();
+ Address old_end = object->address() + old_size;
Address new_end = old_end - bytes_to_trim;
+ // Register the array as an object with invalidated old-to-old slots. We
+ // cannot use NotifyObjectLayoutChange as it would mark the array black,
+ // which is not safe for left-trimming because left-trimming re-pushes
+ // only grey arrays onto the marking worklist.
+ if (incremental_marking()->IsCompacting() &&
+ MayContainRecordedSlots(object)) {
+ // Ensure that the object survives because the InvalidatedSlotsFilter will
+ // compute its size from its map during pointers updating phase.
+ incremental_marking()->WhiteToGreyAndPush(object);
+ MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
+ object, old_size);
+ }
+
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
@@ -3258,15 +3335,12 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
const DisallowHeapAllocation&) {
- DCHECK(InOldSpace(object) || InNewSpace(object) ||
- (lo_space()->Contains(object) && object->IsString()));
- if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
+ if (incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndPush(object);
- if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
- // The concurrent marker might have recorded slots for the object.
- // Register this object as invalidated to filter out the slots.
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- chunk->RegisterObjectWithInvalidatedSlots(object, size);
+ if (incremental_marking()->IsCompacting() &&
+ MayContainRecordedSlots(object)) {
+ MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
+ object, size);
}
}
#ifdef VERIFY_HEAP
@@ -4800,7 +4874,7 @@ void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
}
void Heap::TracePossibleWrapper(JSObject* js_object) {
- DCHECK(js_object->WasConstructedFromApiFunction());
+ DCHECK(js_object->IsApiWrapper());
if (js_object->GetEmbedderFieldCount() >= 2 &&
js_object->GetEmbedderField(0) &&
js_object->GetEmbedderField(0) != ReadOnlyRoots(this).undefined_value() &&
@@ -5005,15 +5079,37 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
}
namespace {
-void CompactFixedArrayOfWeakCells(Isolate* isolate, Object* object) {
- if (object->IsFixedArrayOfWeakCells()) {
- FixedArrayOfWeakCells* array = FixedArrayOfWeakCells::cast(object);
- array->Compact<FixedArrayOfWeakCells::NullCallback>(isolate);
- }
+Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
+ Handle<WeakArrayList> array,
+ PretenureFlag pretenure) {
+ if (array->length() == 0) {
+ return array;
+ }
+ int new_length = array->CountLiveWeakReferences();
+ if (new_length == array->length()) {
+ return array;
+ }
+
+ Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
+ heap->isolate(),
+ handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
+ new_length, pretenure);
+ // Allocation might have caused GC and turned some of the elements into
+ // cleared weak heap objects. Count the number of live references again and
+ // fill in the new array.
+ int copy_to = 0;
+ for (int i = 0; i < array->length(); i++) {
+ MaybeObject* element = array->Get(i);
+ if (element->IsClearedWeakHeapObject()) continue;
+ new_array->Set(copy_to++, element);
+ }
+ new_array->set_length(copy_to);
+ return new_array;
}
+
} // anonymous namespace
-void Heap::CompactFixedArraysOfWeakCells() {
+void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
@@ -5030,15 +5126,25 @@ void Heap::CompactFixedArraysOfWeakCells() {
for (auto& prototype_info : prototype_infos) {
Handle<WeakArrayList> array(
WeakArrayList::cast(prototype_info->prototype_users()), isolate());
+ DCHECK_IMPLIES(pretenure == TENURED,
+ InOldSpace(*array) ||
+ *array == ReadOnlyRoots(this).empty_weak_array_list());
WeakArrayList* new_array = PrototypeUsers::Compact(
- array, this, JSObject::PrototypeRegistryCompactionCallback);
+ array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
prototype_info->set_prototype_users(new_array);
}
- // Find known FixedArrayOfWeakCells and compact them.
- CompactFixedArrayOfWeakCells(isolate(), noscript_shared_function_infos());
- CompactFixedArrayOfWeakCells(isolate(), script_list());
- CompactFixedArrayOfWeakCells(isolate(), weak_stack_trace_list());
+ // Find known WeakArrayLists and compact them.
+ Handle<WeakArrayList> scripts(script_list(), isolate());
+ DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*scripts));
+ scripts = CompactWeakArrayList(this, scripts, pretenure);
+ set_script_list(*scripts);
+
+ Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
+ isolate());
+ DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*no_script_list));
+ no_script_list = CompactWeakArrayList(this, no_script_list, pretenure);
+ set_noscript_shared_function_infos(*no_script_list);
}
void Heap::AddRetainedMap(Handle<Map> map) {
@@ -5139,6 +5245,20 @@ void Heap::CheckHandleCount() {
isolate_->handle_scope_implementer()->Iterate(&v);
}
+Address* Heap::store_buffer_top_address() {
+ return store_buffer()->top_address();
+}
+
+// static
+intptr_t Heap::store_buffer_mask_constant() {
+ return StoreBuffer::kStoreBufferMask;
+}
+
+// static
+Address Heap::store_buffer_overflow_function_address() {
+ return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
+}
+
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr);
@@ -5168,34 +5288,6 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
}
}
-void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
- Object* value) {
- DCHECK(InNewSpace(value));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
- RelocInfo::Mode rmode = rinfo->rmode();
- Address addr = rinfo->pc();
- SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
- if (rinfo->IsInConstantPool()) {
- addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- slot_type = CODE_ENTRY_SLOT;
- } else {
- DCHECK(RelocInfo::IsEmbeddedObject(rmode));
- slot_type = OBJECT_SLOT;
- }
- }
- RememberedSet<OLD_TO_NEW>::InsertTyped(
- source_page, reinterpret_cast<Address>(host), slot_type, addr);
-}
-
-void Heap::RecordWritesIntoCode(Code* code) {
- for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
- !it.done(); it.next()) {
- RecordWriteIntoCode(code, it.rinfo(), it.rinfo()->target_object());
- }
-}
-
-
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
case RO_SPACE:
@@ -5450,19 +5542,15 @@ void Heap::ExternalStringTable::CleanUpAll() {
void Heap::ExternalStringTable::TearDown() {
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
Object* o = new_space_strings_[i];
- if (o->IsThinString()) {
- o = ThinString::cast(o)->actual();
- if (!o->IsExternalString()) continue;
- }
+ // Don't finalize thin strings.
+ if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
new_space_strings_.clear();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
Object* o = old_space_strings_[i];
- if (o->IsThinString()) {
- o = ThinString::cast(o)->actual();
- if (!o->IsExternalString()) continue;
- }
+ // Don't finalize thin strings.
+ if (o->IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
old_space_strings_.clear();
@@ -5773,5 +5861,105 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
}
}
+void Heap::WriteBarrierForCodeSlow(Code* code) {
+ for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
+ !it.done(); it.next()) {
+ GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
+ MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
+ }
+}
+
+void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
+ HeapObject* value) {
+ Heap* heap = Heap::FromWritableHeapObject(object);
+ heap->store_buffer()->InsertEntry(slot);
+}
+
+void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray* array,
+ int offset, int length) {
+ for (int i = 0; i < length; i++) {
+ if (!InNewSpace(array->get(offset + i))) continue;
+ heap->store_buffer()->InsertEntry(
+ reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
+ }
+}
+
+void Heap::GenerationalBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ DCHECK(InNewSpace(object));
+ Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+ RelocInfo::Mode rmode = rinfo->rmode();
+ Address addr = rinfo->pc();
+ SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ slot_type = CODE_ENTRY_SLOT;
+ } else {
+ DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ slot_type = OBJECT_SLOT;
+ }
+ }
+ RememberedSet<OLD_TO_NEW>::InsertTyped(
+ source_page, reinterpret_cast<Address>(host), slot_type, addr);
+}
+
+void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
+ HeapObject* value) {
+ Heap* heap = Heap::FromWritableHeapObject(object);
+ heap->incremental_marking()->RecordWriteSlow(
+ object, reinterpret_cast<HeapObjectReference**>(slot), value);
+}
+
+void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject* object) {
+ if (FLAG_concurrent_marking ||
+ heap->incremental_marking()->marking_state()->IsBlack(object)) {
+ heap->incremental_marking()->RevisitObject(object);
+ }
+}
+
+void Heap::MarkingBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
+ HeapObject* object) {
+ Heap* heap = Heap::FromWritableHeapObject(host);
+ DCHECK(heap->incremental_marking()->IsMarking());
+ heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
+}
+
+bool Heap::PageFlagsAreConsistent(HeapObject* object) {
+ Heap* heap = Heap::FromWritableHeapObject(object);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ heap_internals::MemoryChunk* slim_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+
+ const bool generation_consistency =
+ chunk->owner()->identity() != NEW_SPACE ||
+ (chunk->InNewSpace() && slim_chunk->InNewSpace());
+ const bool marking_consistency =
+ !heap->incremental_marking()->IsMarking() ||
+ (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
+ slim_chunk->IsMarking());
+
+ return generation_consistency && marking_consistency;
+}
+
+static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
+ heap_internals::MemoryChunk::kMarkingBit,
+ "Incremental marking flag inconsistent");
+static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
+ heap_internals::MemoryChunk::kFromSpaceBit,
+ "From space flag inconsistent");
+static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
+ heap_internals::MemoryChunk::kToSpaceBit,
+ "To space flag inconsistent");
+static_assert(MemoryChunk::kFlagsOffset ==
+ heap_internals::MemoryChunk::kFlagsOffset,
+ "Flag offset inconsistent");
+
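
Editorial note: the static_asserts above pin the slim heap_internals view to the full MemoryChunk layout so the barrier fast path cannot silently read the wrong bit. The same pattern in isolation, with both struct names invented for this sketch:

    #include <cstdint>

    // Full header as seen by the collector.
    struct FullChunkModel {
      static constexpr uintptr_t kMarkingBit = 1u << 18;
      static constexpr int kFlagsOffset = 0;
      uintptr_t flags;
    };

    // Trimmed-down view used by a write-barrier fast path.
    struct SlimChunkModel {
      static constexpr uintptr_t kMarkingBit = 1u << 18;
      static constexpr int kFlagsOffset = 0;
      uintptr_t flags;
      bool IsMarking() const { return (flags & kMarkingBit) != 0; }
    };

    // If either side changes its layout, compilation fails instead of the
    // fast path reading a stale or misplaced flag word.
    static_assert(FullChunkModel::kMarkingBit == SlimChunkModel::kMarkingBit,
                  "marking bit must agree between the two views");
    static_assert(FullChunkModel::kFlagsOffset == SlimChunkModel::kFlagsOffset,
                  "flags word must live at the same offset");
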
+void Heap::SetEmbedderStackStateForNextFinalizaton(
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
+ local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
+ stack_state);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 0f3c9ea389..2e750d56fa 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -47,7 +47,7 @@ class DeoptimizationData;
class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
-
+class ExternalString;
using v8::MemoryPressureLevel;
// Heap roots that are known to be immortal immovable, for which we can safely
@@ -88,7 +88,6 @@ using v8::MemoryPressureLevel;
V(EmptyScript) \
V(EmptySloppyArgumentsElements) \
V(EmptySlowElementDictionary) \
- V(EmptyWeakCell) \
V(EvalContextMap) \
V(Exception) \
V(FalseValue) \
@@ -159,19 +158,12 @@ using v8::MemoryPressureLevel;
V(UninitializedValue) \
V(UncompiledDataWithoutPreParsedScopeMap) \
V(UncompiledDataWithPreParsedScopeMap) \
- V(WeakCellMap) \
V(WeakFixedArrayMap) \
V(WeakArrayListMap) \
V(WithContextMap) \
V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
-#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
- do { \
- heap->RecordFixedArrayElements(array, start, length); \
- heap->incremental_marking()->RecordWrites(array); \
- } while (false)
-
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferTracker;
@@ -492,6 +484,24 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
+ V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code* host);
+ V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject* object,
+ Address slot,
+ HeapObject* value);
+ V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
+ Heap* heap, FixedArray* array, int offset, int length);
+ V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
+ Code* host, RelocInfo* rinfo, HeapObject* value);
+ V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject* object,
+ Address slot,
+ HeapObject* value);
+ V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
+ Heap* heap, HeapObject* object);
+ V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code* host,
+ RelocInfo* rinfo,
+ HeapObject* value);
+ V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);
+
   // Notifies the heap that it is ok to start marking or other activities that
// should not happen during deserialization.
void NotifyDeserializationComplete();
@@ -678,7 +688,10 @@ class Heap {
external_memory_concurrently_freed_ = 0;
}
- void CompactFixedArraysOfWeakCells();
+ void ProcessMovedExternalString(Page* old_page, Page* new_page,
+ ExternalString* string);
+
+ void CompactWeakArrayLists(PretenureFlag pretenure);
void AddRetainedMap(Handle<Map> map);
@@ -842,6 +855,10 @@ class Heap {
return kRootsBuiltinsOffset;
}
+ static constexpr int root_register_addressable_end_offset() {
+ return kRootRegisterAddressableEndOffset;
+ }
+
Address root_register_addressable_end() {
return reinterpret_cast<Address>(roots_array_start()) +
kRootRegisterAddressableEndOffset;
@@ -971,16 +988,6 @@ class Heap {
// Store buffer API. =========================================================
// ===========================================================================
- // Write barrier support for object[offset] = o;
- inline void RecordWrite(Object* object, MaybeObject** slot,
- MaybeObject* value);
- inline void RecordWrite(Object* object, Object** slot, Object* value);
- inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
- void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
- void RecordWritesIntoCode(Code* code);
- inline void RecordFixedArrayElements(FixedArray* array, int offset,
- int length);
-
// Used for query incremental marking status in generated code.
Address* IsMarkingFlagAddress() {
return reinterpret_cast<Address*>(&is_marking_flag_);
@@ -988,7 +995,9 @@ class Heap {
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
- inline Address* store_buffer_top_address();
+ Address* store_buffer_top_address();
+ static intptr_t store_buffer_mask_constant();
+ static Address store_buffer_overflow_function_address();
void ClearRecordedSlot(HeapObject* object, Object** slot);
void ClearRecordedSlotRange(Address start, Address end);
@@ -1081,6 +1090,8 @@ class Heap {
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
void TracePossibleWrapper(JSObject* js_object);
void RegisterExternallyReferencedObject(Object** object);
+ void SetEmbedderStackStateForNextFinalizaton(
+ EmbedderHeapTracer::EmbedderStackState stack_state);
// ===========================================================================
// External string table API. ================================================
@@ -1089,6 +1100,11 @@ class Heap {
// Registers an external string.
inline void RegisterExternalString(String* string);
+  // Called when a string's resource is changed. The sizes of the old and new
+  // payloads are passed as arguments.
+ inline void UpdateExternalString(String* string, size_t old_payload,
+ size_t new_payload);
+
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
inline void FinalizeExternalString(String* string);
@@ -1466,6 +1482,9 @@ class Heap {
static const char* GarbageCollectionReasonToString(
GarbageCollectionReason gc_reason);
+  // Calculates the number of entries for the full-sized number-to-string cache.
+ inline int MaxNumberToStringCacheSize() const;
+
private:
class SkipStoreBufferScope;
@@ -1481,6 +1500,7 @@ class Heap {
// Registers an external string.
inline void AddString(String* string);
+ bool Contains(HeapObject* obj);
void IterateAll(RootVisitor* v);
void IterateNewSpaceStrings(RootVisitor* v);
@@ -1501,6 +1521,7 @@ class Heap {
private:
void Verify();
+ void VerifyNewSpace();
Heap* const heap_;
@@ -1615,8 +1636,6 @@ class Heap {
int NumberOfScavengeTasks();
- void PreprocessStackTraces();
-
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
@@ -1675,8 +1694,6 @@ class Heap {
// Record statistics after garbage collection.
void ReportStatisticsAfterGC();
- // Creates and installs the full-sized number string cache.
- int FullSizeNumberStringCacheLength();
// Flush the number to string cache.
void FlushNumberStringCache();
@@ -2249,7 +2266,7 @@ class Heap {
friend class EphemeronHashTableMarkingTask;
friend class GCCallbacksScope;
friend class GCTracer;
- friend class HeapController;
+ friend class MemoryController;
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index d2736c6715..19d6b22e4d 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -7,6 +7,7 @@
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/objects/maybe-object.h"
namespace v8 {
@@ -35,21 +36,6 @@ void IncrementalMarking::RecordMaybeWeakWrite(HeapObject* obj,
}
}
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
- if (IsMarking()) {
- if (FLAG_concurrent_marking || marking_state()->IsBlack(obj)) {
- RevisitObject(obj);
- }
- }
-}
-
-void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
- Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- RecordWriteIntoCodeSlow(host, rinfo, value);
- }
-}
-
void IncrementalMarking::RestartIfNotMarking() {
if (state_ == COMPLETE) {
state_ = MARKING;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 2b84a45999..a58d25fff4 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -11,6 +11,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
@@ -113,8 +114,9 @@ int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
return 0;
}
-void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
- Object* value) {
+void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
+ HeapObject* value) {
+ DCHECK(IsMarking());
if (BaseRecordWrite(host, value)) {
// Object is not going to be rescanned. We need to record the slot.
heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
@@ -130,6 +132,9 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
}
void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
+  // Marking a left-trimmable fixed array black is unsafe because left-trimming
+ // re-pushes only grey arrays onto the marking worklist.
+ DCHECK(!obj->IsFixedArrayBase());
// Color the object black and push it into the bailout deque.
marking_state()->WhiteToGrey(obj);
if (marking_state()->GreyToBlack(obj)) {
@@ -197,7 +202,10 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
DCHECK(success);
USE(success);
}
- marking_worklist()->Push(to);
+ // Subsequent left-trimming will re-push only grey arrays.
+ // Ensure that this array is grey.
+ DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
+ marking_worklist()->PushBailout(to);
RestartIfNotMarking();
}
}
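
Editorial note: the hunk above asserts the trimmed array is grey before pushing it onto the bailout worklist, because a later trim only re-pushes grey arrays. A loose, hedged illustration of that invariant with modeled colors and worklist (not V8's types):

    #include <cassert>
    #include <deque>

    enum class Color { kWhite, kGrey, kBlack };

    // Minimal invariant: anything placed on the bailout worklist must be grey,
    // since subsequent left-trims only re-push grey objects.
    struct BailoutWorklistModel {
      std::deque<int> entries;

      void PushBailout(int object_id, Color color) {
        assert(color == Color::kGrey && "only grey objects may be re-pushed");
        entries.push_back(object_id);
      }
    };
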
@@ -928,10 +936,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
if (!heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
- heap_->local_embedder_heap_tracer()->Trace(
- wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::
- DO_NOT_FORCE_COMPLETION));
+ heap_->local_embedder_heap_tracer()->Trace(wrapper_deadline);
}
} else {
Step(step_size_in_bytes, completion_action, step_origin);
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 1a916693ba..0fb5e11651 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -209,13 +209,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObject** slot,
MaybeObject* value);
- V8_INLINE void RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
- Object* value);
- V8_INLINE void RecordWrites(HeapObject* obj);
+ void RevisitObject(HeapObject* obj);
void RecordWriteSlow(HeapObject* obj, HeapObjectReference** slot,
Object* value);
- void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
+ void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, HeapObject* value);
// Returns true if the function succeeds in transitioning the object
// from white to grey.
@@ -301,8 +299,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// Visits the object and returns its size.
V8_INLINE int VisitObject(Map* map, HeapObject* obj);
- void RevisitObject(HeapObject* obj);
-
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 577c4a5576..5e4610257e 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -47,6 +47,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
// Ask the object if the slot is valid.
if (invalidated_object_ == nullptr) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
+ DCHECK(!invalidated_object_->IsFiller());
invalidated_object_size_ =
invalidated_object_->SizeFromMap(invalidated_object_->map());
}
@@ -56,10 +57,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
static_cast<int>(invalidated_end_ - invalidated_start_));
if (offset >= invalidated_object_size_) {
- // A new object could have been allocated during evacuation in the free
- // space outside the object. Since objects are not invalidated in GC pause
- // we can return true here.
- return true;
+ return slots_in_free_space_are_valid_;
}
return invalidated_object_->IsValidSlot(invalidated_object_->map(), offset);
}
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 85430e58bc..42042c63ef 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -9,8 +9,15 @@ namespace v8 {
namespace internal {
InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
+ // Adjust slots_in_free_space_are_valid_ if more spaces are added.
DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
- chunk->owner()->identity() == OLD_SPACE);
+ chunk->InOldSpace() || chunk->InLargeObjectSpace());
+ // The sweeper removes invalid slots and makes free space available for
+ // allocation. Slots for new objects can be recorded in the free space.
+ // Note that we cannot simply check for SweepingDone because pages in large
+ // object space are not swept but have SweepingDone() == true.
+ slots_in_free_space_are_valid_ = chunk->SweepingDone() && chunk->InOldSpace();
+
InvalidatedSlots* invalidated_slots =
chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
iterator_ = invalidated_slots->begin();
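
Editorial note: whether a recorded slot that falls into free space may be trusted depends on the page state computed in the constructor above. A simplified decision function; the PageState struct is invented for illustration, the real data lives on MemoryChunk.

    // Hypothetical page summary.
    struct PageState {
      bool sweeping_done;   // the sweeper has already freed dead objects here
      bool in_old_space;    // large-object pages report SweepingDone() == true too
    };

    // Slots recorded in free space are only valid once the sweeper has run on
    // an old-space page: new objects may then occupy that space and their slots
    // may legitimately be recorded. On unswept or large-object pages such slots
    // are stale and must be filtered out.
    inline bool SlotsInFreeSpaceAreValid(const PageState& page) {
      return page.sweeping_done && page.in_old_space;
    }
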
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index e9410575a3..641e8feb91 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -42,6 +42,7 @@ class InvalidatedSlotsFilter {
Address invalidated_end_;
HeapObject* invalidated_object_;
int invalidated_object_size_;
+ bool slots_in_free_space_are_valid_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
new file mode 100644
index 0000000000..7263387465
--- /dev/null
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -0,0 +1,109 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_LOCAL_ALLOCATOR_INL_H_
+#define V8_HEAP_LOCAL_ALLOCATOR_INL_H_
+
+#include "src/heap/local-allocator.h"
+
+#include "src/heap/spaces-inl.h"
+
+namespace v8 {
+namespace internal {
+
+AllocationResult LocalAllocator::Allocate(AllocationSpace space,
+ int object_size,
+ AllocationAlignment alignment) {
+ switch (space) {
+ case NEW_SPACE:
+ return AllocateInNewSpace(object_size, alignment);
+ case OLD_SPACE:
+ return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
+ alignment);
+ case CODE_SPACE:
+ return compaction_spaces_.Get(CODE_SPACE)
+ ->AllocateRaw(object_size, alignment);
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+void LocalAllocator::FreeLast(AllocationSpace space, HeapObject* object,
+ int object_size) {
+ switch (space) {
+ case NEW_SPACE:
+ FreeLastInNewSpace(object, object_size);
+ return;
+ case OLD_SPACE:
+ FreeLastInOldSpace(object, object_size);
+ return;
+ default:
+ // Only new and old space supported.
+ UNREACHABLE();
+ break;
+ }
+}
+
+void LocalAllocator::FreeLastInNewSpace(HeapObject* object, int object_size) {
+ if (!new_space_lab_.TryFreeLast(object, object_size)) {
+ // We couldn't free the last object so we have to write a proper filler.
+ heap_->CreateFillerObjectAt(object->address(), object_size,
+ ClearRecordedSlots::kNo);
+ }
+}
+
+void LocalAllocator::FreeLastInOldSpace(HeapObject* object, int object_size) {
+ if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
+ // We couldn't free the last object so we have to write a proper filler.
+ heap_->CreateFillerObjectAt(object->address(), object_size,
+ ClearRecordedSlots::kNo);
+ }
+}
+
+AllocationResult LocalAllocator::AllocateInLAB(int object_size,
+ AllocationAlignment alignment) {
+ AllocationResult allocation;
+ if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ if (allocation.IsRetry()) {
+ if (!NewLocalAllocationBuffer()) {
+ return AllocationResult::Retry(OLD_SPACE);
+ } else {
+ allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
+ CHECK(!allocation.IsRetry());
+ }
+ }
+ return allocation;
+}
+
+bool LocalAllocator::NewLocalAllocationBuffer() {
+ if (lab_allocation_will_fail_) return false;
+ LocalAllocationBuffer saved_lab_ = new_space_lab_;
+ AllocationResult result =
+ new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+ new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+ if (new_space_lab_.IsValid()) {
+ new_space_lab_.TryMerge(&saved_lab_);
+ return true;
+ }
+ new_space_lab_ = saved_lab_;
+ lab_allocation_will_fail_ = true;
+ return false;
+}
+
+AllocationResult LocalAllocator::AllocateInNewSpace(
+ int object_size, AllocationAlignment alignment) {
+ if (object_size > kMaxLabObjectSize) {
+ return new_space_->AllocateRawSynchronized(object_size, alignment);
+ }
+ return AllocateInLAB(object_size, alignment);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_LOCAL_ALLOCATOR_INL_H_
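
Editorial note: the new inline header implements evacuation allocation through a per-task local allocation buffer with an undo for the most recent allocation. A self-contained model of the same bump-pointer-with-undo pattern; all names below are invented and this is not the V8 interface.

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // A local allocation buffer: a private chunk carved out of new space that a
    // single evacuation task bumps through without synchronization.
    class LabModel {
     public:
      explicit LabModel(size_t size) : backing_(size), top_(0) {}

      // Returns the offset of the allocation, or nullopt so the caller can
      // retry with a fresh buffer.
      std::optional<size_t> Allocate(size_t bytes) {
        if (top_ + bytes > backing_.size()) return std::nullopt;
        size_t result = top_;
        top_ += bytes;
        return result;
      }

      // Undo the most recent allocation if nothing was allocated after it;
      // otherwise the caller must write a filler object instead.
      bool TryFreeLast(size_t offset, size_t bytes) {
        if (top_ != offset + bytes) return false;
        top_ = offset;
        return true;
      }

     private:
      std::vector<uint8_t> backing_;
      size_t top_;
    };
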
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index 4646a3783d..e84c7188c2 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -41,95 +41,19 @@ class LocalAllocator {
}
}
- AllocationResult Allocate(AllocationSpace space, int object_size,
- AllocationAlignment alignment) {
- switch (space) {
- case NEW_SPACE:
- return AllocateInNewSpace(object_size, alignment);
- case OLD_SPACE:
- return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
- alignment);
- case CODE_SPACE:
- return compaction_spaces_.Get(CODE_SPACE)
- ->AllocateRaw(object_size, alignment);
- default:
- UNREACHABLE();
- break;
- }
- }
-
- void FreeLast(AllocationSpace space, HeapObject* object, int object_size) {
- switch (space) {
- case NEW_SPACE:
- FreeLastInNewSpace(object, object_size);
- return;
- case OLD_SPACE:
- FreeLastInOldSpace(object, object_size);
- return;
- default:
- // Only new and old space supported.
- UNREACHABLE();
- break;
- }
- }
+ inline AllocationResult Allocate(AllocationSpace space, int object_size,
+ AllocationAlignment alignment);
+ inline void FreeLast(AllocationSpace space, HeapObject* object,
+ int object_size);
private:
- AllocationResult AllocateInNewSpace(int object_size,
- AllocationAlignment alignment) {
- if (object_size > kMaxLabObjectSize) {
- return new_space_->AllocateRawSynchronized(object_size, alignment);
- }
- return AllocateInLAB(object_size, alignment);
- }
-
- inline bool NewLocalAllocationBuffer() {
- if (lab_allocation_will_fail_) return false;
- LocalAllocationBuffer saved_lab_ = new_space_lab_;
- AllocationResult result =
- new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
- new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
- if (new_space_lab_.IsValid()) {
- new_space_lab_.TryMerge(&saved_lab_);
- return true;
- }
- new_space_lab_ = saved_lab_;
- lab_allocation_will_fail_ = true;
- return false;
- }
-
- AllocationResult AllocateInLAB(int object_size,
- AllocationAlignment alignment) {
- AllocationResult allocation;
- if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
- }
- allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- if (allocation.IsRetry()) {
- if (!NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
- } else {
- allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- CHECK(!allocation.IsRetry());
- }
- }
- return allocation;
- }
-
- void FreeLastInNewSpace(HeapObject* object, int object_size) {
- if (!new_space_lab_.TryFreeLast(object, object_size)) {
- // We couldn't free the last object so we have to write a proper filler.
- heap_->CreateFillerObjectAt(object->address(), object_size,
- ClearRecordedSlots::kNo);
- }
- }
-
- void FreeLastInOldSpace(HeapObject* object, int object_size) {
- if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
- // We couldn't free the last object so we have to write a proper filler.
- heap_->CreateFillerObjectAt(object->address(), object_size,
- ClearRecordedSlots::kNo);
- }
- }
+ inline AllocationResult AllocateInNewSpace(int object_size,
+ AllocationAlignment alignment);
+ inline bool NewLocalAllocationBuffer();
+ inline AllocationResult AllocateInLAB(int object_size,
+ AllocationAlignment alignment);
+ inline void FreeLastInNewSpace(HeapObject* object, int object_size);
+ inline void FreeLastInOldSpace(HeapObject* object, int object_size);
Heap* const heap_;
NewSpace* const new_space_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index d200671955..466a89080b 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -161,30 +161,6 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
-int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitWeakCell(Map* map, WeakCell* weak_cell) {
- // Enqueue weak cell in linked list of encountered weak collections.
- // We can ignore weak cells with cleared values because they will always
- // contain smi zero.
- if (!weak_cell->cleared()) {
- HeapObject* value = HeapObject::cast(weak_cell->value());
- if (marking_state()->IsBlackOrGrey(value)) {
- // Weak cells with live values are directly processed here to reduce
- // the processing time of weak cells during the main GC pause.
- Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- collector_->RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
- } else {
- // If we do not know about liveness of values of weak cells, we have to
- // process them when we know the liveness of the whole transitive
- // closure.
- collector_->AddWeakCell(weak_cell);
- }
- }
- return WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
-}
-
-template <FixedArrayVisitationMode fixed_array_mode,
- TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointer(HeapObject* host, Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -578,6 +554,8 @@ typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
return iterator(chunk_, bitmap_, end_);
}
+Isolate* MarkCompactCollectorBase::isolate() { return heap()->isolate(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index af56c72418..dea105943a 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -20,7 +20,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
-#include "src/heap/local-allocator.h"
+#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
@@ -1099,7 +1099,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
DCHECK_EQ(host, rinfo->host());
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
- collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
+ GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
@@ -1249,8 +1249,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
bool AbortCompactionForTesting(HeapObject* object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
- Page::kPageAlignmentMask & ~kPointerAlignmentMask;
- if ((object->address() & Page::kPageAlignmentMask) == mask) {
+ kPageAlignmentMask & ~kPointerAlignmentMask;
+ if ((object->address() & kPageAlignmentMask) == mask) {
Page* page = Page::FromAddress(object->address());
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
@@ -1635,8 +1635,7 @@ void MarkCompactCollector::PerformWrapperTracing() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
heap_->local_embedder_heap_tracer()->Trace(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ std::numeric_limits<double>::infinity());
}
}
@@ -1784,6 +1783,18 @@ void MarkCompactCollector::MarkLiveObjects() {
DCHECK(marking_worklist()->IsEmpty());
+ // Mark objects reachable through the embedder heap. This phase is
+ // opportunistic as it may not discover graphs that are only reachable
+ // through ephemerons.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
+ while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone()) {
+ PerformWrapperTracing();
+ ProcessMarkingWorklist();
+ }
+ DCHECK(marking_worklist()->IsEmpty());
+ }
+
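
Editorial note: the loop added above alternates embedder tracing with draining V8's marking worklist until neither side produces new work. A toy fixpoint over two worklists; both visitor interfaces are invented for this sketch.

    #include <deque>
    #include <functional>

    struct TwoSidedMarker {
      std::deque<int> v8_worklist;
      std::deque<int> embedder_worklist;
      std::function<void(int, TwoSidedMarker&)> visit_v8;
      std::function<void(int, TwoSidedMarker&)> visit_embedder;

      // Keep alternating until both worklists are empty; objects reachable only
      // through the other heap's graph are discovered in a later round.
      void MarkToFixpoint() {
        while (!v8_worklist.empty() || !embedder_worklist.empty()) {
          while (!embedder_worklist.empty()) {
            int obj = embedder_worklist.front();
            embedder_worklist.pop_front();
            visit_embedder(obj, *this);
          }
          while (!v8_worklist.empty()) {
            int obj = v8_worklist.front();
            v8_worklist.pop_front();
            visit_v8(obj, *this);
          }
        }
      }
    };
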
// The objects reachable from the roots are marked, yet unreachable objects
// are unmarked. Mark objects reachable due to embedder heap tracing or
// harmony weak maps.
@@ -1874,13 +1885,11 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// cleared.
ClearFullMapTransitions();
}
- ClearWeakCells();
ClearWeakReferences();
MarkDependentCodeForDeoptimization();
ClearWeakCollections();
- DCHECK(weak_objects_.weak_cells.IsEmpty());
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
@@ -1939,7 +1948,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
if (array->GetTargetIfExists(0, isolate(), &map)) {
- DCHECK_NOT_NULL(map); // WeakCells aren't cleared yet.
+ DCHECK_NOT_NULL(map); // Weak pointers aren't cleared yet.
Map* parent = Map::cast(map->constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
@@ -2077,46 +2086,6 @@ void MarkCompactCollector::ClearWeakCollections() {
}
}
-void MarkCompactCollector::ClearWeakCells() {
- Heap* heap = this->heap();
- TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
- WeakCell* weak_cell;
- while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
- // We do not insert cleared weak cells into the list, so the value
- // cannot be a Smi here.
- HeapObject* value = HeapObject::cast(weak_cell->value());
- if (!non_atomic_marking_state()->IsBlackOrGrey(value)) {
- // Cells for new-space objects embedded in optimized code are wrapped in
- // WeakCell and put into Heap::weak_object_to_code_table.
- // Such cells do not have any strong references but we want to keep them
- // alive as long as the cell value is alive.
- // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
- if (value->IsCell()) {
- Object* cell_value = Cell::cast(value)->value();
- if (cell_value->IsHeapObject() &&
- non_atomic_marking_state()->IsBlackOrGrey(
- HeapObject::cast(cell_value))) {
- // Resurrect the cell.
- non_atomic_marking_state()->WhiteToBlack(value);
- Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
- RecordSlot(value, slot, HeapObject::cast(*slot));
- slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
- } else {
- weak_cell->clear();
- }
- } else {
- // All other objects.
- weak_cell->clear();
- }
- } else {
- // The value of the weak cell is alive.
- Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
- }
- }
-}
-
void MarkCompactCollector::ClearWeakReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
std::pair<HeapObject*, HeapObjectReference**> slot;
@@ -2140,7 +2109,6 @@ void MarkCompactCollector::ClearWeakReferences() {
}
void MarkCompactCollector::AbortWeakObjects() {
- weak_objects_.weak_cells.Clear();
weak_objects_.transition_arrays.Clear();
weak_objects_.ephemeron_hash_tables.Clear();
weak_objects_.current_ephemerons.Clear();
@@ -2195,6 +2163,8 @@ static inline SlotCallbackResult UpdateSlot(
}
DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+ } else {
+ DCHECK(heap_obj->map()->IsMap());
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
@@ -2301,7 +2271,14 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
MapWord map_word = HeapObject::cast(*p)->map_word();
if (map_word.IsForwardingAddress()) {
- return String::cast(map_word.ToForwardingAddress());
+ String* new_string = String::cast(map_word.ToForwardingAddress());
+
+ if (new_string->IsExternalString()) {
+ heap->ProcessMovedExternalString(
+ Page::FromAddress(reinterpret_cast<Address>(*p)),
+ Page::FromHeapObject(new_string), ExternalString::cast(new_string));
+ }
+ return new_string;
}
return String::cast(*p);
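
Editorial note: when the forwarded string is external and landed on a different page, ProcessMovedExternalString (declared in heap.h in this patch) rebooks its off-heap payload from the old page to the new one. A minimal model of that accounting, with the per-page counter map invented for illustration and assuming the old page was credited with the payload earlier:

    #include <cstddef>
    #include <unordered_map>

    using PageId = int;
    using ExternalBytes = std::unordered_map<PageId, size_t>;

    // Move 'payload' bytes of external backing-store accounting from the page
    // the string was evacuated from to the page it now lives on.
    inline void ProcessMovedExternalStringModel(ExternalBytes& per_page,
                                                PageId old_page, PageId new_page,
                                                size_t payload) {
      per_page[old_page] -= payload;
      per_page[new_page] += payload;
    }
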
@@ -3026,13 +3003,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
nullptr) {
CHECK_NE(chunk_->owner(), heap_->map_space());
+ const auto check_and_update_old_to_new_slot_fn =
+ [this](MaybeObject** slot) {
+ return CheckAndUpdateOldToNewSlot(reinterpret_cast<Address>(slot));
+ };
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
+ chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_, slot_type, slot, [this](MaybeObject** slot) {
- return CheckAndUpdateOldToNewSlot(
- reinterpret_cast<Address>(slot));
- });
+ heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
});
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
@@ -3896,7 +3874,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
@@ -3913,7 +3891,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 169f2ae671..d62c964336 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -257,7 +257,7 @@ class MarkCompactCollectorBase {
virtual void CollectGarbage() = 0;
inline Heap* heap() const { return heap_; }
- inline Isolate* isolate() { return heap()->isolate(); }
+ inline Isolate* isolate();
protected:
static const int kMainThread = 0;
@@ -420,7 +420,6 @@ typedef Worklist<Ephemeron, 64> EphemeronWorklist;
// Weak objects encountered during marking.
struct WeakObjects {
- Worklist<WeakCell*, 64> weak_cells;
Worklist<TransitionArray*, 64> transition_arrays;
// Keep track of all EphemeronHashTables in the heap to process
@@ -647,10 +646,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
WeakObjects* weak_objects() { return &weak_objects_; }
- void AddWeakCell(WeakCell* weak_cell) {
- weak_objects_.weak_cells.Push(kMainThread, weak_cell);
- }
-
void AddTransitionArray(TransitionArray* array) {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
@@ -810,11 +805,10 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// The linked list of all encountered weak maps is destroyed.
void ClearWeakCollections();
- // Goes through the list of encountered weak cells and clears those with
+ // Goes through the list of encountered weak references and clears those with
// dead values. If the value is a dead map and the parent map transitions to
// the dead map via weak cell, then this function also clears the map
// transition.
- void ClearWeakCells();
void ClearWeakReferences();
void AbortWeakObjects();
@@ -926,7 +920,6 @@ class MarkingVisitor final
V8_INLINE int VisitMap(Map* map, Map* object);
V8_INLINE int VisitNativeContext(Map* map, Context* object);
V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
- V8_INLINE int VisitWeakCell(Map* map, WeakCell* object);
// ObjectVisitor implementation.
V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 6c7a26b672..ac7bcb8087 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -341,6 +341,9 @@ class ObjectStatsCollectorImpl {
ObjectStats::VirtualInstanceType type,
size_t size, size_t over_allocated,
CowMode check_cow_array = kCheckCow);
+ void RecordExternalResourceStats(Address resource,
+ ObjectStats::VirtualInstanceType type,
+ size_t size);
// Gets size from |ob| and assumes no over allocating.
bool RecordSimpleVirtualObjectStats(HeapObject* parent, HeapObject* obj,
ObjectStats::VirtualInstanceType type);
@@ -379,6 +382,7 @@ class ObjectStatsCollectorImpl {
void RecordVirtualJSObjectDetails(JSObject* object);
void RecordVirtualMapDetails(Map* map);
void RecordVirtualScriptDetails(Script* script);
+  void RecordVirtualExternalStringDetails(ExternalString* string);
void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
void RecordVirtualJSFunctionDetails(JSFunction* function);
@@ -388,6 +392,7 @@ class ObjectStatsCollectorImpl {
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
std::unordered_set<HeapObject*> virtual_objects_;
+ std::unordered_set<Address> external_resources_;
FieldStatsCollector field_stats_collector_;
};
@@ -431,8 +436,9 @@ bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
HeapObject* parent, HeapObject* obj, ObjectStats::VirtualInstanceType type,
size_t size, size_t over_allocated, CowMode check_cow_array) {
- if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array))
+ if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array)) {
return false;
+ }
if (virtual_objects_.find(obj) == virtual_objects_.end()) {
virtual_objects_.insert(obj);
@@ -442,6 +448,14 @@ bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
return false;
}
+void ObjectStatsCollectorImpl::RecordExternalResourceStats(
+ Address resource, ObjectStats::VirtualInstanceType type, size_t size) {
+ if (external_resources_.find(resource) == external_resources_.end()) {
+ external_resources_.insert(resource);
+ stats_->RecordVirtualObjectStats(type, size, 0);
+ }
+}
+
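
Editorial note: several strings can point at the same external resource, so the collector above counts each resource address at most once. The same dedup-then-accumulate step in isolation (names invented):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_set>

    struct ExternalResourceStatsModel {
      std::unordered_set<uintptr_t> seen;   // resource addresses already counted
      size_t total_bytes = 0;

      // Returns true only the first time a resource address is recorded, so
      // resources shared by multiple strings are not double counted.
      bool Record(uintptr_t resource, size_t size) {
        if (!seen.insert(resource).second) return false;
        total_bytes += size;
        return true;
      }
    };
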
void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
AllocationSite* site) {
if (!site->PointsToLiteral()) return;
@@ -663,6 +677,8 @@ void ObjectStatsCollectorImpl::CollectStatistics(
RecordVirtualContext(Context::cast(obj));
} else if (obj->IsScript()) {
RecordVirtualScriptDetails(Script::cast(obj));
+ } else if (obj->IsExternalString()) {
+ RecordVirtualExternalStringDetails(ExternalString::cast(obj));
} else if (obj->IsArrayBoilerplateDescription()) {
RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription::cast(obj));
@@ -704,14 +720,13 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
ObjectStats::RETAINED_MAPS_TYPE);
- // FixedArrayOfWeakCells.
+ // WeakArrayList.
RecordSimpleVirtualObjectStats(
- nullptr,
- FixedArrayOfWeakCells::cast(heap_->noscript_shared_function_infos()),
+ nullptr, WeakArrayList::cast(heap_->noscript_shared_function_infos()),
ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- RecordSimpleVirtualObjectStats(
- nullptr, FixedArrayOfWeakCells::cast(heap_->script_list()),
- ObjectStats::SCRIPT_LIST_TYPE);
+ RecordSimpleVirtualObjectStats(nullptr,
+ WeakArrayList::cast(heap_->script_list()),
+ ObjectStats::SCRIPT_LIST_TYPE);
// HashTable.
RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
@@ -776,24 +791,44 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
// Log the size of external source code.
- Object* source = script->source();
- if (source->IsExternalString()) {
+ Object* raw_source = script->source();
+ if (raw_source->IsExternalString()) {
// The contents of external strings aren't on the heap, so we have to record
- // them manually.
- ExternalString* external_source_string = ExternalString::cast(source);
- size_t off_heap_size = external_source_string->ExternalPayloadSize();
- size_t on_heap_size = external_source_string->Size();
- RecordVirtualObjectStats(script, external_source_string,
- ObjectStats::SCRIPT_SOURCE_EXTERNAL_TYPE,
- on_heap_size + off_heap_size,
- ObjectStats::kNoOverAllocation);
- } else if (source->IsHeapObject()) {
+    // them manually. The on-heap String object is recorded independently in
+ // the normal pass.
+ ExternalString* string = ExternalString::cast(raw_source);
+ Address resource = string->resource_as_address();
+ size_t off_heap_size = string->ExternalPayloadSize();
+ RecordExternalResourceStats(
+ resource,
+ string->IsOneByteRepresentation()
+ ? ObjectStats::SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE
+ : ObjectStats::SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE,
+ off_heap_size);
+ } else if (raw_source->IsString()) {
+ String* source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
- script, HeapObject::cast(source),
- ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TYPE);
+ script, HeapObject::cast(raw_source),
+ source->IsOneByteRepresentation()
+ ? ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE
+ : ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE);
}
}
+void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
+ ExternalString* string) {
+ // Track the external string resource size in a separate category.
+
+ Address resource = string->resource_as_address();
+ size_t off_heap_size = string->ExternalPayloadSize();
+ RecordExternalResourceStats(
+ resource,
+ string->IsOneByteRepresentation()
+ ? ObjectStats::STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE
+ : ObjectStats::STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE,
+ off_heap_size);
+}
+
void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
SharedFunctionInfo* info) {
// Uncompiled SharedFunctionInfo gets its own category.
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index a21b7f749f..7914f09881 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -58,11 +58,15 @@
V(RETAINED_MAPS_TYPE) \
V(SCRIPT_LIST_TYPE) \
V(SCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
- V(SCRIPT_SOURCE_EXTERNAL_TYPE) \
- V(SCRIPT_SOURCE_NON_EXTERNAL_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE) \
+ V(SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE) \
+ V(SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE) \
V(SERIALIZED_OBJECTS_TYPE) \
V(SINGLE_CHARACTER_STRING_CACHE_TYPE) \
V(STRING_SPLIT_CACHE_TYPE) \
+ V(STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE) \
+ V(STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE) \
V(SOURCE_POSITION_TABLE_TYPE) \
V(UNCOMPILED_JS_FUNCTION_TYPE) \
V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 54229416b7..f32bbc1914 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -12,6 +12,7 @@
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
#include "src/objects-body-descriptors-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index cdb7c917b0..63ef8fb353 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -23,47 +23,48 @@ class DataHandler;
class JSArrayBuffer;
class JSRegExp;
class JSWeakCollection;
+class UncompiledDataWithoutPreParsedScope;
class UncompiledDataWithPreParsedScope;
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(DataHandler) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(JSArrayBuffer) \
- V(JSFunction) \
- V(JSObject) \
- V(JSWeakCollection) \
- V(Map) \
- V(Oddball) \
- V(PreParsedScopeData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledDataWithPreParsedScope) \
- V(WasmInstanceObject) \
- V(WeakCell)
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(DataHandler) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(JSArrayBuffer) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(Oddball) \
+ V(PreParsedScopeData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(Symbol) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithoutPreParsedScope) \
+ V(UncompiledDataWithPreParsedScope) \
+ V(WasmInstanceObject)
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 95b7b5b9d5..e59457b10d 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
-#include "src/assembler.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
+#include "src/reloc-info.h"
#include "src/v8memory.h"
namespace v8 {
@@ -284,8 +284,7 @@ class UpdateTypedSlotHelper {
callback(reinterpret_cast<MaybeObject**>(&code));
DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
- Memory::Address_at(entry_address) =
- reinterpret_cast<Code*>(code)->entry();
+ Memory<Address>(entry_address) = reinterpret_cast<Code*>(code)->entry();
}
return result;
}
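
Editorial note: the hunk above switches from the old Memory::Address_at helper to the templated Memory<Address> accessor. Its core is a typed view onto a raw address; a hedged sketch of that idea, not the exact V8 definition:

    #include <cstdint>

    using Address = uintptr_t;

    // Read or write a value of type T that lives at a raw address. The caller
    // is responsible for the address actually holding a T with suitable
    // alignment.
    template <typename T>
    inline T& MemoryAt(Address addr) {
      return *reinterpret_cast<T*>(addr);
    }

    // Usage mirroring the hunk above, overwriting a code-entry slot in place:
    //   MemoryAt<Address>(entry_address) = new_entry_address;
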
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index e581ebe571..649292085a 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -6,6 +6,8 @@
#define V8_HEAP_SCAVENGER_INL_H_
#include "src/heap/scavenger.h"
+
+#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
@@ -146,7 +148,8 @@ void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
ThinString* object, int object_size) {
if (!is_incremental_marking_) {
     // Loading actual is fine in a parallel setting as there is no write.
- HeapObject* actual = object->actual();
+ String* actual = object->actual();
+ object->set_length(0);
*slot = actual;
// ThinStrings always refer to internalized strings, which are
// always in old space.
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index d9f920ef7e..f8c6d496ce 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -202,5 +202,11 @@ void RootScavengeVisitor::ScavengePointer(Object** p) {
reinterpret_cast<HeapObject*>(object));
}
+RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
+ : scavenger_(scavenger) {}
+
+ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
+ : scavenger_(scavenger) {}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 847a5b07fc..4e6753f6ce 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -114,7 +114,7 @@ class Scavenger {
// filtering out non-HeapObjects and objects which do not reside in new space.
class RootScavengeVisitor final : public RootVisitor {
public:
- explicit RootScavengeVisitor(Scavenger* scavenger) : scavenger_(scavenger) {}
+ explicit RootScavengeVisitor(Scavenger* scavenger);
void VisitRootPointer(Root root, const char* description, Object** p) final;
void VisitRootPointers(Root root, const char* description, Object** start,
@@ -128,7 +128,7 @@ class RootScavengeVisitor final : public RootVisitor {
class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
public:
- explicit ScavengeVisitor(Scavenger* scavenger) : scavenger_(scavenger) {}
+ explicit ScavengeVisitor(Scavenger* scavenger);
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index fdb142ab56..2742cd9c9d 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -21,6 +21,7 @@
#include "src/objects/debug-objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/microtask.h"
#include "src/objects/module.h"
@@ -108,15 +109,6 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
Map::cast(result), instance_type, instance_size, elements_kind,
inobject_properties);
- if (!is_js_object) {
- // Eagerly initialize the WeakCell cache for the map as it will not be
- // writable in RO_SPACE.
- HandleScope handle_scope(isolate());
- Handle<WeakCell> weak_cell = isolate()->factory()->NewWeakCell(
- Handle<Map>(map, isolate()), TENURED_READ_ONLY);
- map->set_weak_cell_cache(*weak_cell);
- }
-
return map;
}
@@ -148,14 +140,13 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::OwnsDescriptorsBit::encode(true) |
Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
- map->set_weak_cell_cache(Smi::kZero);
map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
return map;
}
void Heap::FinalizePartialMap(Map* map) {
ReadOnlyRoots roots(this);
- map->set_dependent_code(DependentCode::cast(roots.empty_fixed_array()));
+ map->set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
map->set_instance_descriptors(roots.empty_descriptor_array());
if (FLAG_unbox_double_fields) {
@@ -163,13 +154,6 @@ void Heap::FinalizePartialMap(Map* map) {
}
map->set_prototype(roots.null_value());
map->set_constructor_or_backpointer(roots.null_value());
-
- // Eagerly initialize the WeakCell cache for the map as it will not be
- // writable in RO_SPACE.
- HandleScope handle_scope(isolate());
- Handle<WeakCell> weak_cell = isolate()->factory()->NewWeakCell(
- Handle<Map>(map, isolate()), TENURED_READ_ONLY);
- map->set_weak_cell_cache(*weak_cell);
}
AllocationResult Heap::Allocate(Map* map, AllocationSpace space) {
@@ -242,7 +226,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
- ALLOCATE_PARTIAL_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell);
#undef ALLOCATE_PARTIAL_MAP
}
@@ -335,7 +318,6 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
FinalizePartialMap(roots.meta_map());
- FinalizePartialMap(roots.weak_cell_map());
FinalizePartialMap(roots.fixed_array_map());
FinalizePartialMap(roots.weak_fixed_array_map());
FinalizePartialMap(roots.weak_array_list_map());
@@ -426,8 +408,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
- ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype) \
+ ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
@@ -542,6 +524,20 @@ bool Heap::CreateInitialMaps() {
ObjectBoilerplateDescription::cast(obj));
{
+ // Empty array boilerplate description
+ AllocationResult alloc =
+ Allocate(roots.array_boilerplate_description_map(), RO_SPACE);
+ if (!alloc.To(&obj)) return false;
+
+ ArrayBoilerplateDescription::cast(obj)->set_constant_elements(
+ roots.empty_fixed_array());
+ ArrayBoilerplateDescription::cast(obj)->set_elements_kind(
+ ElementsKind::PACKED_SMI_ELEMENTS);
+ }
+ set_empty_array_boilerplate_description(
+ ArrayBoilerplateDescription::cast(obj));
+
+ {
AllocationResult allocation = Allocate(roots.boolean_map(), RO_SPACE);
if (!allocation.To(&obj)) return false;
}
@@ -573,7 +569,7 @@ bool Heap::CreateInitialMaps() {
set_empty_property_array(PropertyArray::cast(obj));
}
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
{ \
FixedTypedArrayBase* obj; \
if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
@@ -770,20 +766,13 @@ void Heap::CreateInitialObjects() {
set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
}
- {
- Handle<WeakCell> cell =
- factory->NewWeakCell(factory->undefined_value(), TENURED_READ_ONLY);
- set_empty_weak_cell(*cell);
- cell->clear();
- }
-
- set_detached_contexts(roots.empty_fixed_array());
+ set_detached_contexts(roots.empty_weak_array_list());
set_retained_maps(roots.empty_weak_array_list());
- set_retaining_path_targets(roots.undefined_value());
+ set_retaining_path_targets(roots.empty_weak_array_list());
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
- set_script_list(Smi::kZero);
+ set_script_list(roots.empty_weak_array_list());
Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New(
isolate(), 1, TENURED_READ_ONLY, USE_CUSTOM_MINIMUM_CAPACITY);
@@ -883,9 +872,7 @@ void Heap::CreateInitialObjects() {
set_serialized_objects(roots.empty_fixed_array());
set_serialized_global_proxy_sizes(roots.empty_fixed_array());
- set_weak_stack_trace_list(Smi::kZero);
-
- set_noscript_shared_function_infos(Smi::kZero);
+ set_noscript_shared_function_infos(roots.empty_weak_array_list());
STATIC_ASSERT(interpreter::BytecodeOperands::kOperandScaleCount == 3);
set_deserialize_lazy_handler(Smi::kZero);
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 2ddcf6cf36..9e86905d00 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -468,13 +468,6 @@ V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
return AllocateRaw(size_in_bytes, alignment);
}
-
-LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
- return LocalAllocationBuffer(
- nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
-}
-
-
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 59ce145474..ff28ab56b2 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -17,10 +17,13 @@
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
@@ -799,6 +802,14 @@ bool MemoryChunk::IsPagedSpace() const {
return owner()->identity() != LO_SPACE;
}
+bool MemoryChunk::InOldSpace() const {
+ return owner()->identity() == OLD_SPACE;
+}
+
+bool MemoryChunk::InLargeObjectSpace() const {
+ return owner()->identity() == LO_SPACE;
+}
+
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
@@ -939,9 +950,11 @@ void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
if (is_marking) {
SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
} else {
ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
}
}
@@ -949,8 +962,10 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
if (is_marking) {
SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
} else {
ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
}
}
@@ -1234,7 +1249,7 @@ void MemoryAllocator::ZapBlock(Address start, size_t size,
DCHECK_EQ(start % kPointerSize, 0);
DCHECK_EQ(size % kPointerSize, 0);
for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
- Memory::Address_at(start + s) = static_cast<Address>(zap_value);
+ Memory<Address>(start + s) = static_cast<Address>(zap_value);
}
}
@@ -1426,6 +1441,22 @@ void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
}
}
+void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
+ HeapObject* new_start) {
+ DCHECK_LT(old_start, new_start);
+ DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
+ MemoryChunk::FromHeapObject(new_start));
+ if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
+ auto it = invalidated_slots()->find(old_start);
+ if (it != invalidated_slots()->end()) {
+ int old_size = it->second;
+ int delta = static_cast<int>(new_start->address() - old_start->address());
+ invalidated_slots()->erase(it);
+ (*invalidated_slots())[new_start] = old_size - delta;
+ }
+ }
+}
+
void MemoryChunk::ReleaseLocalTracker() {
DCHECK_NOT_NULL(local_tracker_);
delete local_tracker_;
@@ -1989,7 +2020,11 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
- if (object->IsJSArrayBuffer()) {
+ if (object->IsExternalString()) {
+ ExternalString* external_string = ExternalString::cast(object);
+ size_t size = external_string->ExternalPayloadSize();
+ external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object->IsJSArrayBuffer()) {
JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = NumberToSize(array_buffer->byte_length());
@@ -2473,7 +2508,11 @@ void NewSpace::Verify(Isolate* isolate) {
int size = object->Size();
object->IterateBody(map, size, &visitor);
- if (object->IsJSArrayBuffer()) {
+ if (object->IsExternalString()) {
+ ExternalString* external_string = ExternalString::cast(object);
+ size_t size = external_string->ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object->IsJSArrayBuffer()) {
JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
size_t size = NumberToSize(array_buffer->byte_length());
@@ -2574,7 +2613,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const size_t delta = new_capacity - current_capacity_;
@@ -2613,7 +2652,7 @@ void SemiSpace::RewindPages(int num_pages) {
}
bool SemiSpace::ShrinkTo(size_t new_capacity) {
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
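The new MemoryChunk::MoveObjectWithInvalidatedSlots above rebases an invalidated-slots entry when an object is left-trimmed in place: the entry keyed by the old start is dropped and re-inserted under the new start, with the recorded size reduced by the trimmed prefix. A minimal sketch of that bookkeeping, assuming a plain std::map in place of V8's InvalidatedSlots type and uintptr_t in place of HeapObject*:

    #include <cassert>
    #include <cstdint>
    #include <map>

    using Address = uintptr_t;
    // Maps an object's start address to its recorded size in bytes.
    using InvalidatedSlots = std::map<Address, int>;

    // Rebase the entry for an object whose start moved forward by left-trimming.
    void MoveInvalidatedEntry(InvalidatedSlots& slots, Address old_start,
                              Address new_start) {
      assert(old_start < new_start);
      auto it = slots.find(old_start);
      if (it == slots.end()) return;
      int old_size = it->second;
      int delta = static_cast<int>(new_start - old_start);
      slots.erase(it);
      slots[new_start] = old_size - delta;  // The trimmed prefix no longer counts.
    }

    int main() {
      InvalidatedSlots slots;
      slots[0x1000] = 128;                          // 128-byte object at 0x1000.
      MoveInvalidatedEntry(slots, 0x1000, 0x1040);  // Trim 64 bytes off the front.
      assert(slots.count(0x1000) == 0);
      assert(slots[0x1040] == 64);                  // Remaining size is 128 - 64.
      return 0;
    }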
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index dbd0d82008..47272501f3 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -110,7 +110,7 @@ class Space;
// Some assertion macros used in the debugging mode.
#define DCHECK_PAGE_ALIGNED(address) \
- DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+ DCHECK((OffsetFrom(address) & kPageAlignmentMask) == 0)
#define DCHECK_OBJECT_ALIGNED(address) \
DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
@@ -312,7 +312,11 @@ class MemoryChunk {
// |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
// to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17
+ SWEEP_TO_ITERATE = 1u << 17,
+
+ // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
+ // enabled.
+ INCREMENTAL_MARKING = 1u << 18
};
using Flags = uintptr_t;
@@ -403,7 +407,6 @@ class MemoryChunk {
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
@@ -512,6 +515,9 @@ class MemoryChunk {
InvalidatedSlots* AllocateInvalidatedSlots();
void ReleaseInvalidatedSlots();
void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
+ // Updates invalidated_slots after array left-trimming.
+ void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
+ HeapObject* new_start);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void ReleaseLocalTracker();
@@ -623,6 +629,10 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
+ bool InOldSpace() const;
+
+ bool InLargeObjectSpace() const;
+
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
@@ -758,7 +768,8 @@ class Page : public MemoryChunk {
// Page flags copied from from-space to to-space when flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+ static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
@@ -766,6 +777,10 @@ class Page : public MemoryChunk {
static Page* FromAddress(Address addr) {
return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
}
+ static Page* FromHeapObject(const HeapObject* o) {
+ return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
+ ~kAlignmentMask);
+ }
// Returns the page containing the address provided. The address can
  // potentially point right after the page. To also be safe for tagged values
@@ -1196,7 +1211,7 @@ class SkipList {
}
static inline int RegionNumber(Address addr) {
- return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+ return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
}
static void Update(Address addr, int size) {
@@ -1990,7 +2005,10 @@ class LocalAllocationBuffer {
// Indicates that a buffer cannot be used for allocations anymore. Can result
// from either reassigning a buffer, or trying to construct it from an
// invalid {AllocationResult}.
- static inline LocalAllocationBuffer InvalidBuffer();
+ static LocalAllocationBuffer InvalidBuffer() {
+ return LocalAllocationBuffer(
+ nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
+ }
// Creates a new LAB from a given {AllocationResult}. Results in
// InvalidBuffer if the result indicates a retry.
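The new INCREMENTAL_MARKING bit follows the same page-flag scheme as the existing flags and is now included in kCopyOnFlipFlagsMask, so it survives a semispace flip. A rough sketch of how such flag bits behave, with made-up bit positions rather than V8's real MemoryChunk layout:

    #include <cassert>
    #include <cstdint>

    using Flags = uintptr_t;

    // Illustrative flag values only; the real bit positions live in MemoryChunk.
    constexpr Flags kPointersToHereAreInteresting = 1u << 1;
    constexpr Flags kPointersFromHereAreInteresting = 1u << 2;
    constexpr Flags kIncrementalMarking = 1u << 18;

    // Flags preserved when a page moves from from-space to to-space.
    constexpr Flags kCopyOnFlipFlagsMask = kPointersToHereAreInteresting |
                                           kPointersFromHereAreInteresting |
                                           kIncrementalMarking;

    int main() {
      Flags from_space_page =
          kPointersFromHereAreInteresting | kIncrementalMarking;
      Flags to_space_page = 0;
      // On flip, only the masked flags are copied over.
      to_space_page = (to_space_page & ~kCopyOnFlipFlagsMask) |
                      (from_space_page & kCopyOnFlipFlagsMask);
      assert(to_space_page & kIncrementalMarking);
      return 0;
    }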
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
new file mode 100644
index 0000000000..4609c83ca0
--- /dev/null
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_STORE_BUFFER_INL_H_
+#define V8_HEAP_STORE_BUFFER_INL_H_
+
+#include "src/heap/store-buffer.h"
+
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void StoreBuffer::InsertDeletionIntoStoreBuffer(Address start, Address end) {
+ if (top_ + sizeof(Address) * 2 > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = MarkDeletionAddress(start);
+ top_++;
+ *top_ = end;
+ top_++;
+}
+
+void StoreBuffer::InsertIntoStoreBuffer(Address slot) {
+ if (top_ + sizeof(Address) > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = slot;
+ top_++;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_STORE_BUFFER_INL_H_
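The inlined helpers in the new store-buffer-inl.h append raw addresses to a bump-pointer buffer, flushing first when a one- or two-word entry would not fit; a deletion is encoded as a tagged start address followed by the end address. A self-contained sketch of that layout, with a fixed-size std::array standing in for the reserved store-buffer pages and a stub flush in place of StoreBufferOverflow:

    #include <array>
    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;
    constexpr Address kDeletionTag = 1;  // Assumed low-bit tag, as in store-buffer.h.

    struct MiniStoreBuffer {
      std::array<Address, 64> storage{};
      Address* top = storage.data();
      Address* limit = storage.data() + storage.size();

      void Flush() { top = storage.data(); }  // Stand-in for StoreBufferOverflow.

      void InsertSlot(Address slot) {
        if (top + 1 > limit) Flush();
        *top++ = slot;
      }

      void InsertDeletion(Address start, Address end) {
        if (top + 2 > limit) Flush();
        *top++ = start | kDeletionTag;  // MarkDeletionAddress(start)
        *top++ = end;                   // end == 0 means "single slot".
      }
    };

    int main() {
      MiniStoreBuffer buffer;
      buffer.InsertSlot(0x2000);
      buffer.InsertDeletion(0x3000, 0x3040);
      assert(buffer.storage[1] == (0x3000 | kDeletionTag));
      assert(buffer.storage[2] == 0x3040);
      return 0;
    }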
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index d73e3235c1..b428a82046 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -10,6 +10,7 @@
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"
@@ -30,22 +31,28 @@ StoreBuffer::StoreBuffer(Heap* heap)
}
void StoreBuffer::SetUp() {
- // Allocate 3x the buffer size, so that we can start the new store buffer
- // aligned to 2x the size. This lets us use a bit test to detect the end of
- // the area.
+ const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+ // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
+ // use a bit test to detect the ends of the buffers.
+ const size_t alignment =
+ std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+ void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
VirtualMemory reservation;
- if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
- &reservation)) {
+ if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
+ &reservation)) {
heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
+
Address start = reservation.address();
- start_[0] = reinterpret_cast<Address*>(::RoundUp(start, kStoreBufferSize));
+ const size_t allocated_size = reservation.size();
+
+ start_[0] = reinterpret_cast<Address*>(start);
limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
start_[1] = limit_[0];
limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
- Address* vm_limit = reinterpret_cast<Address*>(start + reservation.size());
-
+ // Sanity check the buffers.
+ Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
USE(vm_limit);
for (int i = 0; i < kStoreBuffers; i++) {
DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
@@ -55,8 +62,9 @@ void StoreBuffer::SetUp() {
DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
}
- if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
- kStoreBufferSize * kStoreBuffers,
+ // Set RW permissions only on the pages we use.
+ const size_t used_size = RoundUp(requested_size, CommitPageSize());
+ if (!reservation.SetPermissions(start, used_size,
PageAllocator::kReadWrite)) {
heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
@@ -65,7 +73,6 @@ void StoreBuffer::SetUp() {
virtual_memory_.TakeControl(&reservation);
}
-
void StoreBuffer::TearDown() {
if (virtual_memory_.IsReserved()) virtual_memory_.Free();
top_ = nullptr;
@@ -76,6 +83,48 @@ void StoreBuffer::TearDown() {
}
}
+void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+ Address end) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertDeletionIntoStoreBuffer(start, end);
+}
+
+void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertIntoStoreBuffer(slot);
+}
+
+void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address start, Address end) {
+  // During GC the store buffer has to be empty at all times.
+ DCHECK(store_buffer->Empty());
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ Page* page = Page::FromAddress(start);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, start);
+ }
+}
+
+void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address slot) {
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+}
+
+void StoreBuffer::SetMode(StoreBufferMode mode) {
+ mode_ = mode;
+ if (mode == NOT_IN_GC) {
+ insertion_callback = &InsertDuringRuntime;
+ deletion_callback = &DeleteDuringRuntime;
+ } else {
+ insertion_callback = &InsertDuringGarbageCollection;
+ deletion_callback = &DeleteDuringGarbageCollection;
+ }
+}
+
int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->FlipStoreBuffers();
isolate->counters()->store_buffer_overflows()->Increment();
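The reworked StoreBuffer::SetUp reserves exactly two buffers' worth of memory, aligned to at least kStoreBufferSize, instead of over-reserving 3x and rounding up; the alignment is what keeps the end of each buffer detectable with a mask test on the pointer. A small sketch of the address arithmetic involved, using illustrative constants rather than V8's AllocVirtualMemory machinery:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    constexpr size_t kStoreBufferSize = 1u << 16;  // Illustrative size only.
    constexpr size_t kStoreBuffers = 2;
    constexpr uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

    int main() {
      size_t page_size = 4096;  // Assumed OS allocation page size.
      size_t alignment = std::max(kStoreBufferSize, page_size);

      // Pretend the allocator handed back a reservation at this aligned base.
      Address start = 16 * alignment;
      Address start0 = start;
      Address limit0 = start0 + kStoreBufferSize;
      Address start1 = limit0;
      Address limit1 = start1 + kStoreBufferSize;

      // Because the base is kStoreBufferSize-aligned, each limit is too, and
      // the end of a buffer can be recognized with a single bit test.
      assert((limit0 & kStoreBufferMask) == 0);
      assert((limit1 & kStoreBufferMask) == 0);
      assert(limit1 - start == kStoreBufferSize * kStoreBuffers);
      return 0;
    }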
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index e447b2f74e..d2c0f9b75f 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -34,6 +34,15 @@ class StoreBuffer {
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
+ static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address start, Address end);
+ static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address slot);
+
+ static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+ Address end);
+ static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);
+
explicit StoreBuffer(Heap* heap);
void SetUp();
void TearDown();
@@ -61,6 +70,16 @@ class StoreBuffer {
return address & ~kDeletionTag;
}
+ inline void InsertDeletionIntoStoreBuffer(Address start, Address end);
+ inline void InsertIntoStoreBuffer(Address slot);
+
+ void InsertEntry(Address slot) {
+ // Insertions coming from the GC are directly inserted into the remembered
+ // set. Insertions coming from the runtime are added to the store buffer to
+ // allow concurrent processing.
+ insertion_callback(this, slot);
+ }
+
// If we only want to delete a single slot, end should be set to null which
// will be written into the second field. When processing the store buffer
// the more efficient Remove method will be called in this case.
@@ -71,72 +90,7 @@ class StoreBuffer {
deletion_callback(this, start, end);
}
- static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
- Address start, Address end) {
- // In GC the store buffer has to be empty at any time.
- DCHECK(store_buffer->Empty());
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- Page* page = Page::FromAddress(start);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(page, start);
- }
- }
-
- static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
- Address end) {
- DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
- store_buffer->InsertDeletionIntoStoreBuffer(start, end);
- }
-
- void InsertDeletionIntoStoreBuffer(Address start, Address end) {
- if (top_ + sizeof(Address) * 2 > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = MarkDeletionAddress(start);
- top_++;
- *top_ = end;
- top_++;
- }
-
- static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
- Address slot) {
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
- }
-
- static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
- DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
- store_buffer->InsertIntoStoreBuffer(slot);
- }
-
- void InsertIntoStoreBuffer(Address slot) {
- if (top_ + sizeof(Address) > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = slot;
- top_++;
- }
-
- void InsertEntry(Address slot) {
- // Insertions coming from the GC are directly inserted into the remembered
- // set. Insertions coming from the runtime are added to the store buffer to
- // allow concurrent processing.
- insertion_callback(this, slot);
- }
-
- void SetMode(StoreBufferMode mode) {
- mode_ = mode;
- if (mode == NOT_IN_GC) {
- insertion_callback = &InsertDuringRuntime;
- deletion_callback = &DeleteDuringRuntime;
- } else {
- insertion_callback = &InsertDuringGarbageCollection;
- deletion_callback = &DeleteDuringGarbageCollection;
- }
- }
+ void SetMode(StoreBufferMode mode);
// Used by the concurrent processing thread to transfer entries from the
// store buffer to the remembered set.
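InsertEntry and DeleteEntry stay inline in the header but route through function pointers that SetMode swaps between the runtime and GC implementations, whose bodies have moved into store-buffer.cc. A stripped-down sketch of that dispatch pattern, independent of the real StoreBuffer class:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;

    class MiniBuffer {
     public:
      enum Mode { NOT_IN_GC, IN_GC };

      MiniBuffer() { SetMode(NOT_IN_GC); }

      void SetMode(Mode mode) {
        mode_ = mode;
        insertion_callback_ =
            (mode == NOT_IN_GC) ? &InsertDuringRuntime : &InsertDuringGC;
      }

      // Callers never branch on the mode; the callback already encodes it.
      void InsertEntry(Address slot) { insertion_callback_(this, slot); }

      const std::vector<Address>& runtime_entries() const { return runtime_; }
      const std::vector<Address>& gc_entries() const { return gc_; }

     private:
      static void InsertDuringRuntime(MiniBuffer* buffer, Address slot) {
        buffer->runtime_.push_back(slot);  // Buffered for concurrent processing.
      }
      static void InsertDuringGC(MiniBuffer* buffer, Address slot) {
        buffer->gc_.push_back(slot);       // Goes straight to the remembered set.
      }

      Mode mode_ = NOT_IN_GC;
      void (*insertion_callback_)(MiniBuffer*, Address) = nullptr;
      std::vector<Address> runtime_;
      std::vector<Address> gc_;
    };

    int main() {
      MiniBuffer buffer;
      buffer.InsertEntry(0x1000);
      buffer.SetMode(MiniBuffer::IN_GC);
      buffer.InsertEntry(0x2000);
      assert(buffer.runtime_entries().size() == 1);
      assert(buffer.gc_entries().size() == 1);
      return 0;
    }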
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 8b62213cb6..9e622c3385 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -279,7 +279,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
@@ -319,7 +319,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
- memset(reinterpret_cast<void*>(free_start), 0xCC, size);
+ ZapCode(free_start, size);
}
if (free_list_mode == REBUILD_FREE_LIST) {
freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
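ZapCode replaces the raw memset in the sweeper with the same 0xCC (int3) fill, so freed code space still traps if it is ever executed. A trivial sketch of what such a helper could boil down to, under the assumption that the zap value is the single-byte int3 pattern:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    using Address = uintptr_t;
    constexpr uint8_t kZapByte = 0xCC;  // x86 int3; executing zapped memory traps.

    // Assumed shape of the helper: fill [start, start + size) with the zap byte.
    void ZapCode(Address start, size_t size) {
      std::memset(reinterpret_cast<void*>(start), kZapByte, size);
    }

    int main() {
      uint8_t block[16];
      ZapCode(reinterpret_cast<Address>(block), sizeof(block));
      for (uint8_t byte : block) assert(byte == kZapByte);
      return 0;
    }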
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 429953c0e6..1ce23129e5 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -53,8 +53,13 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
+ DCHECK_EQ(kApplyMask, (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY)));
if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_) ||
- rmode_ == RelocInfo::JS_TO_WASM_CALL) {
+ IsJsToWasmCall(rmode_) || IsOffHeapTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
} else if (IsInternalReference(rmode_)) {
@@ -89,38 +94,37 @@ int RelocInfo::target_address_size() {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(Memory::Object_at(pc_));
+ return HeapObject::cast(Memory<Object*>(pc_));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<HeapObject>::cast(Memory::Object_Handle_at(pc_));
+ return Handle<HeapObject>::cast(Memory<Handle<Object>>(pc_));
}
void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
+ Memory<Object*>(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->RecordWriteIntoCode(host(), this, target);
- heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- Memory::Address_at(pc_) = target;
+ Memory<Address>(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
@@ -128,7 +132,7 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
@@ -153,14 +157,15 @@ void RelocInfo::set_target_runtime_entry(Address target,
Address RelocInfo::target_off_heap_target() {
DCHECK(IsOffHeapTarget(rmode_));
- return Memory::Address_at(pc_);
+ return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = kNullAddress;
- } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
+ } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsOffHeapTarget(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(pc_, constant_pool_,
pc_ + sizeof(int32_t));
@@ -313,7 +318,7 @@ void Assembler::emit_near_disp(Label* L) {
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
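The Memory::Address_at / Memory::Object_at helpers are replaced throughout by a single Memory<T> template that reinterprets an address as a reference to T. A plausible sketch of such an accessor, shown here for plain integer types rather than V8's Object*/Handle specializations:

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;

    // Reinterpret the bytes at |addr| as an lvalue of type T, so the same
    // helper serves both reads and writes (Memory<Address>(pc) = target;).
    template <typename T>
    T& Memory(Address addr) {
      return *reinterpret_cast<T*>(addr);
    }

    int main() {
      uint32_t word = 0;
      Address addr = reinterpret_cast<Address>(&word);
      Memory<uint32_t>(addr) = 0xDEADBEEF;  // Write through the typed view.
      assert(Memory<uint32_t>(addr) == 0xDEADBEEF);
      return 0;
    }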
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index a3a76b8b64..38b65c583f 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -189,16 +189,17 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on IA32 means that it is a relative address, as used by
// branch instructions. These are also the ones that need changing when a
// code object moves.
- return (1 << rmode_) & kApplyMask;
+ return RelocInfo::ModeMask(rmode_) & kApplyMask;
}
@@ -225,7 +226,7 @@ Address RelocInfo::js_to_wasm_address() const {
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
- return Memory::uint32_at(pc_);
+ return Memory<uint32_t>(pc_);
}
// -----------------------------------------------------------------------------
@@ -312,7 +313,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
- Memory::Object_Handle_at(pc) = object;
+ Memory<Handle<Object>>(pc) = object;
}
}
@@ -330,9 +331,7 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
// caller in which case we can't be sure it's okay to overwrite
// existing code in it.
#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
- }
+ if (own_buffer_) ZapCode(reinterpret_cast<Address>(buffer_), buffer_size_);
#endif
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -692,6 +691,14 @@ void Assembler::movzx_w(Register dst, Operand src) {
emit_operand(dst, src);
}
+void Assembler::movq(XMMRegister dst, Operand src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x7E);
+ emit_operand(dst, src);
+}
+
void Assembler::cmov(Condition cc, Register dst, Operand src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
@@ -785,6 +792,13 @@ void Assembler::cmpxchg_w(Operand dst, Register src) {
emit_operand(src, dst);
}
+void Assembler::cmpxchg8b(Operand dst) {
+  EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC7);
+ emit_operand(ecx, dst);
+}
+
void Assembler::lfence() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -1396,6 +1410,12 @@ void Assembler::xor_(Operand dst, const Immediate& x) {
emit_arith(6, dst, x);
}
+void Assembler::bswap(Register dst) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC8 + dst.code());
+}
+
void Assembler::bt(Operand dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -1606,22 +1626,12 @@ void Assembler::wasm_call(Address entry, RelocInfo::Mode rmode) {
emit(entry, rmode);
}
-int Assembler::CallSize(Operand adr) {
- // Call size is 1 (opcode) + adr.len_ (operand).
- return 1 + adr.len_;
-}
-
void Assembler::call(Operand adr) {
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(edx, adr);
}
-
-int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
-}
-
void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -3197,7 +3207,7 @@ void Assembler::GrowBuffer() {
  // Clear the buffer in debug mode. Use 'int3' instructions to make sure we
  // run into problems if we ever execute uninitialized code.
#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
+ ZapCode(reinterpret_cast<Address>(desc.buffer), desc.buffer_size);
#endif
// Copy the data.
@@ -3221,9 +3231,11 @@ void Assembler::GrowBuffer() {
*p += pc_delta;
}
- // Relocate js-to-wasm calls (which are encoded pc-relative).
- for (RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- !it.done(); it.next()) {
+ // Relocate pc-relative references.
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET);
+ DCHECK_EQ(mode_mask, RelocInfo::kApplyMask & mode_mask);
+ for (RelocIterator it(desc, mode_mask); !it.done(); it.next()) {
it.rinfo()->apply(pc_delta);
}
@@ -3259,11 +3271,29 @@ void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
}
void Assembler::emit_operand(Register reg, Operand adr) {
+ emit_operand(reg.code(), adr);
+}
+
+void Assembler::emit_operand(XMMRegister reg, Operand adr) {
+ Register ireg = Register::from_code(reg.code());
+ emit_operand(ireg, adr);
+}
+
+void Assembler::emit_operand(int code, Operand adr) {
+ // Isolate-independent code may not embed relocatable addresses.
+ DCHECK(!options().isolate_independent_code ||
+ adr.rmode_ != RelocInfo::CODE_TARGET);
+ DCHECK(!options().isolate_independent_code ||
+ adr.rmode_ != RelocInfo::EMBEDDED_OBJECT);
+ // TODO(jgruber,v8:6666): Enable once kRootRegister exists.
+ // DCHECK(!options().isolate_independent_code ||
+ // adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
+
const unsigned length = adr.len_;
DCHECK_GT(length, 0);
// Emit updated ModRM byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
+ pc_[0] = (adr.buf_[0] & ~0x38) | (code << 3);
// Emit the rest of the encoded operand.
for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
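The new Assembler::bswap and Assembler::cmpxchg8b emitters follow the standard IA-32 encodings: bswap r32 is 0F C8+r (the register code is folded into the opcode byte), and cmpxchg8b m64 is 0F C7 with /1 in the ModR/M reg field, which is why the operand is emitted with ecx (register code 1). A small standalone sketch of the opcode-byte arithmetic:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // IA-32 register codes (low three bits used in opcode / ModR/M encodings).
    enum RegCode : uint8_t { kEax = 0, kEcx = 1, kEdx = 2, kEbx = 3 };

    // bswap r32: 0F C8+r -- the register code is added into the opcode byte.
    std::vector<uint8_t> EncodeBswap(RegCode reg) {
      return {0x0F, static_cast<uint8_t>(0xC8 + reg)};
    }

    // cmpxchg8b m64: 0F C7 /1 -- "/1" means the ModR/M reg field holds 1
    // (ecx's code), while the r/m field describes the memory operand. This
    // encodes the simplest case: a bare base register, no displacement.
    std::vector<uint8_t> EncodeCmpxchg8b(RegCode base) {
      uint8_t modrm = static_cast<uint8_t>((0 << 6) | (kEcx << 3) | base);
      return {0x0F, 0xC7, modrm};
    }

    int main() {
      assert((EncodeBswap(kEbx) == std::vector<uint8_t>{0x0F, 0xCB}));
      assert((EncodeCmpxchg8b(kEbx) == std::vector<uint8_t>{0x0F, 0xC7, 0x0B}));
      return 0;
    }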
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 40a981f53f..69d243a749 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -141,6 +141,7 @@ typedef XMMRegister Simd128Register;
DOUBLE_REGISTERS(DEFINE_REGISTER)
#undef DEFINE_REGISTER
constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// Note that the bit values must match those used in actual instruction encoding
constexpr int kNumRegs = 8;
@@ -197,31 +198,6 @@ inline Condition NegateCondition(Condition cc) {
}
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- }
-}
-
-
enum RoundingMode {
kRoundToNearest = 0x0,
kRoundDown = 0x1,
@@ -270,6 +246,15 @@ class Immediate BASE_EMBEDDED {
return value_.immediate;
}
+ bool is_external_reference() const {
+ return rmode() == RelocInfo::EXTERNAL_REFERENCE;
+ }
+
+ ExternalReference external_reference() const {
+ DCHECK(is_external_reference());
+ return bit_cast<ExternalReference>(immediate());
+ }
+
bool is_zero() const { return RelocInfo::IsNone(rmode_) && immediate() == 0; }
bool is_int8() const {
return RelocInfo::IsNone(rmode_) && i::is_int8(immediate());
@@ -321,7 +306,7 @@ enum ScaleFactor {
times_twice_pointer_size = times_8
};
-class Operand {
+class V8_EXPORT_PRIVATE Operand {
public:
// reg
V8_INLINE explicit Operand(Register reg) { set_modrm(3, reg); }
@@ -361,16 +346,6 @@ class Operand {
RelocInfo::INTERNAL_REFERENCE);
}
- static Operand StaticVariable(const ExternalReference& ext) {
- return Operand(ext.address(), RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return Operand(index, scale, arr.address(), RelocInfo::EXTERNAL_REFERENCE);
- }
-
static Operand ForRegisterPlusImmediate(Register base, Immediate imm) {
return Operand(base, imm.value_.immediate, imm.rmode_);
}
@@ -412,9 +387,9 @@ class Operand {
byte buf_[6];
// The number of bytes in buf_.
- uint8_t len_;
+ uint8_t len_ = 0;
// Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
+ RelocInfo::Mode rmode_ = RelocInfo::NONE;
// TODO(clemensh): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
@@ -472,8 +447,7 @@ class Displacement BASE_EMBEDDED {
void init(Label* L, Type type);
};
-
-class Assembler : public AssemblerBase {
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
// space to write an instruction and its relocation information.
@@ -640,6 +614,7 @@ class Assembler : public AssemblerBase {
void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
void movzx_w(Register dst, Operand src);
+ void movq(XMMRegister dst, Operand src);
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
@@ -667,6 +642,7 @@ class Assembler : public AssemblerBase {
void cmpxchg(Operand dst, Register src);
void cmpxchg_b(Operand dst, Register src);
void cmpxchg_w(Operand dst, Register src);
+ void cmpxchg8b(Operand dst);
// Memory Fence
void lfence();
@@ -815,6 +791,7 @@ class Assembler : public AssemblerBase {
void xor_(Operand dst, const Immediate& x);
// Bit operations.
+ void bswap(Register dst);
void bt(Operand dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(Operand dst, Register src);
@@ -850,10 +827,8 @@ class Assembler : public AssemblerBase {
// Calls
void call(Label* L);
void call(Address entry, RelocInfo::Mode rmode);
- int CallSize(Operand adr);
void call(Register reg) { call(Operand(reg)); }
void call(Operand adr);
- int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code, RelocInfo::Mode rmode);
void call(CodeStub* stub);
void wasm_call(Address address, RelocInfo::Mode rmode);
@@ -1822,7 +1797,9 @@ class Assembler : public AssemblerBase {
// sel specifies the /n in the modrm byte (see the Intel PRM).
void emit_arith(int sel, Operand dst, const Immediate& x);
+ void emit_operand(int code, Operand adr);
void emit_operand(Register reg, Operand adr);
+ void emit_operand(XMMRegister reg, Operand adr);
void emit_label(Label* label);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index b7a704f359..7bfc0875cb 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_IA32
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
@@ -40,23 +40,25 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ push(Immediate(StackFrame::TypeToMarker(marker))); // marker
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- __ push(Operand::StaticVariable(context_address)); // context
+ __ push(__ StaticVariable(context_address)); // context
// Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
__ push(ebx);
+ __ InitializeRootRegister();
+
// Save copies of the top frame descriptor on the stack.
ExternalReference c_entry_fp =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- __ push(Operand::StaticVariable(c_entry_fp));
+ __ push(__ StaticVariable(c_entry_fp));
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp =
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, isolate());
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ cmp(__ StaticVariable(js_entry_sp), Immediate(0));
__ j(not_equal, &not_outermost_js, Label::kNear);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ mov(__ StaticVariable(js_entry_sp), ebp);
__ push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ jmp(&invoke, Label::kNear);
__ bind(&not_outermost_js);
@@ -71,7 +73,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel.
ExternalReference pending_exception = ExternalReference::Create(
IsolateAddressId::kPendingExceptionAddress, isolate());
- __ mov(Operand::StaticVariable(pending_exception), eax);
+ __ mov(__ StaticVariable(pending_exception), eax);
__ mov(eax, Immediate(isolate()->factory()->exception()));
__ jmp(&exit);
@@ -89,15 +91,18 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ PopStackHandler();
__ bind(&exit);
+
+ __ VerifyRootRegister();
+
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
__ cmp(ebx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ mov(__ StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
// Restore the top frame descriptor from the stack.
- __ pop(Operand::StaticVariable(ExternalReference::Create(
+ __ pop(__ StaticVariable(ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, isolate())));
// Restore callee-saved registers (C calling conventions).
@@ -197,9 +202,9 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
DCHECK(edx == function_address);
// Allocate HandleScope in callee-save registers.
- __ mov(ebx, Operand::StaticVariable(next_address));
- __ mov(edi, Operand::StaticVariable(limit_address));
- __ add(Operand::StaticVariable(level_address), Immediate(1));
+ __ mov(ebx, __ StaticVariable(next_address));
+ __ mov(edi, __ StaticVariable(limit_address));
+ __ add(__ StaticVariable(level_address), Immediate(1));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
@@ -251,10 +256,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- __ mov(Operand::StaticVariable(next_address), ebx);
- __ sub(Operand::StaticVariable(level_address), Immediate(1));
+ __ mov(__ StaticVariable(next_address), ebx);
+ __ sub(__ StaticVariable(level_address), Immediate(1));
__ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
- __ cmp(edi, Operand::StaticVariable(limit_address));
+ __ cmp(edi, __ StaticVariable(limit_address));
__ j(not_equal, &delete_allocated_handles);
// Leave the API exit frame.
@@ -267,7 +272,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate);
- __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ __ cmp(__ StaticVariable(scheduled_exception_address),
Immediate(isolate->factory()->the_hole_value()));
__ j(not_equal, &promote_scheduled_exception);
@@ -323,7 +328,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference delete_extensions =
ExternalReference::delete_handle_scope_extensions();
__ bind(&delete_allocated_handles);
- __ mov(Operand::StaticVariable(limit_address), edi);
+ __ mov(__ StaticVariable(limit_address), edi);
__ mov(edi, eax);
__ mov(Operand(esp, 0),
Immediate(ExternalReference::isolate_address(isolate)));
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 308943e923..2a66676c9c 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -36,7 +36,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
@@ -448,7 +448,7 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
// TODO(jkummerow): It would be nice to register this code creation event
diff --git a/deps/v8/src/ia32/constants-ia32.h b/deps/v8/src/ia32/constants-ia32.h
index 38ad1280f1..ed1104eef9 100644
--- a/deps/v8/src/ia32/constants-ia32.h
+++ b/deps/v8/src/ia32/constants-ia32.h
@@ -15,6 +15,10 @@ namespace internal {
// currently no root register is present.
constexpr int kRootRegisterBias = 0;
+// Used temporarily to track clobbering of the root register.
+// TODO(v8:6666): Remove this once we use the root register.
+constexpr size_t kRootRegisterSentinel = 0xcafeca11;
+
// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
} // namespace internal
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 259ad6a508..c9cc71f161 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -47,7 +47,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- __ mov(Operand::StaticVariable(c_entry_fp_address), ebp);
+ __ mov(masm()->StaticVariable(c_entry_fp_address), ebp);
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kPointerSize + kDoubleRegsSize + kFloatRegsSize;
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 42f699bf82..75cd9a258a 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -228,15 +228,16 @@ void InstructionTable::AddJumpConditionalShort() {
// The IA32 disassembler implementation.
class DisassemblerIA32 {
public:
- DisassemblerIA32(const NameConverter& converter,
- bool abort_on_unimplemented = true)
+ DisassemblerIA32(
+ const NameConverter& converter,
+ Disassembler::UnimplementedOpcodeAction unimplemented_opcode_action)
: converter_(converter),
vex_byte0_(0),
vex_byte1_(0),
vex_byte2_(0),
instruction_table_(InstructionTable::get_instance()),
tmp_buffer_pos_(0),
- abort_on_unimplemented_(abort_on_unimplemented) {
+ unimplemented_opcode_action_(unimplemented_opcode_action) {
tmp_buffer_[0] = '\0';
}
@@ -254,7 +255,7 @@ class DisassemblerIA32 {
InstructionTable* instruction_table_;
v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
+ Disassembler::UnimplementedOpcodeAction unimplemented_opcode_action_;
enum {
eax = 0,
@@ -392,8 +393,9 @@ class DisassemblerIA32 {
PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- UNIMPLEMENTED();
+ if (unimplemented_opcode_action_ ==
+ Disassembler::kAbortOnUnimplementedOpcode) {
+ FATAL("Unimplemented instruction in disassembler");
} else {
AppendToBuffer("'Unimplemented Instruction'");
}
@@ -1500,6 +1502,8 @@ static const char* F0Mnem(byte f0byte) {
return "bsf";
case 0xBD:
return "bsr";
+ case 0xC7:
+ return "cmpxchg8b";
default:
return nullptr;
}
@@ -1745,6 +1749,11 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
+ } else if (f0byte >= 0xC8 && f0byte <= 0xCF) {
+ // bswap
+ data += 2;
+ int reg = f0byte - 0xC8;
+ AppendToBuffer("bswap %s", NameOfCPURegister(reg));
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@@ -1801,6 +1810,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
data += PrintRightOperand(data);
+ } else if (f0byte == 0xC7) {
+ // cmpxchg8b
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
} else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
AppendToBuffer("lfence");
data += 3;
@@ -2490,6 +2506,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x5F:
mnem = "maxss";
break;
+ case 0x7E:
+ mnem = "movq";
+ break;
}
data += 3;
int mod, regop, rm;
@@ -2619,32 +2638,20 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
- DisassemblerIA32 d(converter_, false /*do not crash if unimplemented*/);
- return d.InstructionDecode(buffer, instruction);
-}
-
-int Disassembler::InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerIA32 d(converter_, true /*crash if unimplemented*/);
+ DisassemblerIA32 d(converter_, unimplemented_opcode_action());
return d.InstructionDecode(buffer, instruction);
}
// The IA-32 assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-
-/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+// static
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
NameConverter converter;
- Disassembler d(converter);
+ Disassembler d(converter, unimplemented_action);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index c230087618..71205b10d0 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -13,7 +13,14 @@ const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+ // TODO(jgruber,v8:6666): Keep kRootRegister free unconditionally.
+ constexpr Register default_stub_registers[] = {eax, ecx, edx, edi};
+ DCHECK(!AreAliased(eax, ecx, edx, edi, kRootRegister));
+#else
+ constexpr Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
+#endif
+ STATIC_ASSERT(arraysize(default_stub_registers) == kMaxBuiltinRegisterParams);
CHECK_LE(static_cast<size_t>(register_parameter_count),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(register_parameter_count,
@@ -247,30 +254,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (argc)
- ecx, // address of first argument (argv)
- ebx // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 72fc778966..5ff4b66e61 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -17,6 +17,7 @@
#include "src/frames-inl.h"
#include "src/instruction-stream.h"
#include "src/runtime/runtime.h"
+#include "src/snapshot/snapshot.h"
#include "src/ia32/assembler-ia32-inl.h"
#include "src/ia32/macro-assembler-ia32.h"
@@ -40,24 +41,35 @@ MacroAssembler::MacroAssembler(Isolate* isolate,
code_object_ = Handle<HeapObject>::New(
*isolate->factory()->NewSelfReferenceMarker(), isolate);
}
+
+#ifdef V8_EMBEDDED_BUILTINS
+ // Fake it as long as we use indirections through an embedded external
+ // reference. This will let us implement indirections without a real
+ // root register.
+ // TODO(jgruber, v8:6666): Remove once a real root register exists.
+ if (FLAG_embedded_builtins) set_root_array_available(true);
+#endif // V8_EMBEDDED_BUILTINS
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
+ // TODO(jgruber, v8:6666): Support loads through the root register once it
+ // exists.
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
Handle<Object> object = isolate()->heap()->root_handle(index);
- if (object->IsHeapObject()) {
- mov(destination, Handle<HeapObject>::cast(object));
- } else {
+ if (object->IsSmi()) {
mov(destination, Immediate(Smi::cast(*object)));
+ return;
+ } else if (!options().isolate_independent_code) {
+ DCHECK(object->IsHeapObject());
+ mov(destination, Handle<HeapObject>::cast(object));
+ return;
}
- return;
}
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(destination, Immediate(index));
- mov(destination, Operand::StaticArray(destination,
- times_pointer_size,
- roots_array_start));
+ mov(destination,
+ StaticArray(destination, times_pointer_size, roots_array_start));
}
void MacroAssembler::CompareRoot(Register with,
@@ -66,9 +78,7 @@ void MacroAssembler::CompareRoot(Register with,
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(index));
- cmp(with, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
+ cmp(with, StaticArray(scratch, times_pointer_size, roots_array_start));
}
@@ -102,6 +112,56 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
}
}
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
+ DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
+ Heap::kBuiltinsConstantsTableRootIndex));
+ // TODO(jgruber): LoadRoot should be a register-relative load once we have
+ // the kRootRegister.
+ LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
+ mov(destination,
+ FieldOperand(destination,
+ FixedArray::kHeaderSize + constant_index * kPointerSize));
+}
+
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ DCHECK(is_int32(offset));
+ // TODO(jgruber): Register-relative load once kRootRegister exists.
+ mov(destination, Immediate(ExternalReference::roots_array_start(isolate())));
+ if (offset != 0) {
+ add(destination, Immediate(offset));
+ }
+}
+
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ // TODO(jgruber): Register-relative load once kRootRegister exists.
+ LoadRootRegisterOffset(destination, offset);
+ mov(destination, Operand(destination, 0));
+}
+
+void TurboAssembler::LoadAddress(Register destination,
+ ExternalReference source) {
+ if (FLAG_embedded_builtins) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(destination, source);
+ return;
+ }
+ }
+ mov(destination, Immediate(source));
+}
+
+Operand TurboAssembler::StaticVariable(const ExternalReference& ext) {
+ // TODO(jgruber,v8:6666): Root-relative operand once kRootRegister exists.
+ return Operand(ext.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+Operand TurboAssembler::StaticArray(Register index, ScaleFactor scale,
+ const ExternalReference& ext) {
+ // TODO(jgruber,v8:6666): Root-relative operand once kRootRegister exists.
+ return Operand(index, scale, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
static constexpr Register saved_regs[] = {eax, ecx, edx};
static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
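Until ia32 has a real root register, the new TurboAssembler::StaticVariable and StaticArray build absolute-address operands carrying EXTERNAL_REFERENCE reloc info: an external variable read is a mov from [ext.address()], and an indexed roots-array load uses [index * scale + roots_array_start]. A worked sketch of the effective-address arithmetic the StaticArray form implies, with plain integers standing in for registers and ExternalReference:

    #include <cassert>
    #include <cstdint>

    using Address = uintptr_t;
    constexpr int kPointerSize = 4;  // ia32 pointer size.

    // Effective address of Operand(index, times_pointer_size, base):
    // base + index * scale. In the diff, base is roots_array_start and index
    // is the RootListIndex previously moved into the destination register.
    Address RootsArraySlot(Address roots_array_start, int root_index) {
      return roots_array_start + static_cast<Address>(root_index) * kPointerSize;
    }

    int main() {
      Address roots_array_start = 0x5000;  // Illustrative address only.
      assert(RootsArraySlot(roots_array_start, 0) == 0x5000);
      assert(RootsArraySlot(roots_array_start, 7) == 0x5000 + 7 * kPointerSize);
      return 0;
    }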
@@ -201,7 +261,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Skip barrier if writing a smi.
if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done, Label::kNear);
+ JumpIfSmi(value, &done);
}
// Although the object register is tagged, the offset is relative to the start
@@ -350,7 +410,7 @@ void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
- mov(ebx, Operand::StaticVariable(restart_fp));
+ mov(ebx, StaticVariable(restart_fp));
test(ebx, ebx);
j(not_zero, BUILTIN_CODE(isolate(), FrameDropperTrampoline),
RelocInfo::CODE_TARGET);
@@ -405,7 +465,7 @@ void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src) {
ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
- addsd(dst, Operand::StaticVariable(uint32_bias));
+ addsd(dst, StaticVariable(uint32_bias));
bind(&done);
}
@@ -680,9 +740,9 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
ExternalReference c_function_address =
ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), ebp);
- mov(Operand::StaticVariable(context_address), esi);
- mov(Operand::StaticVariable(c_function_address), ebx);
+ mov(StaticVariable(c_entry_fp_address), ebp);
+ mov(StaticVariable(context_address), esi);
+ mov(StaticVariable(c_function_address), ebx);
}
@@ -763,16 +823,15 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ mov(esi, StaticVariable(context_address));
#ifdef DEBUG
- mov(Operand::StaticVariable(context_address),
- Immediate(Context::kInvalidContext));
+ mov(StaticVariable(context_address), Immediate(Context::kInvalidContext));
#endif
// Clear the top frame.
ExternalReference c_entry_fp_address =
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
+ mov(StaticVariable(c_entry_fp_address), Immediate(0));
}
void MacroAssembler::LeaveApiExitFrame() {
@@ -793,10 +852,10 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
- push(Operand::StaticVariable(handler_address));
+ push(StaticVariable(handler_address));
// Set this new handler as the current one.
- mov(Operand::StaticVariable(handler_address), esp);
+ mov(StaticVariable(handler_address), esp);
}
@@ -804,14 +863,14 @@ void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address =
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
- pop(Operand::StaticVariable(handler_address));
+ pop(StaticVariable(handler_address));
add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
void MacroAssembler::CallStub(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void TurboAssembler::CallStubDelayed(CodeStub* stub) {
@@ -820,7 +879,7 @@ void TurboAssembler::CallStubDelayed(CodeStub* stub) {
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
@@ -839,8 +898,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Move(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference::Create(f)));
+ Move(kRuntimeCallArgCountRegister, Immediate(num_arguments));
+ Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
Handle<Code> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
Call(code, RelocInfo::CODE_TARGET);
@@ -853,9 +912,10 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Move(eax, Immediate(f->nargs));
- mov(ebx, Immediate(ExternalReference::Create(f)));
- DCHECK(!AreAliased(centry, eax, ebx));
+ Move(kRuntimeCallArgCountRegister, Immediate(f->nargs));
+ Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
+ DCHECK(!AreAliased(centry, kRuntimeCallArgCountRegister,
+ kRuntimeCallFunctionRegister));
add(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
Call(centry);
}
@@ -878,7 +938,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- mov(eax, Immediate(function->nargs));
+ Move(kRuntimeCallArgCountRegister, Immediate(function->nargs));
}
JumpToExternalReference(ExternalReference::Create(fid));
}
@@ -886,15 +946,14 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
+ Move(kRuntimeCallFunctionRegister, Immediate(ext));
Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
kArgvOnStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
- mov(kOffHeapTrampolineRegister, Immediate(entry, RelocInfo::OFF_HEAP_TARGET));
- jmp(kOffHeapTrampolineRegister);
+ jmp(entry, RelocInfo::OFF_HEAP_TARGET);
}
void TurboAssembler::PrepareForTailCall(
@@ -1022,12 +1081,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
if (flag == CALL_FUNCTION) {
- call(adaptor, RelocInfo::CODE_TARGET);
+ Call(adaptor, RelocInfo::CODE_TARGET);
if (!*definitely_mismatches) {
jmp(done, done_near);
}
} else {
- jmp(adaptor, RelocInfo::CODE_TARGET);
+ Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&invoke);
}
@@ -1040,7 +1099,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
- cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
+ cmpb(StaticVariable(debug_hook_active), Immediate(0));
j(equal, &skip_hook);
{
@@ -1196,18 +1255,24 @@ void TurboAssembler::Move(Register dst, Register src) {
}
}
-void TurboAssembler::Move(Register dst, const Immediate& x) {
- if (!x.is_heap_object_request() && x.is_zero()) {
+void TurboAssembler::Move(Register dst, const Immediate& src) {
+ if (!src.is_heap_object_request() && src.is_zero()) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
+ } else if (src.is_external_reference()) {
+ LoadAddress(dst, src.external_reference());
} else {
- mov(dst, x);
+ mov(dst, src);
}
}
-void TurboAssembler::Move(Operand dst, const Immediate& x) { mov(dst, x); }
+void TurboAssembler::Move(Operand dst, const Immediate& src) { mov(dst, src); }
-void TurboAssembler::Move(Register dst, Handle<HeapObject> object) {
- mov(dst, object);
+void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, src);
+ return;
+ }
+ mov(dst, src);
}
void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
@@ -1524,8 +1589,7 @@ void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand =
- Operand::StaticVariable(ExternalReference::Create(counter));
+ Operand operand = StaticVariable(ExternalReference::Create(counter));
if (value == 1) {
inc(operand);
} else {
@@ -1538,8 +1602,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand =
- Operand::StaticVariable(ExternalReference::Create(counter));
+ Operand operand = StaticVariable(ExternalReference::Create(counter));
if (value == 1) {
dec(operand);
} else {
@@ -1591,6 +1654,15 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ PrepareCallCFunction(1, eax);
+ mov(Operand(esp, 0), Immediate(static_cast<int>(reason)));
+ CallCFunction(ExternalReference::abort_with_reason(), 1);
+ return;
+ }
+
Move(edx, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -1645,6 +1717,62 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
}
+void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+ if (FLAG_embedded_builtins) {
+ // TODO(jgruber): Figure out which register we can clobber here.
+ // TODO(jgruber): Pc-relative builtin-to-builtin calls.
+ Register scratch = kOffHeapTrampolineRegister;
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(scratch, code_object);
+ lea(scratch, FieldOperand(scratch, Code::kHeaderSize));
+ call(scratch);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ call(entry, RelocInfo::OFF_HEAP_TARGET);
+ return;
+ }
+ }
+ }
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ call(code_object, rmode);
+}
+
+void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
+ if (FLAG_embedded_builtins) {
+ // TODO(jgruber): Figure out which register we can clobber here.
+ // TODO(jgruber): Pc-relative builtin-to-builtin calls.
+ Register scratch = kOffHeapTrampolineRegister;
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(scratch, code_object);
+ lea(scratch, FieldOperand(scratch, Code::kHeaderSize));
+ jmp(scratch);
+ return;
+ } else if (options().inline_offheap_trampolines) {
+ int builtin_index = Builtins::kNoBuiltinId;
+ if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
+ Builtins::IsIsolateIndependent(builtin_index)) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob();
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ jmp(entry, RelocInfo::OFF_HEAP_TARGET);
+ return;
+ }
+ }
+ }
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ jmp(code_object, rmode);
+}
+
void TurboAssembler::RetpolineCall(Register reg) {
Label setup_return, setup_target, inner_indirect_branch, capture_spec;
@@ -1699,43 +1827,14 @@ void TurboAssembler::RetpolineJump(Register reg) {
ret(0);
}
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
-
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
if (scratch == object) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, Immediate(~kPageAlignmentMask));
} else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
+ mov(scratch, Immediate(~kPageAlignmentMask));
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 5d2b9bb0af..8ad92a9d1d 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -31,13 +31,24 @@ constexpr Register kJavaScriptCallArgCountRegister = eax;
constexpr Register kJavaScriptCallCodeStartRegister = ecx;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = edx;
-constexpr Register kJavaScriptCallExtraArg1Register = ebx;
-constexpr Register kOffHeapTrampolineRegister = ecx;
-constexpr Register kRuntimeCallFunctionRegister = ebx;
+// The ExtraArg1Register is not part of the real JS calling convention and is
+// mostly there to simplify consistent interface descriptor definitions across
+// platforms. Note that on ia32 it aliases kJavaScriptCallCodeStartRegister.
+constexpr Register kJavaScriptCallExtraArg1Register = ecx;
+
+// The off-heap trampoline does not need a register on ia32 (it uses a
+// pc-relative call instead).
+constexpr Register kOffHeapTrampolineRegister = no_reg;
+
+constexpr Register kRuntimeCallFunctionRegister = edx;
constexpr Register kRuntimeCallArgCountRegister = eax;
+constexpr Register kRuntimeCallArgvRegister = ecx;
constexpr Register kWasmInstanceRegister = esi;
+// TODO(v8:6666): Implement full support.
+constexpr Register kRootRegister = ebx;
+
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -45,16 +56,7 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
-
-#ifdef DEBUG
-bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
- Register reg4 = no_reg, Register reg5 = no_reg,
- Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg);
-#endif
-
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -104,18 +106,30 @@ class TurboAssembler : public TurboAssemblerBase {
// Check that the stack is aligned.
void CheckStackAlignment();
- // Nop, because ia32 does not have a root register.
- void InitializeRootRegister() {}
-
- // Move a constant into a destination using the most efficient encoding.
- void Move(Register dst, const Immediate& x);
-
- void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
+ void InitializeRootRegister() {
+ // For now, only check sentinel value for root register.
+ // TODO(jgruber,v8:6666): Implement root register.
+ if (FLAG_ia32_verify_root_register && FLAG_embedded_builtins) {
+ mov(kRootRegister, kRootRegisterSentinel);
+ }
+ }
- // Move if the registers are not identical.
- void Move(Register target, Register source);
+ void VerifyRootRegister() {
+ if (FLAG_ia32_verify_root_register && FLAG_embedded_builtins) {
+ Label root_register_ok;
+ cmp(kRootRegister, kRootRegisterSentinel);
+ j(equal, &root_register_ok);
+ int3();
+ bind(&root_register_ok);
+ }
+ }
- void Move(Operand dst, const Immediate& x);
+ // Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, const Immediate& src);
+ void Move(Register dst, Smi* src) { Move(dst, Immediate(src)); }
+ void Move(Register dst, Handle<HeapObject> src);
+ void Move(Register dst, Register src);
+ void Move(Operand dst, const Immediate& src);
// Move an immediate into an XMM register.
void Move(XMMRegister dst, uint32_t src);
@@ -123,11 +137,11 @@ class TurboAssembler : public TurboAssemblerBase {
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
- void Move(Register dst, Handle<HeapObject> handle);
-
void Call(Register reg) { call(reg); }
- void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
void Call(Label* target) { call(target); }
+ void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+
+ void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void RetpolineCall(Register reg);
void RetpolineCall(Address destination, RelocInfo::Mode rmode);
@@ -226,17 +240,19 @@ class TurboAssembler : public TurboAssemblerBase {
void LoadRoot(Register destination, Heap::RootListIndex index) override;
- // TODO(jgruber,v8:6666): Implement embedded builtins.
+ // Indirect root-relative loads.
void LoadFromConstantsTable(Register destination,
- int constant_index) override {
- UNREACHABLE();
- }
- void LoadRootRegisterOffset(Register destination, intptr_t offset) override {
- UNREACHABLE();
- }
- void LoadRootRelative(Register destination, int32_t offset) override {
- UNREACHABLE();
- }
+ int constant_index) override;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
+ void LoadRootRelative(Register destination, int32_t offset) override;
+
+ void LoadAddress(Register destination, ExternalReference source);
+
+ // Wrapper functions to ensure external reference operands produce
+ // isolate-independent code if needed.
+ Operand StaticVariable(const ExternalReference& ext);
+ Operand StaticArray(Register index, ScaleFactor scale,
+ const ExternalReference& ext);
// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
@@ -697,7 +713,6 @@ class MacroAssembler : public TurboAssembler {
// from the stack, clobbering only the esp register.
void Drop(int element_count);
- void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Pop(Register dst) { pop(dst); }
void Pop(Operand dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index f471381dd8..0b5e58b92e 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -207,6 +207,78 @@ void AccessorAssembler::HandleLoadICHandlerCase(
}
}
+void AccessorAssembler::HandleLoadCallbackProperty(const LoadICParameters* p,
+ TNode<JSObject> holder,
+ TNode<WordT> handler_word,
+ ExitPoint* exit_point) {
+ Comment("native_data_property_load");
+ Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
+
+ Label runtime(this, Label::kDeferred);
+ Callable callable = CodeFactory::ApiGetter(isolate());
+ TNode<AccessorInfo> accessor_info =
+ CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
+
+ GotoIf(IsRuntimeCallStatsEnabled(), &runtime);
+ exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
+ accessor_info);
+
+ BIND(&runtime);
+ exit_point->ReturnCallRuntime(Runtime::kLoadCallbackProperty, p->context,
+ p->receiver, holder, accessor_info, p->name);
+}
+
+void AccessorAssembler::HandleLoadAccessor(
+ const LoadICParameters* p, TNode<CallHandlerInfo> call_handler_info,
+ TNode<WordT> handler_word, TNode<DataHandler> handler,
+ TNode<IntPtrT> handler_kind, ExitPoint* exit_point) {
+ Comment("api_getter");
+ Label runtime(this, Label::kDeferred);
+ // Context is stored either in data2 or data3 field depending on whether
+ // the access check is enabled for this handler or not.
+ TNode<MaybeObject> maybe_context = Select<MaybeObject>(
+ IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
+ [=] { return LoadHandlerDataField(handler, 3); },
+ [=] { return LoadHandlerDataField(handler, 2); });
+
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_context));
+ CSA_CHECK(this, IsNotClearedWeakHeapObject(maybe_context));
+ TNode<Object> context = ToWeakHeapObject(maybe_context);
+
+ GotoIf(IsRuntimeCallStatsEnabled(), &runtime);
+ {
+ TNode<Foreign> foreign = CAST(
+ LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset));
+ TNode<WordT> callback = TNode<WordT>::UncheckedCast(LoadObjectField(
+ foreign, Foreign::kForeignAddressOffset, MachineType::Pointer()));
+ TNode<Object> data =
+ LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
+
+ VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
+ Label load(this);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
+ &load);
+
+ CSA_ASSERT(
+ this,
+ WordEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
+
+ api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
+ Goto(&load);
+
+ BIND(&load);
+ Callable callable = CodeFactory::CallApiCallback(isolate(), 0);
+ exit_point->Return(CallStub(callable, nullptr, context, data,
+ api_holder.value(), callback, p->receiver));
+ }
+
+ BIND(&runtime);
+ exit_point->ReturnCallRuntime(Runtime::kLoadAccessorProperty, context,
+ p->receiver, SmiTag(handler_kind),
+ call_handler_info);
+}
+
void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
Variable* var_double_value,
Label* rebox_double,
@@ -250,17 +322,17 @@ void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
}
}
-TNode<Object> AccessorAssembler::LoadDescriptorValue(Node* map,
+TNode<Object> AccessorAssembler::LoadDescriptorValue(TNode<Map> map,
Node* descriptor) {
return CAST(LoadDescriptorValueOrFieldType(map, descriptor));
}
TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
- Node* map, Node* descriptor) {
+ TNode<Map> map, SloppyTNode<IntPtrT> descriptor) {
TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
- Node* scaled_descriptor =
+ TNode<IntPtrT> scaled_descriptor =
IntPtrMul(descriptor, IntPtrConstant(DescriptorArray::kEntrySize));
- Node* value_index = IntPtrAdd(
+ TNode<IntPtrT> value_index = IntPtrAdd(
scaled_descriptor, IntPtrConstant(DescriptorArray::kFirstIndex +
DescriptorArray::kEntryValueIndex));
CSA_ASSERT(this, UintPtrLessThan(descriptor, LoadAndUntagWeakFixedArrayLength(
@@ -269,14 +341,15 @@ TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
- const LoadICParameters* p, Node* holder, Node* smi_handler, Node* handler,
- Label* miss, ExitPoint* exit_point, OnNonExistent on_nonexistent,
- ElementSupport support_elements) {
+ const LoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler,
+ SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
+ OnNonExistent on_nonexistent, ElementSupport support_elements) {
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
- Node* handler_word = SmiUntag(smi_handler);
- Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
+ TNode<WordT> handler_word = SmiUntag(smi_handler);
+ TNode<IntPtrT> handler_kind =
+ Signed(DecodeWord<LoadHandler::KindBits>(handler_word));
if (support_elements == kSupportElements) {
Label if_element(this), if_indexed_string(this), if_property(this);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kElement)),
@@ -461,55 +534,11 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
}
BIND(&native_data_property);
- {
- Comment("native_data_property_load");
- Node* descriptor = DecodeWord<LoadHandler::DescriptorBits>(handler_word);
- Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
-
- Callable callable = CodeFactory::ApiGetter(isolate());
- exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
- accessor_info);
- }
+ HandleLoadCallbackProperty(p, CAST(holder), handler_word, exit_point);
BIND(&api_getter);
- {
- Comment("api_getter");
- CSA_ASSERT(this, TaggedIsNotSmi(handler));
- Node* call_handler_info = holder;
-
- // Context is stored either in data2 or data3 field depending on whether
- // the access check is enabled for this handler or not.
- TNode<Object> context_cell = Select<Object>(
- IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
- [=] { return CAST(LoadHandlerDataField(handler, 3)); },
- [=] { return CAST(LoadHandlerDataField(handler, 2)); });
-
- Node* context = LoadWeakCellValueUnchecked(CAST(context_cell));
- Node* foreign =
- LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset);
- Node* callback = LoadObjectField(foreign, Foreign::kForeignAddressOffset,
- MachineType::Pointer());
- Node* data =
- LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
-
- VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
- Label load(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &load);
-
- CSA_ASSERT(
- this,
- WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
-
- api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
- Goto(&load);
-
- BIND(&load);
- Callable callable = CodeFactory::CallApiCallback(isolate(), 0);
- exit_point->Return(CallStub(callable, nullptr, context, data,
- api_holder.value(), callback, p->receiver));
- }
+ HandleLoadAccessor(p, CAST(holder), handler_word, CAST(handler), handler_kind,
+ exit_point);
BIND(&proxy);
{
@@ -576,8 +605,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Node* module =
LoadObjectField(p->receiver, JSModuleNamespace::kModuleOffset,
MachineType::TaggedPointer());
- Node* exports = LoadObjectField(module, Module::kExportsOffset,
- MachineType::TaggedPointer());
+ TNode<ObjectHashTable> exports = CAST(LoadObjectField(
+ module, Module::kExportsOffset, MachineType::TaggedPointer()));
Node* cell = LoadFixedArrayElement(exports, index);
// The handler is only installed for exports that exist.
CSA_ASSERT(this, IsCell(cell));
@@ -670,8 +699,9 @@ Node* AccessorAssembler::HandleProtoHandler(
BIND(&if_do_access_check);
{
- TNode<WeakCell> data2 = CAST(LoadHandlerDataField(handler, 2));
- Node* expected_native_context = LoadWeakCellValue(data2, miss);
+ TNode<MaybeObject> data2 = LoadHandlerDataField(handler, 2);
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(data2));
+ TNode<Object> expected_native_context = ToWeakHeapObject(data2, miss);
EmitAccessCheck(expected_native_context, p->context, p->receiver, &done,
miss);
}
@@ -731,18 +761,20 @@ void AccessorAssembler::HandleLoadICProtoHandler(
},
miss, ic_mode);
- TNode<Object> maybe_holder_cell = CAST(LoadHandlerDataField(handler, 1));
+ TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
Label load_from_cached_holder(this), done(this);
- Branch(IsNull(maybe_holder_cell), &done, &load_from_cached_holder);
+ Branch(IsStrongReferenceTo(maybe_holder, NullConstant()), &done,
+ &load_from_cached_holder);
BIND(&load_from_cached_holder);
{
// For regular holders, having passed the receiver map check and the
- // validity cell check implies that |holder| is alive. However, for
- // global object receivers, the |maybe_holder_cell| may be cleared.
- Node* holder = LoadWeakCellValue(CAST(maybe_holder_cell), miss);
+ // validity cell check implies that |holder| is alive. However, for global
+ // object receivers, |maybe_holder| may be cleared.
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_holder));
+ Node* holder = ToWeakHeapObject(maybe_holder, miss);
var_holder->Bind(holder);
Goto(&done);
@@ -785,6 +817,7 @@ void AccessorAssembler::JumpIfDataProperty(Node* details, Label* writable,
void AccessorAssembler::HandleStoreICNativeDataProperty(
const StoreICParameters* p, Node* holder, Node* handler_word) {
+ Comment("native_data_property_store");
Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_CHECK(this, IsAccessorInfo(accessor_info));
@@ -1141,16 +1174,16 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
BIND(&cont);
}
- Node* properties =
- ExtendPropertiesBackingStore(object, backing_store_index);
- StoreFixedArrayElement(properties, backing_store_index,
- var_value.value());
+ TNode<PropertyArray> properties =
+ CAST(ExtendPropertiesBackingStore(object, backing_store_index));
+ StorePropertyArrayElement(properties, backing_store_index,
+ var_value.value());
StoreMap(object, object_map);
Goto(&done);
} else {
Label tagged_rep(this), double_rep(this);
- Node* properties = LoadFastProperties(object);
+ TNode<PropertyArray> properties = CAST(LoadFastProperties(object));
Branch(
Word32Equal(representation, Int32Constant(Representation::kDouble)),
&double_rep, &tagged_rep);
@@ -1164,7 +1197,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
}
BIND(&tagged_rep);
{
- StoreFixedArrayElement(properties, backing_store_index, value);
+ StorePropertyArrayElement(properties, backing_store_index, value);
Goto(&done);
}
}
@@ -1261,7 +1294,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
STATIC_ASSERT(kData == 0);
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
- StoreValueByKeyIndex<NameDictionary>(properties, name_index, p->value);
+ StoreValueByKeyIndex<NameDictionary>(
+ CAST(properties), UncheckedCast<IntPtrT>(name_index), p->value);
Return(p->value);
},
miss, ic_mode);
@@ -1277,8 +1311,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)),
&if_add_normal);
- TNode<WeakCell> holder_cell = CAST(LoadHandlerDataField(handler, 1));
- Node* holder = LoadWeakCellValue(holder_cell, miss);
+ TNode<MaybeObject> maybe_holder = LoadHandlerDataField(handler, 1);
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_holder));
+ TNode<Object> holder = ToWeakHeapObject(maybe_holder, miss);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)),
&if_store_global_proxy);
@@ -1334,12 +1369,16 @@ void AccessorAssembler::HandleStoreICProtoHandler(
// Context is stored either in data2 or data3 field depending on whether
// the access check is enabled for this handler or not.
- TNode<Object> context_cell = Select<Object>(
+ TNode<MaybeObject> maybe_context = Select<MaybeObject>(
IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
- [=] { return CAST(LoadHandlerDataField(handler, 3)); },
- [=] { return CAST(LoadHandlerDataField(handler, 2)); });
+ [=] { return LoadHandlerDataField(handler, 3); },
+ [=] { return LoadHandlerDataField(handler, 2); });
- Node* context = LoadWeakCellValueUnchecked(CAST(context_cell));
+ CSA_ASSERT(this, IsWeakOrClearedHeapObject(maybe_context));
+ TNode<Object> context =
+ Select<Object>(IsClearedWeakHeapObject(maybe_context),
+ [=] { return SmiConstant(0); },
+ [=] { return ToWeakHeapObject(maybe_context); });
Node* foreign = LoadObjectField(call_handler_info,
CallHandlerInfo::kJsCallbackOffset);
@@ -1757,13 +1796,13 @@ void AccessorAssembler::EmitElementLoad(
BIND(&if_fast_packed);
{
Comment("fast packed elements");
- exit_point->Return(LoadFixedArrayElement(elements, intptr_index));
+ exit_point->Return(LoadFixedArrayElement(CAST(elements), intptr_index));
}
BIND(&if_fast_holey);
{
Comment("fast holey elements");
- Node* element = LoadFixedArrayElement(elements, intptr_index);
+ Node* element = LoadFixedArrayElement(CAST(elements), intptr_index);
GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
exit_point->Return(element);
}
@@ -2471,6 +2510,8 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
StoreFeedbackVectorSlot(p->vector, p->slot,
LoadRoot(Heap::kpremonomorphic_symbolRootIndex),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
+ StoreWeakReferenceInFeedbackVector(p->vector, p->slot, receiver_map,
+ kPointerSize, SMI_PARAMETERS);
{
// Special case for Function.prototype load, because it's very common
@@ -2480,15 +2521,8 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
&not_function_prototype);
GotoIfNot(IsPrototypeString(p->name), &not_function_prototype);
- // if (!(has_prototype_slot() && !has_non_instance_prototype())) use generic
- // property loading mechanism.
- GotoIfNot(
- Word32Equal(
- Word32And(LoadMapBitField(receiver_map),
- Int32Constant(Map::HasPrototypeSlotBit::kMask |
- Map::HasNonInstancePrototypeBit::kMask)),
- Int32Constant(Map::HasPrototypeSlotBit::kMask)),
- &not_function_prototype);
+ GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), CAST(receiver_map),
+ &not_function_prototype);
Return(LoadJSFunctionPrototype(receiver, &miss));
BIND(&not_function_prototype);
}
@@ -3380,5 +3414,128 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
StoreInArrayLiteralIC(&p);
}
+void AccessorAssembler::GenerateCloneObjectIC() {
+ typedef CloneObjectWithVectorDescriptor Descriptor;
+ Node* source = Parameter(Descriptor::kSource);
+ Node* flags = Parameter(Descriptor::kFlags);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+ TVARIABLE(MaybeObject, var_handler);
+ Label if_handler(this, &var_handler);
+ Label miss(this, Label::kDeferred), try_polymorphic(this, Label::kDeferred),
+ try_megamorphic(this, Label::kDeferred);
+
+ CSA_SLOW_ASSERT(this, TaggedIsNotSmi(source));
+ Node* source_map = LoadMap(UncheckedCast<HeapObject>(source));
+ GotoIf(IsDeprecatedMap(source_map), &miss);
+ TNode<MaybeObject> feedback = TryMonomorphicCase(
+ slot, vector, source_map, &if_handler, &var_handler, &try_polymorphic);
+
+ BIND(&if_handler);
+ {
+ Comment("CloneObjectIC_if_handler");
+
+ // Handlers for the CloneObjectIC stub are weak references to the Map of
+ // a result object.
+ TNode<Map> result_map = CAST(var_handler.value());
+ TVARIABLE(Object, var_properties, EmptyFixedArrayConstant());
+ TVARIABLE(FixedArrayBase, var_elements, EmptyFixedArrayConstant());
+
+ Label allocate_object(this);
+ GotoIf(IsNullOrUndefined(source), &allocate_object);
+ CSA_SLOW_ASSERT(this, IsJSObjectMap(result_map));
+
+    // The IC fast case should only be taken if the result map has a
+    // compatible elements kind with the source object.
+ TNode<FixedArrayBase> source_elements = LoadElements(source);
+
+ auto flags = ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW;
+ var_elements = CAST(CloneFixedArray(source_elements, flags));
+
+    // Copy the PropertyArray backing store. The source properties field must
+    // be either a Smi or a PropertyArray.
+ // FIXME: Make a CSA macro for this
+ TNode<Object> source_properties =
+ LoadObjectField(source, JSObject::kPropertiesOrHashOffset);
+ {
+ GotoIf(TaggedIsSmi(source_properties), &allocate_object);
+ GotoIf(IsEmptyFixedArray(source_properties), &allocate_object);
+
+ // This IC requires that the source object has fast properties
+ CSA_SLOW_ASSERT(this, IsPropertyArray(CAST(source_properties)));
+ TNode<IntPtrT> length = LoadPropertyArrayLength(
+ UncheckedCast<PropertyArray>(source_properties));
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &allocate_object);
+
+ auto mode = INTPTR_PARAMETERS;
+ var_properties = CAST(AllocatePropertyArray(length, mode));
+ CopyPropertyArrayValues(source_properties, var_properties.value(), length,
+ SKIP_WRITE_BARRIER, mode);
+ }
+
+ Goto(&allocate_object);
+ BIND(&allocate_object);
+ TNode<JSObject> object = UncheckedCast<JSObject>(AllocateJSObjectFromMap(
+ result_map, var_properties.value(), var_elements.value()));
+ ReturnIf(IsNullOrUndefined(source), object);
+
+ // Lastly, clone any in-object properties.
+ // Determine the inobject property capacity of both objects, and copy the
+ // smaller number into the resulting object.
+ Node* source_start = LoadMapInobjectPropertiesStartInWords(source_map);
+ Node* source_size = LoadMapInstanceSizeInWords(source_map);
+ Node* result_start = LoadMapInobjectPropertiesStartInWords(result_map);
+ Node* field_offset_difference =
+ TimesPointerSize(IntPtrSub(result_start, source_start));
+ BuildFastLoop(source_start, source_size,
+ [=](Node* field_index) {
+ Node* field_offset = TimesPointerSize(field_index);
+ Node* field = LoadObjectField(source, field_offset);
+ Node* result_offset =
+ IntPtrAdd(field_offset, field_offset_difference);
+ StoreObjectFieldNoWriteBarrier(object, result_offset,
+ field);
+ },
+ 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ Return(object);
+ }
+
+ BIND(&try_polymorphic);
+ TNode<HeapObject> strong_feedback = ToStrongHeapObject(feedback, &miss);
+ {
+ Comment("CloneObjectIC_try_polymorphic");
+ GotoIfNot(IsWeakFixedArrayMap(LoadMap(strong_feedback)), &try_megamorphic);
+ HandlePolymorphicCase(source_map, CAST(strong_feedback), &if_handler,
+ &var_handler, &miss, 2);
+ }
+
+ BIND(&try_megamorphic);
+ {
+ Comment("CloneObjectIC_try_megamorphic");
+ CSA_ASSERT(
+ this,
+ Word32Or(WordEqual(strong_feedback,
+ LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ WordEqual(strong_feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex))));
+ GotoIfNot(WordEqual(strong_feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
+ TailCallRuntime(Runtime::kCloneObjectIC_Slow, context, source, flags);
+ }
+
+ BIND(&miss);
+ {
+ Comment("CloneObjectIC_miss");
+ Node* map_or_result = CallRuntime(Runtime::kCloneObjectIC_Miss, context,
+ source, flags, slot, vector);
+ var_handler = UncheckedCast<MaybeObject>(map_or_result);
+ GotoIf(IsMap(map_or_result), &if_handler);
+ CSA_ASSERT(this, IsJSObject(map_or_result));
+ Return(map_or_result);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 0aa9f0ab41..0de48e021a 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -39,6 +39,7 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateStoreICTrampoline();
void GenerateStoreGlobalIC();
void GenerateStoreGlobalICTrampoline();
+ void GenerateCloneObjectIC();
void GenerateLoadGlobalIC(TypeofMode typeof_mode);
void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
@@ -137,9 +138,9 @@ class AccessorAssembler : public CodeStubAssembler {
TVariable<MaybeObject>* var_handler, Label* if_handler,
Label* miss, ExitPoint* exit_point);
- TNode<Object> LoadDescriptorValue(Node* map, Node* descriptor);
- TNode<MaybeObject> LoadDescriptorValueOrFieldType(Node* map,
- Node* descriptor);
+ TNode<Object> LoadDescriptorValue(TNode<Map> map, Node* descriptor);
+ TNode<MaybeObject> LoadDescriptorValueOrFieldType(
+ TNode<Map> map, SloppyTNode<IntPtrT> descriptor);
void LoadIC_Uninitialized(const LoadICParameters* p);
@@ -173,7 +174,8 @@ class AccessorAssembler : public CodeStubAssembler {
ElementSupport support_elements = kOnlyProperties);
void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
- Node* smi_handler, Node* handler, Label* miss,
+ SloppyTNode<Smi> smi_handler,
+ SloppyTNode<Object> handler, Label* miss,
ExitPoint* exit_point,
OnNonExistent on_nonexistent,
ElementSupport support_elements);
@@ -183,6 +185,16 @@ class AccessorAssembler : public CodeStubAssembler {
Label* if_smi_handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode);
+ void HandleLoadCallbackProperty(const LoadICParameters* p,
+ TNode<JSObject> holder,
+ TNode<WordT> handler_word,
+ ExitPoint* exit_point);
+
+ void HandleLoadAccessor(const LoadICParameters* p,
+ TNode<CallHandlerInfo> call_handler_info,
+ TNode<WordT> handler_word, TNode<DataHandler> handler,
+ TNode<IntPtrT> handler_kind, ExitPoint* exit_point);
+
void HandleLoadField(Node* holder, Node* handler_word,
Variable* var_double_value, Label* rebox_double,
ExitPoint* exit_point);
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 74ee29edaf..f95f0d7aab 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -8,8 +8,8 @@
#include "src/ic/handler-configuration.h"
#include "src/field-index-inl.h"
+#include "src/handles-inl.h"
#include "src/objects-inl.h"
-#include "src/objects/data-handler-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,7 +17,6 @@
namespace v8 {
namespace internal {
-TYPE_CHECKER(LoadHandler, LOAD_HANDLER_TYPE)
CAST_ACCESSOR(LoadHandler)
// Decodes kind from Smi-handler.
@@ -110,7 +109,6 @@ Handle<Smi> LoadHandler::LoadIndexedString(Isolate* isolate,
return handle(Smi::FromInt(config), isolate);
}
-TYPE_CHECKER(StoreHandler, STORE_HANDLER_TYPE)
CAST_ACCESSOR(StoreHandler)
Handle<Smi> StoreHandler::StoreGlobalProxy(Isolate* isolate) {
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index c95e036e1c..86ea4f9d3b 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -6,6 +6,8 @@
#include "src/code-stubs.h"
#include "src/ic/handler-configuration-inl.h"
+#include "src/objects/data-handler-inl.h"
+#include "src/objects/maybe-object.h"
#include "src/transitions.h"
namespace v8 {
@@ -26,8 +28,8 @@ Handle<Smi> SetBitFieldValue(Isolate* isolate, Handle<Smi> smi_handler,
template <typename ICHandler, bool fill_handler = true>
int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
Handle<Smi>* smi_handler, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Object> data1,
- MaybeHandle<Object> maybe_data2) {
+ Handle<JSReceiver> holder, MaybeObjectHandle data1,
+ MaybeObjectHandle maybe_data2) {
int checks_count = 0;
// Holder-is-receiver case itself does not add entries unless there is an
// optional data2 value provided.
@@ -43,7 +45,7 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
// corresponds.
if (fill_handler) {
Handle<Context> native_context = isolate->native_context();
- handler->set_data2(native_context->self_weak_cell());
+ handler->set_data2(HeapObjectReference::Weak(*native_context));
} else {
// Enable access checks on receiver.
typedef typename ICHandler::DoAccessCheckOnReceiverBits Bit;
@@ -59,18 +61,17 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
}
}
if (fill_handler) {
- handler->set_data1(MaybeObject::FromObject(*data1));
+ handler->set_data1(*data1);
}
- Handle<Object> data2;
- if (maybe_data2.ToHandle(&data2)) {
+ if (!maybe_data2.is_null()) {
if (fill_handler) {
// This value will go either to data2 or data3 slot depending on whether
// data2 slot is already occupied by native context.
if (checks_count == 0) {
- handler->set_data2(*data2);
+ handler->set_data2(*maybe_data2);
} else {
DCHECK_EQ(1, checks_count);
- handler->set_data3(*data2);
+ handler->set_data3(*maybe_data2);
}
}
checks_count++;
@@ -87,8 +88,8 @@ int InitPrototypeChecksImpl(Isolate* isolate, Handle<ICHandler> handler,
template <typename ICHandler>
int GetPrototypeCheckCount(
Isolate* isolate, Handle<Smi>* smi_handler, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Object> data1,
- MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>()) {
+ Handle<JSReceiver> holder, MaybeObjectHandle data1,
+ MaybeObjectHandle maybe_data2 = MaybeObjectHandle()) {
DCHECK_NOT_NULL(smi_handler);
return InitPrototypeChecksImpl<ICHandler, false>(isolate, Handle<ICHandler>(),
smi_handler, receiver_map,
@@ -96,10 +97,10 @@ int GetPrototypeCheckCount(
}
template <typename ICHandler>
-void InitPrototypeChecks(
- Isolate* isolate, Handle<ICHandler> handler, Handle<Map> receiver_map,
- Handle<JSReceiver> holder, Handle<Object> data1,
- MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>()) {
+void InitPrototypeChecks(Isolate* isolate, Handle<ICHandler> handler,
+ Handle<Map> receiver_map, Handle<JSReceiver> holder,
+ MaybeObjectHandle data1,
+ MaybeObjectHandle maybe_data2 = MaybeObjectHandle()) {
InitPrototypeChecksImpl<ICHandler, true>(
isolate, handler, nullptr, receiver_map, holder, data1, maybe_data2);
}
@@ -111,11 +112,13 @@ Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
Handle<Map> receiver_map,
Handle<JSReceiver> holder,
Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data1,
- MaybeHandle<Object> maybe_data2) {
- Handle<Object> data1;
- if (!maybe_data1.ToHandle(&data1)) {
- data1 = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
+ MaybeObjectHandle maybe_data1,
+ MaybeObjectHandle maybe_data2) {
+ MaybeObjectHandle data1;
+ if (maybe_data1.is_null()) {
+ data1 = MaybeObjectHandle::Weak(holder);
+ } else {
+ data1 = maybe_data1;
}
int checks_count = GetPrototypeCheckCount<LoadHandler>(
@@ -137,10 +140,10 @@ Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
// static
Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
- Handle<Object> holder,
+ MaybeObjectHandle holder,
Handle<Smi> smi_handler) {
Handle<JSReceiver> end; // null handle, means full prototype chain lookup.
- Handle<Object> data1 = holder;
+ MaybeObjectHandle data1 = holder;
int checks_count = GetPrototypeCheckCount<LoadHandler>(
isolate, &smi_handler, receiver_map, end, data1);
@@ -243,11 +246,13 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
// static
Handle<Object> StoreHandler::StoreThroughPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
- Handle<Smi> smi_handler, MaybeHandle<Object> maybe_data1,
- MaybeHandle<Object> maybe_data2) {
- Handle<Object> data1;
- if (!maybe_data1.ToHandle(&data1)) {
- data1 = Map::GetOrCreatePrototypeWeakCell(holder, isolate);
+ Handle<Smi> smi_handler, MaybeObjectHandle maybe_data1,
+ MaybeObjectHandle maybe_data2) {
+ MaybeObjectHandle data1;
+ if (maybe_data1.is_null()) {
+ data1 = MaybeObjectHandle::Weak(holder);
+ } else {
+ data1 = maybe_data1;
}
int checks_count = GetPrototypeCheckCount<StoreHandler>(
@@ -269,8 +274,7 @@ Handle<Object> StoreHandler::StoreThroughPrototype(
}
// static
-MaybeObjectHandle StoreHandler::StoreGlobal(Isolate* isolate,
- Handle<PropertyCell> cell) {
+MaybeObjectHandle StoreHandler::StoreGlobal(Handle<PropertyCell> cell) {
return MaybeObjectHandle::Weak(cell);
}
@@ -281,9 +285,8 @@ Handle<Object> StoreHandler::StoreProxy(Isolate* isolate,
Handle<JSReceiver> receiver) {
Handle<Smi> smi_handler = StoreProxy(isolate);
if (receiver.is_identical_to(proxy)) return smi_handler;
- Handle<WeakCell> holder_cell = isolate->factory()->NewWeakCell(proxy);
return StoreThroughPrototype(isolate, receiver_map, proxy, smi_handler,
- holder_cell);
+ MaybeObjectHandle::Weak(proxy));
}
} // namespace internal
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 4807ad695d..305577a2df 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -8,6 +8,7 @@
#include "src/elements-kind.h"
#include "src/field-index.h"
#include "src/globals.h"
+#include "src/maybe-handles.h"
#include "src/objects.h"
#include "src/objects/data-handler.h"
#include "src/utils.h"
@@ -18,6 +19,8 @@
namespace v8 {
namespace internal {
+class JSProxy;
+
// A set of bit fields representing Smi handlers for loads and a HeapObject
// that represents load handlers that can't be encoded in a Smi.
// TODO(ishell): move to load-handler.h
@@ -147,7 +150,7 @@ class LoadHandler final : public DataHandler {
// needed (e.g., for "nonexistent"), null_value() may be passed in.
static Handle<Object> LoadFullChain(Isolate* isolate,
Handle<Map> receiver_map,
- Handle<Object> holder,
+ MaybeObjectHandle holder,
Handle<Smi> smi_handler);
// Creates a data handler that represents a prototype chain check followed
@@ -156,8 +159,8 @@ class LoadHandler final : public DataHandler {
static Handle<Object> LoadFromPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data1 = MaybeHandle<Object>(),
- MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>());
+ MaybeObjectHandle maybe_data1 = MaybeObjectHandle(),
+ MaybeObjectHandle maybe_data2 = MaybeObjectHandle());
// Creates a Smi-handler for loading a non-existent property. Works only as
// a part of prototype chain check.
@@ -266,8 +269,8 @@ class StoreHandler final : public DataHandler {
static Handle<Object> StoreThroughPrototype(
Isolate* isolate, Handle<Map> receiver_map, Handle<JSReceiver> holder,
Handle<Smi> smi_handler,
- MaybeHandle<Object> maybe_data1 = MaybeHandle<Object>(),
- MaybeHandle<Object> maybe_data2 = MaybeHandle<Object>());
+ MaybeObjectHandle maybe_data1 = MaybeObjectHandle(),
+ MaybeObjectHandle maybe_data2 = MaybeObjectHandle());
static Handle<Object> StoreElementTransition(Isolate* isolate,
Handle<Map> receiver_map,
@@ -280,8 +283,7 @@ class StoreHandler final : public DataHandler {
// Creates a handler for storing a property to the property cell of a global
// object.
- static MaybeObjectHandle StoreGlobal(Isolate* isolate,
- Handle<PropertyCell> cell);
+ static MaybeObjectHandle StoreGlobal(Handle<PropertyCell> cell);
// Creates a Smi-handler for storing a property to a global proxy object.
static inline Handle<Smi> StoreGlobalProxy(Isolate* isolate);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 7c95f2fcf0..640bf7250c 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -39,6 +39,14 @@ Address IC::raw_constant_pool() const {
}
}
+void IC::update_receiver_map(Handle<Object> receiver) {
+ if (receiver->IsSmi()) {
+ receiver_map_ = isolate_->factory()->heap_number_map();
+ } else {
+ receiver_map_ = handle(HeapObject::cast(*receiver)->map(), isolate_);
+ }
+}
+
bool IC::IsHandler(MaybeObject* object) {
HeapObject* heap_object;
return (object->IsSmi() && (object != nullptr)) ||
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 0937d792c2..9237441ac9 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -7,12 +7,14 @@
#include "src/accessors.h"
#include "src/api-arguments-inl.h"
#include "src/api.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
+#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/conversions.h"
#include "src/execution.h"
#include "src/field-type.h"
#include "src/frames-inl.h"
+#include "src/handles-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
@@ -21,7 +23,9 @@
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/data-handler-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/prototype.h"
#include "src/runtime-profiler.h"
@@ -120,7 +124,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ic_info.type += type;
Object* maybe_function =
- Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+ Memory<Object*>(fp_ + JavaScriptFrameConstants::kFunctionOffset);
DCHECK(maybe_function->IsJSFunction());
JSFunction* function = JSFunction::cast(maybe_function);
int code_offset = 0;
@@ -168,7 +172,7 @@ IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
}
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
- Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+ Address fp = Memory<Address>(entry + ExitFrameConstants::kCallerFPOffset);
#ifdef DEBUG
StackFrameIterator it(isolate);
for (int i = 0; i < 1; i++) it.Advance();
@@ -181,9 +185,9 @@ IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot)
// is skip this frame. However, the pc should not be updated. The call to
// ICs happen from bytecode handlers.
intptr_t frame_marker =
- Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
if (frame_marker == StackFrame::TypeToMarker(StackFrame::STUB)) {
- fp = Memory::Address_at(fp + TypedFrameConstants::kCallerFPOffset);
+ fp = Memory<Address>(fp + TypedFrameConstants::kCallerFPOffset);
}
fp_ = fp;
if (FLAG_enable_embedded_constant_pool) {
@@ -366,26 +370,24 @@ static bool MigrateDeprecated(Handle<Object> object) {
}
bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
- bool changed = true;
- if (new_state == PREMONOMORPHIC) {
- nexus()->ConfigurePremonomorphic();
- } else if (new_state == MEGAMORPHIC) {
- DCHECK_IMPLIES(!is_keyed(), key->IsName());
- // Even though we don't change the feedback data, we still want to reset the
- // profiler ticks. Real-world observations suggest that optimizing these
- // functions doesn't improve performance.
- changed = nexus()->ConfigureMegamorphic(key->IsName() ? PROPERTY : ELEMENT);
- } else {
- UNREACHABLE();
- }
-
+ DCHECK_EQ(MEGAMORPHIC, new_state);
+ DCHECK_IMPLIES(!is_keyed(), key->IsName());
+ // Even though we don't change the feedback data, we still want to reset the
+ // profiler ticks. Real-world observations suggest that optimizing these
+ // functions doesn't improve performance.
+ bool changed =
+ nexus()->ConfigureMegamorphic(key->IsName() ? PROPERTY : ELEMENT);
vector_set_ = true;
- OnFeedbackChanged(
- isolate(), nexus(), GetHostFunction(),
- new_state == PREMONOMORPHIC ? "Premonomorphic" : "Megamorphic");
+ OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Megamorphic");
return changed;
}
+void IC::ConfigureVectorState(Handle<Map> map) {
+ nexus()->ConfigurePremonomorphic(map);
+ vector_set_ = true;
+ OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Premonomorphic");
+}
+
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler) {
ConfigureVectorState(name, map, MaybeObjectHandle(handler));
@@ -676,7 +678,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
- ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
+ ConfigureVectorState(receiver_map());
TraceIC("LoadIC", lookup->name());
return;
}
@@ -687,9 +689,9 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
} else if (!lookup->IsFound()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate());
- code = LoadHandler::LoadFullChain(isolate(), receiver_map(),
- isolate()->factory()->null_value(),
- smi_handler);
+ code = LoadHandler::LoadFullChain(
+ isolate(), receiver_map(),
+ MaybeObjectHandle(isolate()->factory()->null_value()), smi_handler);
} else {
if (IsLoadGlobalIC()) {
if (lookup->TryLookupCachedProperty()) {
@@ -751,8 +753,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
// Use specialized code for getting prototype of functions.
if (receiver->IsJSFunction() && *lookup->name() == roots.prototype_string() &&
- JSFunction::cast(*receiver)->has_prototype_slot() &&
- !JSFunction::cast(*receiver)->map()->has_non_instance_prototype()) {
+ !JSFunction::cast(*receiver)->PrototypeRequiresRuntimeLookup()) {
Handle<Code> stub;
TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
return BUILTIN_CODE(isolate(), LoadIC_FunctionPrototype);
@@ -771,9 +772,9 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Smi> smi_handler = LoadHandler::LoadInterceptor(isolate());
if (holder->GetNamedInterceptor()->non_masking()) {
- Handle<Object> holder_ref = isolate()->factory()->null_value();
+ MaybeObjectHandle holder_ref(isolate()->factory()->null_value());
if (!receiver_is_holder || IsLoadGlobalIC()) {
- holder_ref = Map::GetOrCreatePrototypeWeakCell(holder, isolate());
+ holder_ref = MaybeObjectHandle::Weak(holder);
}
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonMaskingInterceptorDH);
return LoadHandler::LoadFullChain(isolate(), map, holder_ref,
@@ -852,14 +853,12 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Context> context(
call_optimization.GetAccessorContext(holder->map()), isolate());
- Handle<WeakCell> context_cell =
- isolate()->factory()->NewWeakCell(context);
- Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
- call_optimization.api_call_info());
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
return LoadHandler::LoadFromPrototype(
- isolate(), map, holder, smi_handler, data_cell, context_cell);
+ isolate(), map, holder, smi_handler,
+ MaybeObjectHandle::Weak(call_optimization.api_call_info()),
+ MaybeObjectHandle::Weak(context));
}
if (holder->HasFastProperties()) {
@@ -872,10 +871,9 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
} else if (holder->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobalFromPrototypeDH);
smi_handler = LoadHandler::LoadGlobal(isolate());
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
- return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- smi_handler, cell);
+ return LoadHandler::LoadFromPrototype(
+ isolate(), map, holder, smi_handler,
+ MaybeObjectHandle::Weak(lookup->GetPropertyCell()));
} else {
smi_handler = LoadHandler::LoadNormal(isolate());
@@ -891,7 +889,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
if (v8::ToCData<Address>(info->getter()) == kNullAddress ||
- !AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map) ||
+ !AccessorInfo::IsCompatibleReceiverMap(info, map) ||
!holder->HasFastProperties() ||
(info->is_sloppy() && !receiver->IsJSReceiver())) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
@@ -917,10 +915,9 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
// workaround for code that leaks the global object.
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobalDH);
smi_handler = LoadHandler::LoadGlobal(isolate());
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(lookup->GetPropertyCell());
- return LoadHandler::LoadFromPrototype(isolate(), map, holder,
- smi_handler, cell);
+ return LoadHandler::LoadFromPrototype(
+ isolate(), map, holder, smi_handler,
+ MaybeObjectHandle::Weak(lookup->GetPropertyCell()));
}
smi_handler = LoadHandler::LoadNormal(isolate());
@@ -1153,7 +1150,7 @@ bool ConvertKeyToIndex(Handle<Object> receiver, Handle<Object> key,
// For regular JSReceiver or String receivers, the {key} must be a positive
// array index.
if (receiver->IsJSReceiver() || receiver->IsString()) {
- return key->ToArrayIndex(index);
+ if (key->ToArrayIndex(index)) return true;
}
// For JSTypedArray receivers, we can also support negative keys, which we
// just map into the [2**31, 2**32 - 1] range via a bit_cast. This is valid
@@ -1437,10 +1434,10 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
if (state() == UNINITIALIZED && !IsStoreGlobalIC()) {
- // This is the first time we execute this inline cache. Set the target to
- // the pre monomorphic stub to delay setting the monomorphic state.
+ // This is the first time we execute this inline cache. Transition
+ // to premonomorphic state to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
- ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
+ ConfigureVectorState(receiver_map());
TraceIC("StoreIC", lookup->name());
return;
}
@@ -1482,15 +1479,13 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
DCHECK_EQ(*lookup->GetReceiver(), *holder);
DCHECK_EQ(*store_target, *holder);
#endif
- return StoreHandler::StoreGlobal(isolate(),
- lookup->transition_cell());
+ return StoreHandler::StoreGlobal(lookup->transition_cell());
}
Handle<Smi> smi_handler = StoreHandler::StoreGlobalProxy(isolate());
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(lookup->transition_cell());
Handle<Object> handler = StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), store_target, smi_handler, cell);
+ isolate(), receiver_map(), store_target, smi_handler,
+ MaybeObjectHandle::Weak(lookup->transition_cell()));
return MaybeObjectHandle(handler);
}
// Dictionary-to-fast transitions are not expected and not supported.
@@ -1537,8 +1532,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(slow_stub());
}
- if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
- receiver_map())) {
+ if (!AccessorInfo::IsCompatibleReceiverMap(info, receiver_map())) {
set_slow_stub_reason("incompatible receiver type");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(slow_stub());
@@ -1584,14 +1578,11 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<Context> context(
call_optimization.GetAccessorContext(holder->map()), isolate());
- Handle<WeakCell> context_cell =
- isolate()->factory()->NewWeakCell(context);
- Handle<WeakCell> data_cell = isolate()->factory()->NewWeakCell(
- call_optimization.api_call_info());
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreApiSetterOnPrototypeDH);
return MaybeObjectHandle(StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, smi_handler, data_cell,
- context_cell));
+ isolate(), receiver_map(), holder, smi_handler,
+ MaybeObjectHandle::Weak(call_optimization.api_call_info()),
+ MaybeObjectHandle::Weak(context)));
}
set_slow_stub_reason("incompatible receiver");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
@@ -1630,7 +1621,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
if (holder->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalDH);
return MaybeObjectHandle(
- StoreHandler::StoreGlobal(isolate(), lookup->GetPropertyCell()));
+ StoreHandler::StoreGlobal(lookup->GetPropertyCell()));
}
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNormalDH);
DCHECK(holder.is_identical_to(receiver));
@@ -1795,7 +1786,10 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
handlers.reserve(target_receiver_maps.size());
StoreElementPolymorphicHandlers(&target_receiver_maps, &handlers, store_mode);
if (target_receiver_maps.size() == 0) {
- ConfigureVectorState(PREMONOMORPHIC, Handle<Name>());
+    // Transition to PREMONOMORPHIC state here and remember a weak reference
+ // to the {receiver_map} in case TurboFan sees this function before the
+ // IC can transition further.
+ ConfigureVectorState(receiver_map);
} else if (target_receiver_maps.size() == 1) {
ConfigureVectorState(Handle<Name>(), target_receiver_maps[0], handlers[0]);
} else {
@@ -2456,11 +2450,144 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
}
}
+static bool CanFastCloneObject(Handle<Map> map) {
+ DisallowHeapAllocation no_gc;
+ if (map->IsNullOrUndefinedMap()) return true;
+ if (!map->IsJSObjectMap() ||
+ !IsSmiOrObjectElementsKind(map->elements_kind()) ||
+ !map->OnlyHasSimpleProperties()) {
+ return false;
+ }
+
+ DescriptorArray* descriptors = map->instance_descriptors();
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ Name* key = descriptors->GetKey(i);
+ if (details.kind() != kData || !details.IsEnumerable() ||
+ key->IsPrivateField()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static Handle<Map> FastCloneObjectMap(Isolate* isolate,
+ Handle<HeapObject> source, int flags) {
+ Handle<Map> source_map(source->map(), isolate);
+ SLOW_DCHECK(source->IsNullOrUndefined() || CanFastCloneObject(source_map));
+ Handle<JSFunction> constructor(isolate->native_context()->object_function(),
+ isolate);
+ DCHECK(constructor->has_initial_map());
+ Handle<Map> initial_map(constructor->initial_map(), isolate);
+ Handle<Map> map = initial_map;
+
+ if (source_map->IsJSObjectMap() && source_map->GetInObjectProperties() !=
+ initial_map->GetInObjectProperties()) {
+ int inobject_properties = source_map->GetInObjectProperties();
+ int instance_size =
+ JSObject::kHeaderSize + kPointerSize * inobject_properties;
+ int unused = source_map->UnusedInObjectProperties();
+ DCHECK(instance_size <= JSObject::kMaxInstanceSize);
+ map = Map::CopyInitialMap(isolate, map, instance_size, inobject_properties,
+ unused);
+ }
+
+ if (flags & ObjectLiteral::kHasNullPrototype) {
+ if (map.is_identical_to(initial_map)) {
+ map = Map::Copy(isolate, map, "ObjectWithNullProto");
+ }
+ Map::SetPrototype(isolate, map, isolate->factory()->null_value());
+ }
+
+ if (source->IsNullOrUndefined() || !source_map->NumberOfOwnDescriptors()) {
+ return map;
+ }
+
+ if (map.is_identical_to(initial_map)) {
+ map = Map::Copy(isolate, map, "InitializeClonedDescriptors");
+ }
+
+ Handle<DescriptorArray> source_descriptors(source_map->instance_descriptors(),
+ isolate);
+ int size = source_map->NumberOfOwnDescriptors();
+ int slack = 0;
+ Handle<DescriptorArray> descriptors = DescriptorArray::CopyForFastObjectClone(
+ isolate, source_descriptors, size, slack);
+ Handle<LayoutDescriptor> layout =
+ LayoutDescriptor::New(isolate, map, descriptors, size);
+ map->InitializeDescriptors(*descriptors, *layout);
+ map->CopyUnusedPropertyFieldsAdjustedForInstanceSize(*source_map);
+
+ // Update bitfields
+ map->set_may_have_interesting_symbols(
+ source_map->may_have_interesting_symbols());
+
+ return map;
+}
+
+static MaybeHandle<JSObject> CloneObjectSlowPath(Isolate* isolate,
+ Handle<HeapObject> source,
+ int flags) {
+ Handle<JSObject> new_object;
+ if (flags & ObjectLiteral::kHasNullPrototype) {
+ new_object = isolate->factory()->NewJSObjectWithNullProto();
+ } else {
+ Handle<JSFunction> constructor(isolate->native_context()->object_function(),
+ isolate);
+ new_object = isolate->factory()->NewJSObject(constructor);
+ }
+
+ if (source->IsNullOrUndefined()) {
+ return new_object;
+ }
+
+ MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, new_object, source,
+ nullptr, false),
+ MaybeHandle<JSObject>());
+ return new_object;
+}
+
+RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ Handle<HeapObject> source = args.at<HeapObject>(0);
+ int flags = args.smi_at(1);
+
+ MigrateDeprecated(source);
+
+ FeedbackSlot slot = FeedbackVector::ToSlot(args.smi_at(2));
+ Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+
+ FeedbackNexus nexus(vector, slot);
+ Handle<Map> source_map(source->map(), isolate);
+
+ if (!CanFastCloneObject(source_map) || nexus.IsMegamorphic()) {
+ // Migrate to slow mode if needed.
+ nexus.ConfigureMegamorphic();
+ RETURN_RESULT_OR_FAILURE(isolate,
+ CloneObjectSlowPath(isolate, source, flags));
+ }
+
+ Handle<Map> result_map = FastCloneObjectMap(isolate, source, flags);
+ nexus.ConfigureCloneObject(source_map, result_map);
+
+ return *result_map;
+}
+
+RUNTIME_FUNCTION(Runtime_CloneObjectIC_Slow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<HeapObject> source = args.at<HeapObject>(0);
+ int flags = args.smi_at(1);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ CloneObjectSlowPath(isolate, source, flags));
+}
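Note: these runtime entries back the new CloneObject IC used for object-spread initializers (see the bytecode-generator change further down). On the fast path the feedback records a (source map, result map) pair via ConfigureCloneObject; otherwise it goes megamorphic and falls through to CloneObjectSlowPath. A minimal JavaScript sketch of the code shape the IC caches on:

    function clone(source) {
      // Each call hits the same CloneObject feedback slot; a monomorphic
      // source map can reuse the cached result map.
      return { ...source };
    }

    clone({ a: 1 });        // learns one (source map -> result map) pair
    clone({ a: 1, b: 2 });  // a second shape; enough shapes eventually go megamorphic
    clone(null);            // spread of null/undefined still yields an empty object
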
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<JSObject> holder = args.at<JSObject>(1);
- Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
+ Handle<AccessorInfo> info = args.at<AccessorInfo>(2);
Handle<Name> name = args.at<Name>(3);
Handle<Object> value = args.at(4);
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
@@ -2472,12 +2599,6 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
language_mode));
}
- Handle<AccessorInfo> info(
- callback_or_cell->IsWeakCell()
- ? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
- : AccessorInfo::cast(*callback_or_cell),
- isolate);
-
DCHECK(info->IsCompatibleReceiver(*receiver));
ShouldThrow should_throw =
@@ -2489,6 +2610,45 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
return *value;
}
+RUNTIME_FUNCTION(Runtime_LoadCallbackProperty) {
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ Handle<JSObject> holder = args.at<JSObject>(1);
+ Handle<AccessorInfo> info = args.at<AccessorInfo>(2);
+ Handle<Name> name = args.at<Name>(3);
+ HandleScope scope(isolate);
+
+ DCHECK(info->IsCompatibleReceiver(*receiver));
+
+ PropertyCallbackArguments custom_args(isolate, info->data(), *receiver,
+ *holder, kThrowOnError);
+ Handle<Object> result = custom_args.CallAccessorGetter(info, name);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result.is_null()) return ReadOnlyRoots(isolate).undefined_value();
+ return *result;
+}
+
+RUNTIME_FUNCTION(Runtime_LoadAccessorProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 3);
+ Handle<JSObject> receiver = args.at<JSObject>(0);
+ int handler_kind = args.smi_at(1);
+ Handle<CallHandlerInfo> call_handler_info = args.at<CallHandlerInfo>(2);
+
+ Object* holder = *receiver;
+ if (handler_kind == LoadHandler::kApiGetterHolderIsPrototype) {
+ holder = receiver->map()->prototype();
+ } else {
+ DCHECK_EQ(handler_kind, LoadHandler::kApiGetter);
+ }
+
+ // Call the accessor without additional arguments.
+ FunctionCallbackArguments custom(isolate, call_handler_info->data(),
+ *receiver, holder, nullptr, nullptr, 0);
+ Handle<Object> result_handle = custom.Call(*call_handler_info);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ if (result_handle.is_null()) return ReadOnlyRoots(isolate).undefined_value();
+ return *result_handle;
+}
/**
* Loads a property with an interceptor performing post interceptor
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index feac4dc63b..0a831b757f 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -93,6 +93,8 @@ class IC {
// Configure for most states.
bool ConfigureVectorState(IC::State new_state, Handle<Object> key);
+ // Configure the vector for PREMONOMORPHIC.
+ void ConfigureVectorState(Handle<Map> map);
// Configure the vector for MONOMORPHIC.
void ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler);
@@ -140,13 +142,7 @@ class IC {
bool ShouldRecomputeHandler(Handle<String> name);
Handle<Map> receiver_map() { return receiver_map_; }
- void update_receiver_map(Handle<Object> receiver) {
- if (receiver->IsSmi()) {
- receiver_map_ = isolate_->factory()->heap_number_map();
- } else {
- receiver_map_ = handle(HeapObject::cast(*receiver)->map(), isolate_);
- }
- }
+ inline void update_receiver_map(Handle<Object> receiver);
void TargetMaps(MapHandles* list) {
FindTargetMaps();
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 4257f05914..23c49c8d73 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -35,6 +35,14 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
TNode<BoolT> is_simple_receiver, TNode<Name> unique_name,
TNode<Object> value, LanguageMode language_mode);
+ // [[Set]], but more generic than the above. This impl does essentially the
+ // same as "KeyedStoreGeneric" but does not use a feedback slot and uses a
+ // hardcoded LanguageMode instead of trying to deduce it from the feedback
+ // slot's kind.
+ void SetProperty(TNode<Context> context, TNode<Object> receiver,
+ TNode<Object> key, TNode<Object> value,
+ LanguageMode language_mode);
+
private:
enum UpdateLength {
kDontChangeLength,
@@ -44,6 +52,12 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
enum UseStubCache { kUseStubCache, kDontUseStubCache };
+ // Helper that is used by the public KeyedStoreGeneric and by SetProperty.
+ void KeyedStoreGeneric(TNode<Context> context, TNode<Object> receiver,
+ TNode<Object> key, TNode<Object> value,
+ Maybe<LanguageMode> language_mode, TNode<Smi> slot,
+ TNode<FeedbackVector> vector);
+
void EmitGenericElementStore(Node* receiver, Node* receiver_map,
Node* instance_type, Node* intptr_index,
Node* value, Node* context, Label* slow);
@@ -119,6 +133,14 @@ void KeyedStoreGenericGenerator::SetProperty(
language_mode);
}
+void KeyedStoreGenericGenerator::SetProperty(
+ compiler::CodeAssemblerState* state, TNode<Context> context,
+ TNode<Object> receiver, TNode<Object> key, TNode<Object> value,
+ LanguageMode language_mode) {
+ KeyedStoreGenericAssembler assembler(state);
+ assembler.SetProperty(context, receiver, key, value, language_mode);
+}
+
void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
Node* receiver_map, Label* non_fast_elements, Label* only_fast_elements) {
VARIABLE(var_map, MachineRepresentation::kTagged);
@@ -879,30 +901,24 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
}
}
-void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
- typedef StoreWithVectorDescriptor Descriptor;
-
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
-
- VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged);
- var_unique.Bind(name); // Dummy initialization.
+// Helper that is used by the public KeyedStoreGeneric and by SetProperty.
+void KeyedStoreGenericAssembler::KeyedStoreGeneric(
+ TNode<Context> context, TNode<Object> receiver, TNode<Object> key,
+ TNode<Object> value, Maybe<LanguageMode> language_mode, TNode<Smi> slot,
+ TNode<FeedbackVector> vector) {
+ TVARIABLE(WordT, var_index);
+ TVARIABLE(Object, var_unique, key);
Label if_index(this), if_unique_name(this), not_internalized(this),
slow(this);
GotoIf(TaggedIsSmi(receiver), &slow);
- Node* receiver_map = LoadMap(receiver);
+ TNode<Map> receiver_map = LoadMap(CAST(receiver));
TNode<Int32T> instance_type = LoadMapInstanceType(receiver_map);
// Receivers requiring non-standard element accesses (interceptors, access
// checks, strings and string wrappers, proxies) are handled in the runtime.
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &slow);
- TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
+ TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
&not_internalized);
BIND(&if_index);
@@ -917,13 +933,15 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
Comment("key is unique name");
StoreICParameters p(context, receiver, var_unique.value(), value, slot,
vector);
- EmitGenericPropertyStore(receiver, receiver_map, &p, &slow);
+ ExitPoint direct_exit(this);
+ EmitGenericPropertyStore(CAST(receiver), receiver_map, &p, &direct_exit,
+ &slow, language_mode);
}
BIND(&not_internalized);
{
if (FLAG_internalize_on_the_fly) {
- TryInternalizeString(name, &if_index, &var_index, &if_unique_name,
+ TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
&var_unique, &slow, &slow);
} else {
Goto(&slow);
@@ -933,18 +951,45 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
BIND(&slow);
{
Comment("KeyedStoreGeneric_slow");
- VARIABLE(var_language_mode, MachineRepresentation::kTaggedSigned,
- SmiConstant(LanguageMode::kStrict));
- Label call_runtime(this);
- BranchIfStrictMode(vector, slot, &call_runtime);
- var_language_mode.Bind(SmiConstant(LanguageMode::kSloppy));
- Goto(&call_runtime);
- BIND(&call_runtime);
- TailCallRuntime(Runtime::kSetProperty, context, receiver, name, value,
- var_language_mode.value());
+ if (language_mode.IsJust()) {
+ TailCallRuntime(Runtime::kSetProperty, context, receiver, key, value,
+ SmiConstant(language_mode.FromJust()));
+ } else {
+ TVARIABLE(Smi, var_language_mode, SmiConstant(LanguageMode::kStrict));
+ Label call_runtime(this);
+ BranchIfStrictMode(vector, slot, &call_runtime);
+ var_language_mode = SmiConstant(LanguageMode::kSloppy);
+ Goto(&call_runtime);
+ BIND(&call_runtime);
+ TailCallRuntime(Runtime::kSetProperty, context, receiver, key, value,
+ var_language_mode.value());
+ }
}
}
+void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> name = CAST(Parameter(Descriptor::kName));
+ TNode<Object> value = CAST(Parameter(Descriptor::kValue));
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<FeedbackVector> vector = CAST(Parameter(Descriptor::kVector));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ KeyedStoreGeneric(context, receiver, name, value, Nothing<LanguageMode>(),
+ slot, vector);
+}
+
+void KeyedStoreGenericAssembler::SetProperty(TNode<Context> context,
+ TNode<Object> receiver,
+ TNode<Object> key,
+ TNode<Object> value,
+ LanguageMode language_mode) {
+ KeyedStoreGeneric(context, receiver, key, value, Just(language_mode),
+ TNode<Smi>(), TNode<FeedbackVector>());
+}
+
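Note: the new SetProperty entry point is a feedback-free [[Set]]: the caller fixes the LanguageMode up front instead of deriving it from a feedback slot, and the slow path tail-calls Runtime::kSetProperty with that mode. In JavaScript terms the mode only matters for failed stores; a rough sketch of the observable difference:

    const frozen = Object.freeze({ x: 1 });

    // Sloppy-mode store: the failed write is silently ignored.
    frozen.x = 2;

    // Strict-mode store: the same failed write throws.
    try {
      (function () { 'use strict'; frozen.x = 2; })();
    } catch (e) {
      // e is a TypeError (cannot assign to read-only property 'x')
    }
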
void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
typedef StoreWithVectorDescriptor Descriptor;
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 9cda8ef2e2..0934c96cc8 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -23,6 +23,13 @@ class KeyedStoreGenericGenerator {
TNode<Context> context, TNode<JSReceiver> receiver,
TNode<BoolT> is_simple_receiver, TNode<Name> name,
TNode<Object> value, LanguageMode language_mode);
+
+ // Same as above but more generic. I.e. the receiver can be anything and the
+ // key does not have to be unique. Essentially the same as KeyedStoreGeneric.
+ static void SetProperty(compiler::CodeAssemblerState* state,
+ TNode<Context> context, TNode<Object> receiver,
+ TNode<Object> key, TNode<Object> value,
+ LanguageMode language_mode);
};
class StoreICUninitializedGenerator {
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index e3457ca0f9..8624d47bf4 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -2,13 +2,14 @@ include_rules = [
"-src",
"-include/v8-debug.h",
"+src/base/atomicops.h",
+ "+src/base/compiler-specific.h",
"+src/base/macros.h",
"+src/base/logging.h",
"+src/base/platform/platform.h",
"+src/base/platform/mutex.h",
"+src/conversions.h",
"+src/flags.h",
- "+src/utils.h",
+ "+src/v8memory.h",
"+src/unicode-cache.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index 5e5302fbbe..ea0d871248 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -401,9 +401,12 @@ InjectedScript.prototype = {
var isAccessorProperty = descriptor && ("get" in descriptor || "set" in descriptor);
if (accessorPropertiesOnly && !isAccessorProperty)
continue;
- if (descriptor && "get" in descriptor && "set" in descriptor && name !== "__proto__" &&
+ // Special case for Symbol.prototype.description where the receiver of the getter is not an actual object.
+ // Should only occur for nested previews.
+ var isSymbolDescription = isSymbol(object) && name === 'description';
+ if (isSymbolDescription || (descriptor && "get" in descriptor && "set" in descriptor && name !== "__proto__" &&
InjectedScriptHost.formatAccessorsAsProperties(object, descriptor.get) &&
- !doesAttributeHaveObservableSideEffectOnGet(object, name)) {
+ !doesAttributeHaveObservableSideEffectOnGet(object, name))) {
descriptor.value = object[property];
descriptor.isOwn = true;
delete descriptor.get;
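Note: for context on the Symbol.prototype.description special case above, the getter's receiver is a Symbol primitive rather than an object, so the generic accessor-to-value formatting cannot be applied and the inspector reads the value directly for nested previews. A small sketch of the behaviour being previewed:

    const sym = Symbol('answer');
    sym.description;  // "answer", via the Symbol.prototype.description getter
    Object.getOwnPropertyDescriptor(Symbol.prototype, 'description');
    // { get: [Function], set: undefined, enumerable: false, configurable: true }
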
@@ -594,6 +597,9 @@ InjectedScript.prototype = {
return toString(obj);
if (subtype === "node") {
+ // We should warm up the Blink DOM binding before calling anything;
+ // see crbug.com/827585 for details.
+ InjectedScriptHost.getOwnPropertyDescriptor(/** @type {!Object} */(obj), "nodeName");
var description = "";
var nodeName = InjectedScriptHost.getProperty(obj, "nodeName");
if (nodeName) {
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index 5852b227e7..0d1b8d6e89 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -204,6 +204,7 @@ class InjectedScript::ProtocolPromiseHandler {
v8::Isolate* isolate = session->inspector()->isolate();
if (result->IsNativeError()) {
message = " " + toProtocolString(
+ isolate,
result->ToDetailString(isolate->GetCurrentContext())
.ToLocalChecked());
v8::Local<v8::StackTrace> stackTrace = v8::debug::GetDetailedStackTrace(
@@ -564,7 +565,9 @@ Response InjectedScript::createExceptionDetails(
v8::Local<v8::Message> message = tryCatch.Message();
v8::Local<v8::Value> exception = tryCatch.Exception();
String16 messageText =
- message.IsEmpty() ? String16() : toProtocolString(message->Get());
+ message.IsEmpty()
+ ? String16()
+ : toProtocolString(m_context->isolate(), message->Get());
std::unique_ptr<protocol::Runtime::ExceptionDetails> exceptionDetails =
protocol::Runtime::ExceptionDetails::create()
.setExceptionId(m_context->inspector()->nextExceptionId())
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index f5d8556da8..fbcb0f4338 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -375,8 +375,9 @@ String16::String16() {}
String16::String16(const String16& other)
: m_impl(other.m_impl), hash_code(other.hash_code) {}
-String16::String16(String16&& other)
- : m_impl(std::move(other.m_impl)), hash_code(other.hash_code) {}
+String16::String16(String16&& other) V8_NOEXCEPT
+ : m_impl(std::move(other.m_impl)),
+ hash_code(other.hash_code) {}
String16::String16(const UChar* characters, size_t size)
: m_impl(characters, size) {}
@@ -399,7 +400,7 @@ String16& String16::operator=(const String16& other) {
return *this;
}
-String16& String16::operator=(String16&& other) {
+String16& String16::operator=(String16&& other) V8_NOEXCEPT {
m_impl = std::move(other.m_impl);
hash_code = other.hash_code;
return *this;
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 461b2961e1..7c6f9e992d 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -12,6 +12,8 @@
#include <string>
#include <vector>
+#include "src/base/compiler-specific.h"
+
namespace v8_inspector {
using UChar = uint16_t;
@@ -22,7 +24,7 @@ class String16 {
String16();
String16(const String16& other);
- String16(String16&& other);
+ String16(String16&& other) V8_NOEXCEPT;
String16(const UChar* characters, size_t size);
String16(const UChar* characters); // NOLINT(runtime/explicit)
String16(const char* characters); // NOLINT(runtime/explicit)
@@ -30,7 +32,7 @@ class String16 {
explicit String16(const std::basic_string<UChar>& impl);
String16& operator=(const String16& other);
- String16& operator=(String16&& other);
+ String16& operator=(String16&& other) V8_NOEXCEPT;
static String16 fromInteger(int);
static String16 fromInteger(size_t);
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 508229365f..79c5dcc9cf 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -52,16 +52,18 @@ v8::Local<v8::String> toV8String(v8::Isolate* isolate,
.ToLocalChecked();
}
-String16 toProtocolString(v8::Local<v8::String> value) {
+String16 toProtocolString(v8::Isolate* isolate, v8::Local<v8::String> value) {
if (value.IsEmpty() || value->IsNullOrUndefined()) return String16();
std::unique_ptr<UChar[]> buffer(new UChar[value->Length()]);
- value->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, value->Length());
+ value->Write(isolate, reinterpret_cast<uint16_t*>(buffer.get()), 0,
+ value->Length());
return String16(buffer.get(), value->Length());
}
-String16 toProtocolStringWithTypeCheck(v8::Local<v8::Value> value) {
+String16 toProtocolStringWithTypeCheck(v8::Isolate* isolate,
+ v8::Local<v8::Value> value) {
if (value.IsEmpty() || !value->IsString()) return String16();
- return toProtocolString(value.As<v8::String>());
+ return toProtocolString(isolate, value.As<v8::String>());
}
String16 toString16(const StringView& string) {
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 0c025ef93a..97aaa93eb7 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -67,8 +67,8 @@ v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const String16&);
v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const char*);
v8::Local<v8::String> toV8String(v8::Isolate*, const StringView&);
// TODO(dgozman): rename to toString16.
-String16 toProtocolString(v8::Local<v8::String>);
-String16 toProtocolStringWithTypeCheck(v8::Local<v8::Value>);
+String16 toProtocolString(v8::Isolate*, v8::Local<v8::String>);
+String16 toProtocolStringWithTypeCheck(v8::Isolate*, v8::Local<v8::Value>);
String16 toString16(const StringView&);
StringView toStringView(const String16&);
bool stringViewStartsWith(const StringView&, const char*);
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 408f88bc1d..4bb0bf904e 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -118,13 +118,11 @@ class V8ValueStringBuilder {
!value->IsNativeError() && !value->IsRegExp()) {
v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
v8::Local<v8::String> stringValue;
- if (object->ObjectProtoToString(m_isolate->GetCurrentContext())
- .ToLocal(&stringValue))
+ if (object->ObjectProtoToString(m_context).ToLocal(&stringValue))
return append(stringValue);
}
v8::Local<v8::String> stringValue;
- if (!value->ToString(m_isolate->GetCurrentContext()).ToLocal(&stringValue))
- return false;
+ if (!value->ToString(m_context).ToLocal(&stringValue)) return false;
return append(stringValue);
}
@@ -160,7 +158,9 @@ class V8ValueStringBuilder {
}
bool append(v8::Local<v8::BigInt> bigint) {
- bool result = append(bigint->ToString());
+ v8::Local<v8::String> bigint_string;
+ if (!bigint->ToString(m_context).ToLocal(&bigint_string)) return false;
+ bool result = append(bigint_string);
if (m_tryCatch.HasCaught()) return false;
m_builder.append('n');
return result;
@@ -168,7 +168,9 @@ class V8ValueStringBuilder {
bool append(v8::Local<v8::String> string) {
if (m_tryCatch.HasCaught()) return false;
- if (!string.IsEmpty()) m_builder.append(toProtocolString(string));
+ if (!string.IsEmpty()) {
+ m_builder.append(toProtocolString(m_isolate, string));
+ }
return true;
}
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index faba4a082b..752b50fa36 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -24,9 +24,9 @@ namespace v8_inspector {
namespace {
String16 consoleContextToString(
- const v8::debug::ConsoleContext& consoleContext) {
+ v8::Isolate* isolate, const v8::debug::ConsoleContext& consoleContext) {
if (consoleContext.id() == 0) return String16();
- return toProtocolString(consoleContext.name()) + "#" +
+ return toProtocolString(isolate, consoleContext.name()) + "#" +
String16::fromInteger(consoleContext.id());
}
@@ -88,7 +88,7 @@ class ConsoleHelper {
V8ConsoleMessage::createForConsoleAPI(
m_context, m_contextId, m_groupId, m_inspector,
m_inspector->client()->currentTimeMS(), type, arguments,
- consoleContextToString(m_consoleContext),
+ consoleContextToString(m_isolate, m_consoleContext),
m_inspector->debugger()->captureStackTrace(false));
consoleMessageStorage()->addMessage(std::move(message));
}
@@ -124,7 +124,7 @@ class ConsoleHelper {
if (!m_info[0]->ToString(m_context).ToLocal(&titleValue))
return defaultValue;
}
- return toProtocolString(titleValue);
+ return toProtocolString(m_context->GetIsolate(), titleValue);
}
v8::MaybeLocal<v8::Object> firstArgAsObject() {
@@ -297,7 +297,8 @@ static String16 identifierFromTitleOrStackTrace(
} else {
identifier = title + "@";
}
- identifier = consoleContextToString(consoleContext) + "@" + identifier;
+ identifier = consoleContextToString(inspector->isolate(), consoleContext) +
+ "@" + identifier;
return identifier;
}
@@ -370,7 +371,8 @@ static void timeFunction(const v8::debug::ConsoleCallArguments& info,
String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
const String16& timerId =
- protocolTitle + "@" + consoleContextToString(consoleContext);
+ protocolTitle + "@" +
+ consoleContextToString(inspector->isolate(), consoleContext);
if (helper.consoleMessageStorage()->hasTimer(helper.contextId(), timerId)) {
helper.reportCallWithArgument(
ConsoleAPIType::kWarning,
@@ -388,7 +390,8 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
String16 protocolTitle = helper.firstArgToString("default", false);
if (timelinePrefix) protocolTitle = "Timeline '" + protocolTitle + "'";
const String16& timerId =
- protocolTitle + "@" + consoleContextToString(consoleContext);
+ protocolTitle + "@" +
+ consoleContextToString(inspector->isolate(), consoleContext);
if (!helper.consoleMessageStorage()->hasTimer(helper.contextId(), timerId)) {
helper.reportCallWithArgument(
ConsoleAPIType::kWarning,
@@ -398,7 +401,8 @@ static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
inspector->client()->consoleTimeEnd(toStringView(protocolTitle));
double elapsed = helper.consoleMessageStorage()->timeEnd(
helper.contextId(),
- protocolTitle + "@" + consoleContextToString(consoleContext));
+ protocolTitle + "@" +
+ consoleContextToString(inspector->isolate(), consoleContext));
String16 message =
protocolTitle + ": " + String16::fromDouble(elapsed) + "ms";
helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
@@ -527,7 +531,8 @@ void V8Console::monitorFunctionCallback(
v8::Local<v8::Value> name = function->GetName();
if (!name->IsString() || !v8::Local<v8::String>::Cast(name)->Length())
name = function->GetInferredName();
- String16 functionName = toProtocolStringWithTypeCheck(name);
+ String16 functionName =
+ toProtocolStringWithTypeCheck(info.GetIsolate(), name);
String16Builder builder;
builder.append("console.log(\"function ");
if (functionName.isEmpty())
@@ -779,7 +784,8 @@ void V8Console::CommandLineAPIScope::accessorGetterCallback(
v8::Local<v8::Value> value;
if (!commandLineAPI->Get(context, name).ToLocal(&value)) return;
- if (isCommandLineAPIGetter(toProtocolStringWithTypeCheck(name))) {
+ if (isCommandLineAPIGetter(
+ toProtocolStringWithTypeCheck(info.GetIsolate(), name))) {
DCHECK(value->IsFunction());
v8::MicrotasksScope microtasks(info.GetIsolate(),
v8::MicrotasksScope::kDoNotRunMicrotasks);
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index d9cb49b1d4..a27af98d8d 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -250,7 +250,7 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type) {
namespace {
-Response buildScopes(v8::debug::ScopeIterator* iterator,
+Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
InjectedScript* injectedScript,
std::unique_ptr<Array<Scope>>* scopes) {
*scopes = Array<Scope>::create();
@@ -270,8 +270,8 @@ Response buildScopes(v8::debug::ScopeIterator* iterator,
.setObject(std::move(object))
.build();
- String16 name =
- toProtocolStringWithTypeCheck(iterator->GetFunctionDebugName());
+ String16 name = toProtocolStringWithTypeCheck(
+ isolate, iterator->GetFunctionDebugName());
if (!name.isEmpty()) scope->setName(name);
if (iterator->HasLocationInfo()) {
@@ -878,7 +878,7 @@ Response V8DebuggerAgentImpl::setScriptSource(
*optOutCompileError =
protocol::Runtime::ExceptionDetails::create()
.setExceptionId(m_inspector->nextExceptionId())
- .setText(toProtocolString(result.message))
+ .setText(toProtocolString(m_isolate, result.message))
.setLineNumber(result.line_number != -1 ? result.line_number - 1
: 0)
.setColumnNumber(result.column_number != -1 ? result.column_number
@@ -1265,7 +1265,8 @@ Response V8DebuggerAgentImpl::currentCallFrames(
std::unique_ptr<Array<Scope>> scopes;
auto scopeIterator = iterator->GetScopeIterator();
- Response res = buildScopes(scopeIterator.get(), injectedScript, &scopes);
+ Response res =
+ buildScopes(m_isolate, scopeIterator.get(), injectedScript, &scopes);
if (!res.isSuccess()) return res;
std::unique_ptr<RemoteObject> protocolReceiver;
@@ -1300,15 +1301,15 @@ Response V8DebuggerAgentImpl::currentCallFrames(
url = scriptIterator->second->sourceURL();
}
- auto frame =
- CallFrame::create()
- .setCallFrameId(callFrameId)
- .setFunctionName(toProtocolString(iterator->GetFunctionDebugName()))
- .setLocation(std::move(location))
- .setUrl(url)
- .setScopeChain(std::move(scopes))
- .setThis(std::move(protocolReceiver))
- .build();
+ auto frame = CallFrame::create()
+ .setCallFrameId(callFrameId)
+ .setFunctionName(toProtocolString(
+ m_isolate, iterator->GetFunctionDebugName()))
+ .setLocation(std::move(location))
+ .setUrl(url)
+ .setScopeChain(std::move(scopes))
+ .setThis(std::move(protocolReceiver))
+ .build();
v8::Local<v8::Function> func = iterator->GetFunction();
if (!func.IsEmpty()) {
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index d861265e14..babb7700c6 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -8,7 +8,7 @@
#include "src/inspector/string-util.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/wasm-translation.h"
-#include "src/utils.h"
+#include "src/v8memory.h"
namespace v8_inspector {
@@ -113,7 +113,7 @@ class ActualScript : public V8DebuggerScript {
ActualScript(v8::Isolate* isolate, v8::Local<v8::debug::Script> script,
bool isLiveEdit, V8InspectorClient* client)
: V8DebuggerScript(isolate, String16::fromInteger(script->Id()),
- GetScriptURL(script, client)),
+ GetScriptURL(isolate, script, client)),
m_isLiveEdit(isLiveEdit) {
Initialize(script);
}
@@ -219,14 +219,15 @@ class ActualScript : public V8DebuggerScript {
}
private:
- String16 GetScriptURL(v8::Local<v8::debug::Script> script,
+ String16 GetScriptURL(v8::Isolate* isolate,
+ v8::Local<v8::debug::Script> script,
V8InspectorClient* client) {
v8::Local<v8::String> sourceURL;
if (script->SourceURL().ToLocal(&sourceURL) && sourceURL->Length() > 0)
- return toProtocolString(sourceURL);
+ return toProtocolString(isolate, sourceURL);
v8::Local<v8::String> v8Name;
if (script->Name().ToLocal(&v8Name) && v8Name->Length() > 0) {
- String16 name = toProtocolString(v8Name);
+ String16 name = toProtocolString(isolate, v8Name);
std::unique_ptr<StringBuffer> url =
client->resourceNameToUrl(toStringView(name));
return url ? toString16(url->string()) : name;
@@ -243,7 +244,7 @@ class ActualScript : public V8DebuggerScript {
m_hasSourceURLComment =
script->SourceURL().ToLocal(&tmp) && tmp->Length() > 0;
if (script->SourceMappingURL().ToLocal(&tmp))
- m_sourceMappingURL = toProtocolString(tmp);
+ m_sourceMappingURL = toProtocolString(m_isolate, tmp);
m_startLine = script->LineOffset();
m_startColumn = script->ColumnOffset();
std::vector<int> lineEnds = script->LineEnds();
@@ -264,7 +265,7 @@ class ActualScript : public V8DebuggerScript {
USE(script->ContextId().To(&m_executionContextId));
if (script->Source().ToLocal(&tmp)) {
- m_source = toProtocolString(tmp);
+ m_source = toProtocolString(m_isolate, tmp);
}
m_isModule = script->IsModule();
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 7d1f7cefd1..ccc674af43 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -722,7 +722,7 @@ v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
String16 name;
v8::Local<v8::Value> maybe_name = iterator->GetFunctionDebugName();
if (!maybe_name->IsUndefined()) {
- name = toProtocolStringWithTypeCheck(maybe_name);
+ name = toProtocolStringWithTypeCheck(m_isolate, maybe_name);
}
v8::Local<v8::Object> object = iterator->GetObject();
createDataProperty(context, scope,
@@ -1134,7 +1134,7 @@ std::shared_ptr<StackFrame> V8Debugger::symbolize(
if (it != m_framesCache.end() && !it->second.expired()) {
return std::shared_ptr<StackFrame>(it->second);
}
- std::shared_ptr<StackFrame> frame(new StackFrame(v8Frame));
+ std::shared_ptr<StackFrame> frame(new StackFrame(isolate(), v8Frame));
// TODO(clemensh): Figure out a way to do this translation only right before
// sending the stack trace over wire.
if (v8Frame->IsWasm()) frame->translate(&m_wasmTranslation);
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index b876a956b2..f255287c03 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -335,19 +335,20 @@ Response V8HeapProfilerAgentImpl::startSampling(
namespace {
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode>
-buildSampingHeapProfileNode(const v8::AllocationProfile::Node* node) {
+buildSampingHeapProfileNode(v8::Isolate* isolate,
+ const v8::AllocationProfile::Node* node) {
auto children = protocol::Array<
protocol::HeapProfiler::SamplingHeapProfileNode>::create();
for (const auto* child : node->children)
- children->addItem(buildSampingHeapProfileNode(child));
+ children->addItem(buildSampingHeapProfileNode(isolate, child));
size_t selfSize = 0;
for (const auto& allocation : node->allocations)
selfSize += allocation.size * allocation.count;
std::unique_ptr<protocol::Runtime::CallFrame> callFrame =
protocol::Runtime::CallFrame::create()
- .setFunctionName(toProtocolString(node->name))
+ .setFunctionName(toProtocolString(isolate, node->name))
.setScriptId(String16::fromInteger(node->script_id))
- .setUrl(toProtocolString(node->script_name))
+ .setUrl(toProtocolString(isolate, node->script_name))
.setLineNumber(node->line_number - 1)
.setColumnNumber(node->column_number - 1)
.build();
@@ -383,7 +384,7 @@ Response V8HeapProfilerAgentImpl::getSamplingProfile(
return Response::Error("V8 sampling heap profiler was not started.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
*profile = protocol::HeapProfiler::SamplingHeapProfile::create()
- .setHead(buildSampingHeapProfileNode(root))
+ .setHead(buildSampingHeapProfileNode(m_isolate, root))
.build();
return Response::OK();
}
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index 23f56f93e2..d9c1d59aa8 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -299,7 +299,7 @@ void V8InjectedScriptHost::getInternalPropertiesCallback(
tryCatch.Reset();
continue;
}
- String16 keyString = toProtocolStringWithTypeCheck(key);
+ String16 keyString = toProtocolStringWithTypeCheck(isolate, key);
if (keyString.isEmpty() ||
allowedProperties.find(keyString) == allowedProperties.end())
continue;
@@ -337,7 +337,8 @@ void V8InjectedScriptHost::bindCallback(
v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
v8::Local<v8::String> v8groupName =
info[1]->ToString(context).ToLocalChecked();
- String16 groupName = toProtocolStringWithTypeCheck(v8groupName);
+ String16 groupName =
+ toProtocolStringWithTypeCheck(info.GetIsolate(), v8groupName);
int id = injectedScript->bindObject(info[0], groupName);
info.GetReturnValue().Set(id);
}
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 6272e4b4b2..62790a6335 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -188,6 +188,17 @@ InspectedContext* V8InspectorImpl::getContext(int contextId) const {
return getContext(contextGroupId(contextId), contextId);
}
+v8::MaybeLocal<v8::Context> V8InspectorImpl::contextById(
+ int groupId, v8::Maybe<int> contextId) {
+ if (contextId.IsNothing()) {
+ v8::Local<v8::Context> context =
+ client()->ensureDefaultContextInGroup(groupId);
+ return context.IsEmpty() ? v8::MaybeLocal<v8::Context>() : context;
+ }
+ InspectedContext* context = getContext(contextId.FromJust());
+ return context ? context->context() : v8::MaybeLocal<v8::Context>();
+}
+
void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
int contextId = ++m_lastContextId;
InspectedContext* context = new InspectedContext(this, info, contextId);
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index b255feed40..2124ba6250 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -78,6 +78,8 @@ class V8InspectorImpl : public V8Inspector {
const StringView& state) override;
void contextCreated(const V8ContextInfo&) override;
void contextDestroyed(v8::Local<v8::Context>) override;
+ v8::MaybeLocal<v8::Context> contextById(int groupId,
+ v8::Maybe<int> contextId) override;
void contextCollected(int contextGroupId, int contextId);
void resetContextGroup(int contextGroupId) override;
void idleStarted() override;
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 1d8d12ac0d..d37f87a2a7 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -170,6 +170,12 @@ void V8InspectorSessionImpl::sendProtocolNotification(
m_channel->sendNotification(MessageBuffer::create(std::move(message)));
}
+void V8InspectorSessionImpl::fallThrough(int callId, const String16& method,
+ const String16& message) {
+ // There's no other layer to handle the command.
+ UNREACHABLE();
+}
+
void V8InspectorSessionImpl::flushProtocolNotifications() {
m_channel->flushProtocolNotifications();
}
@@ -313,7 +319,15 @@ void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
void V8InspectorSessionImpl::dispatchProtocolMessage(
const StringView& message) {
- m_dispatcher.dispatch(protocol::StringUtil::parseJSON(message));
+ int callId;
+ String16 method;
+ std::unique_ptr<protocol::Value> parsedMessage =
+ protocol::StringUtil::parseJSON(message);
+ if (m_dispatcher.parseCommand(parsedMessage.get(), &callId, &method)) {
+ // Pass empty string instead of the actual message to save on a conversion.
+ // We're allowed to do so because fall-through is not implemented.
+ m_dispatcher.dispatch(callId, method, std::move(parsedMessage), "");
+ }
}
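Note: dispatchProtocolMessage now parses the command once, extracts the call id and method via parseCommand, and dispatches with an empty raw-message string because this session has no lower layer to fall through to (fallThrough is UNREACHABLE). An illustrative sketch of the message shape involved; the payload values here are hypothetical, only "id" and "method" are what parseCommand pulls out:

    const message = JSON.stringify({
      id: 42,                        // callId extracted by parseCommand
      method: 'Runtime.evaluate',    // method extracted by parseCommand
      params: { expression: '1 + 1', returnByValue: true },
    });
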
std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 8ca0915b66..85861a05bf 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -101,6 +101,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
int callId, std::unique_ptr<protocol::Serializable> message) override;
void sendProtocolNotification(
std::unique_ptr<protocol::Serializable> message) override;
+ void fallThrough(int callId, const String16& method,
+ const String16& message) override;
void flushProtocolNotifications() override;
int m_contextGroupId;
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index f14815fdc4..1e16218e1c 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -34,7 +34,7 @@ namespace {
String16 resourceNameToUrl(V8InspectorImpl* inspector,
v8::Local<v8::String> v8Name) {
- String16 name = toProtocolString(v8Name);
+ String16 name = toProtocolString(inspector->isolate(), v8Name);
if (!inspector) return name;
std::unique_ptr<StringBuffer> url =
inspector->client()->resourceNameToUrl(toStringView(name));
@@ -66,7 +66,7 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
v8::HandleScope handleScope(isolate);
auto callFrame =
protocol::Runtime::CallFrame::create()
- .setFunctionName(toProtocolString(node->GetFunctionName()))
+ .setFunctionName(toProtocolString(isolate, node->GetFunctionName()))
.setScriptId(String16::fromInteger(node->GetScriptId()))
.setUrl(resourceNameToUrl(inspector, node->GetScriptResourceName()))
.setLineNumber(node->GetLineNumber() - 1)
@@ -337,6 +337,7 @@ Response coverageToProtocol(
out_result) {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>> result =
protocol::Array<protocol::Profiler::ScriptCoverage>::create();
+ v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < coverage.ScriptCount(); i++) {
v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
v8::Local<v8::debug::Script> script = script_data.GetScript();
@@ -366,6 +367,7 @@ Response coverageToProtocol(
functions->addItem(
protocol::Profiler::FunctionCoverage::create()
.setFunctionName(toProtocolString(
+ isolate,
function_data.Name().FromMaybe(v8::Local<v8::String>())))
.setRanges(std::move(ranges))
.setIsBlockCoverage(function_data.HasBlockCoverage())
@@ -374,7 +376,7 @@ Response coverageToProtocol(
String16 url;
v8::Local<v8::String> name;
if (script->SourceURL().ToLocal(&name) && name->Length()) {
- url = toProtocolString(name);
+ url = toProtocolString(isolate, name);
} else if (script->Name().ToLocal(&name) && name->Length()) {
url = resourceNameToUrl(inspector, name);
}
@@ -416,6 +418,7 @@ typeProfileToProtocol(V8InspectorImpl* inspector,
const v8::debug::TypeProfile& type_profile) {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
result = protocol::Array<protocol::Profiler::ScriptTypeProfile>::create();
+ v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < type_profile.ScriptCount(); i++) {
v8::debug::TypeProfile::ScriptData script_data =
type_profile.GetScriptData(i);
@@ -428,10 +431,11 @@ typeProfileToProtocol(V8InspectorImpl* inspector,
std::unique_ptr<protocol::Array<protocol::Profiler::TypeObject>> types =
protocol::Array<protocol::Profiler::TypeObject>::create();
for (const auto& type : entry.Types()) {
- types->addItem(protocol::Profiler::TypeObject::create()
- .setName(toProtocolString(
- type.FromMaybe(v8::Local<v8::String>())))
- .build());
+ types->addItem(
+ protocol::Profiler::TypeObject::create()
+ .setName(toProtocolString(
+ isolate, type.FromMaybe(v8::Local<v8::String>())))
+ .build());
}
entries->addItem(protocol::Profiler::TypeProfileEntry::create()
.setOffset(entry.SourcePosition())
@@ -441,7 +445,7 @@ typeProfileToProtocol(V8InspectorImpl* inspector,
String16 url;
v8::Local<v8::String> name;
if (script->SourceURL().ToLocal(&name) && name->Length()) {
- url = toProtocolString(name);
+ url = toProtocolString(isolate, name);
} else if (script->Name().ToLocal(&name) && name->Length()) {
url = resourceNameToUrl(inspector, name);
}
diff --git a/deps/v8/src/inspector/v8-regex.cc b/deps/v8/src/inspector/v8-regex.cc
index 0bab4364c4..5f43d84e2c 100644
--- a/deps/v8/src/inspector/v8-regex.cc
+++ b/deps/v8/src/inspector/v8-regex.cc
@@ -32,7 +32,7 @@ V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
.ToLocal(&regex))
m_regex.Reset(isolate, regex);
else if (tryCatch.HasCaught())
- m_errorMessage = toProtocolString(tryCatch.Message()->Get());
+ m_errorMessage = toProtocolString(isolate, tryCatch.Message()->Get());
else
m_errorMessage = "Internal error";
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 31111add16..d0ae633945 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -426,7 +426,8 @@ Response V8RuntimeAgentImpl::getProperties(
if (!response.isSuccess()) return response;
propertiesProtocolArray->addItem(
InternalPropertyDescriptor::create()
- .setName(toProtocolString(name.As<v8::String>()))
+ .setName(
+ toProtocolString(m_inspector->isolate(), name.As<v8::String>()))
.setValue(std::move(wrappedValue))
.build());
}
@@ -622,7 +623,8 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames(
v8::debug::GlobalLexicalScopeNames(scope.context(), &names);
*outNames = protocol::Array<String16>::create();
for (size_t i = 0; i < names.Size(); ++i) {
- (*outNames)->addItem(toProtocolString(names.Get(i)));
+ (*outNames)->addItem(
+ toProtocolString(m_inspector->isolate(), names.Get(i)));
}
return Response::OK();
}
@@ -690,8 +692,10 @@ void V8RuntimeAgentImpl::bindingCallback(
int contextId = InspectedContext::contextId(isolate->GetCurrentContext());
int contextGroupId = inspector->contextGroupId(contextId);
- String16 name = toProtocolString(v8::Local<v8::String>::Cast(info.Data()));
- String16 payload = toProtocolString(v8::Local<v8::String>::Cast(info[0]));
+ String16 name =
+ toProtocolString(isolate, v8::Local<v8::String>::Cast(info.Data()));
+ String16 payload =
+ toProtocolString(isolate, v8::Local<v8::String>::Cast(info[0]));
inspector->forEachSession(
contextGroupId,
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 9be0d4fa38..21ca98d911 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -28,7 +28,8 @@ std::vector<std::shared_ptr<StackFrame>> toFramesVector(
int frameCount = std::min(v8StackTrace->GetFrameCount(), maxStackSize);
std::vector<std::shared_ptr<StackFrame>> frames(frameCount);
for (int i = 0; i < frameCount; ++i) {
- frames[i] = debugger->symbolize(v8StackTrace->GetFrame(i));
+ frames[i] =
+ debugger->symbolize(v8StackTrace->GetFrame(debugger->isolate(), i));
}
return frames;
}
@@ -116,10 +117,11 @@ V8StackTraceId::V8StackTraceId(uintptr_t id,
bool V8StackTraceId::IsInvalid() const { return !id; }
-StackFrame::StackFrame(v8::Local<v8::StackFrame> v8Frame)
- : m_functionName(toProtocolString(v8Frame->GetFunctionName())),
+StackFrame::StackFrame(v8::Isolate* isolate, v8::Local<v8::StackFrame> v8Frame)
+ : m_functionName(toProtocolString(isolate, v8Frame->GetFunctionName())),
m_scriptId(String16::fromInteger(v8Frame->GetScriptId())),
- m_sourceURL(toProtocolString(v8Frame->GetScriptNameOrSourceURL())),
+ m_sourceURL(
+ toProtocolString(isolate, v8Frame->GetScriptNameOrSourceURL())),
m_lineNumber(v8Frame->GetLineNumber() - 1),
m_columnNumber(v8Frame->GetColumn() - 1),
m_hasSourceURLComment(v8Frame->GetScriptName() !=
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 019fd469cd..abda0f12ee 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -23,7 +23,7 @@ struct V8StackTraceId;
class StackFrame {
public:
- explicit StackFrame(v8::Local<v8::StackFrame> frame);
+ explicit StackFrame(v8::Isolate* isolate, v8::Local<v8::StackFrame> frame);
~StackFrame() = default;
void translate(WasmTranslation* wasmTranslation);
diff --git a/deps/v8/src/inspector/v8-value-utils.cc b/deps/v8/src/inspector/v8-value-utils.cc
index 3835f34f6d..feaffd36d0 100644
--- a/deps/v8/src/inspector/v8-value-utils.cc
+++ b/deps/v8/src/inspector/v8-value-utils.cc
@@ -39,8 +39,8 @@ protocol::Response toProtocolValue(v8::Local<v8::Context> context,
return Response::OK();
}
if (value->IsString()) {
- *result =
- protocol::StringValue::create(toProtocolString(value.As<v8::String>()));
+ *result = protocol::StringValue::create(
+ toProtocolString(context->GetIsolate(), value.As<v8::String>()));
return Response::OK();
}
if (value->IsArray()) {
@@ -90,8 +90,9 @@ protocol::Response toProtocolValue(v8::Local<v8::Context> context,
Response response =
toProtocolValue(context, property, maxDepth, &propertyValue);
if (!response.isSuccess()) return response;
- jsonObject->setValue(toProtocolString(propertyName),
- std::move(propertyValue));
+ jsonObject->setValue(
+ toProtocolString(context->GetIsolate(), propertyName),
+ std::move(propertyValue));
}
*result = std::move(jsonObject);
return Response::OK();
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
index 4754af5442..f049871202 100644
--- a/deps/v8/src/inspector/wasm-translation.cc
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -219,7 +219,8 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
private:
String16 GetFakeScriptUrl(v8::Isolate* isolate, int func_index) {
v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
- String16 script_name = toProtocolString(script->Name().ToLocalChecked());
+ String16 script_name =
+ toProtocolString(isolate, script->Name().ToLocalChecked());
int numFunctions = script->NumFunctions();
int numImported = script->NumImportedFunctions();
String16Builder builder;
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 85c2b56e21..7438731c20 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -9,6 +9,8 @@ namespace internal {
void CallInterfaceDescriptorData::InitializePlatformSpecific(
int register_parameter_count, const Register* registers) {
+ DCHECK(!IsInitializedPlatformIndependent());
+
register_param_count_ = register_parameter_count;
// InterfaceDescriptor owns a copy of the registers array.
@@ -21,19 +23,37 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
void CallInterfaceDescriptorData::InitializePlatformIndependent(
Flags flags, int return_count, int parameter_count,
const MachineType* machine_types, int machine_types_length) {
+ DCHECK(IsInitializedPlatformSpecific());
+
flags_ = flags;
return_count_ = return_count;
param_count_ = parameter_count;
- int types_length = return_count_ + param_count_;
- machine_types_ = NewArray<MachineType>(types_length);
- for (int i = 0; i < types_length; i++) {
- if (machine_types == nullptr || i >= machine_types_length) {
- machine_types_[i] = MachineType::AnyTagged();
- } else {
- machine_types_[i] = machine_types[i];
- }
+ const int types_length = return_count_ + param_count_;
+
+ // Machine types are either fully initialized or null.
+ if (machine_types == nullptr) {
+ machine_types_ =
+ NewArray<MachineType>(types_length, MachineType::AnyTagged());
+ } else {
+ DCHECK_EQ(machine_types_length, types_length);
+ machine_types_ = NewArray<MachineType>(types_length);
+ for (int i = 0; i < types_length; i++) machine_types_[i] = machine_types[i];
+ }
+
+ DCHECK(AllStackParametersAreTagged());
+}
+
+#ifdef DEBUG
+bool CallInterfaceDescriptorData::AllStackParametersAreTagged() const {
+ DCHECK(IsInitialized());
+ const int types_length = return_count_ + param_count_;
+ const int first_stack_param = return_count_ + register_param_count_;
+ for (int i = first_stack_param; i < types_length; i++) {
+ if (!machine_types_[i].IsTagged()) return false;
}
+ return true;
}
+#endif // DEBUG
void CallInterfaceDescriptorData::Reset() {
delete[] machine_types_;
@@ -119,6 +139,28 @@ void CEntry1ArgvOnStackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+namespace {
+
+void InterpreterCEntryDescriptor_InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {kRuntimeCallArgCountRegister,
+ kRuntimeCallArgvRegister,
+ kRuntimeCallFunctionRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+} // namespace
+
+void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
+void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
+}
+
void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ScopeInfoRegister(), SlotsRegister()};
@@ -291,5 +333,10 @@ void WasmGrowMemoryDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index cf841383b2..ee9abac9ea 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -74,6 +74,7 @@ namespace internal {
V(FrameDropperTrampoline) \
V(RunMicrotasks) \
V(WasmGrowMemory) \
+ V(CloneObjectWithVector) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
@@ -108,8 +109,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
void Reset();
bool IsInitialized() const {
- return register_param_count_ >= 0 && return_count_ >= 0 &&
- param_count_ >= 0;
+ return IsInitializedPlatformSpecific() &&
+ IsInitializedPlatformIndependent();
}
Flags flags() const { return flags_; }
@@ -138,6 +139,23 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
RegList allocatable_registers() const { return allocatable_registers_; }
private:
+ bool IsInitializedPlatformSpecific() const {
+ const bool initialized =
+ register_param_count_ >= 0 && register_params_ != nullptr;
+ // Platform-specific initialization happens before platform-independent.
+ return initialized;
+ }
+ bool IsInitializedPlatformIndependent() const {
+ const bool initialized =
+ return_count_ >= 0 && param_count_ >= 0 && machine_types_ != nullptr;
+ // Platform-specific initialization happens before platform-independent.
+ return initialized;
+ }
+
+#ifdef DEBUG
+ bool AllStackParametersAreTagged() const;
+#endif // DEBUG
+
int register_param_count_ = -1;
int return_count_ = -1;
int param_count_ = -1;
@@ -288,7 +306,12 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
explicit name() : base(key()) {} \
static inline CallDescriptors::Key key();
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+// TODO(jgruber,v8:6666): Keep kRootRegister free unconditionally.
+constexpr int kMaxBuiltinRegisterParams = 4;
+#else
constexpr int kMaxBuiltinRegisterParams = 5;
+#endif
#define DECLARE_DEFAULT_DESCRIPTOR(name, base) \
DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
@@ -1021,6 +1044,17 @@ class WasmGrowMemoryDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmGrowMemoryDescriptor, CallInterfaceDescriptor)
};
+class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
+ DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
+ MachineType::AnyTagged(), // kSource
+ MachineType::TaggedSigned(), // kFlags
+ MachineType::TaggedSigned(), // kSlot
+ MachineType::AnyTagged()) // kVector
+ DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor, CallInterfaceDescriptor)
+};
+
#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
class Name##Descriptor : public CallInterfaceDescriptor { \
public: \
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index b0162c77ad..33731599c8 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -986,6 +986,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEmptyObjectLiteral() {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CloneObject(Register source,
+ int flags,
+ int feedback_slot) {
+ OutputCloneObject(source, flags, feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::GetTemplateObject(
size_t template_object_description_entry, int feedback_slot) {
OutputGetTemplateObject(template_object_description_entry, feedback_slot);
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index f34f6f3a7d..3feda90495 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -238,6 +238,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
int literal_index, int flags,
Register output);
BytecodeArrayBuilder& CreateEmptyObjectLiteral();
+ BytecodeArrayBuilder& CloneObject(Register source, int flags,
+ int feedback_slot);
// Gets or creates the template for a TemplateObjectDescription which will
// be inserted at constant pool index |template_object_description_entry|.
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index b44f726e92..3dbd009879 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -4,7 +4,7 @@
#include "src/interpreter/bytecode-array-writer.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 79792a3d56..11a19443e1 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,7 +4,7 @@
#include "src/interpreter/bytecode-generator.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
@@ -1815,6 +1815,14 @@ void BytecodeGenerator::AddToEagerLiteralsIfEager(FunctionLiteral* literal) {
}
}
+bool BytecodeGenerator::ShouldOptimizeAsOneShot() const {
+ if (!FLAG_enable_one_shot_optimization) return false;
+
+ if (loop_depth_ > 0) return false;
+
+ return info()->literal()->is_top_level() || info()->literal()->is_iife();
+}
+
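Note: ShouldOptimizeAsOneShot above gates the one-shot object-literal path (behind --enable-one-shot-optimization) to code expected to run once: top-level code or an IIFE, and never inside a loop. A rough JavaScript sketch of what does and does not qualify:

    // Top-level literal: a candidate for the one-shot path, which calls
    // Runtime::kCreateObjectLiteralWithoutAllocationSite below.
    const config = { retries: 3, verbose: false };

    // Inside an IIFE: still a candidate.
    const built = (function () {
      return { kind: 'once' };
    })();

    // Inside a loop (loop_depth_ > 0): not one-shot; keeps the regular
    // CreateObjectLiteral path with a literal feedback slot.
    const items = [];
    for (let i = 0; i < 3; i++) items.push({ i });
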
void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
size_t class_boilerplate_entry =
builder()->AllocateDeferredConstantPoolEntry();
@@ -2097,6 +2105,27 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
expr->flags());
}
+void BytecodeGenerator::BuildCreateObjectLiteral(Register literal,
+ uint8_t flags, size_t entry) {
+ if (ShouldOptimizeAsOneShot()) {
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadConstantPoolEntry(entry)
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(Smi::FromInt(flags))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kCreateObjectLiteralWithoutAllocationSite, args)
+ .StoreAccumulatorInRegister(literal);
+
+ } else {
+ // TODO(cbruni): Directly generate runtime call for literals we cannot
+ // optimize once the CreateShallowObjectLiteral stub is in sync with the TF
+ // optimizations.
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
+ builder()->CreateObjectLiteral(entry, literal_index, flags, literal);
+ }
+}
+
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->InitDepthAndFlags();
@@ -2108,42 +2137,63 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
return;
}
- int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
// Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
expr->ComputeFlags(), expr->IsFastCloningSupported());
Register literal = register_allocator()->NewRegister();
- size_t entry;
- // If constant properties is an empty fixed array, use a cached empty fixed
- // array to ensure it's only added to the constant pool once.
- if (expr->properties_count() == 0) {
- entry = builder()->EmptyObjectBoilerplateDescriptionConstantPoolEntry();
+
+ // Create literal object.
+ int property_index = 0;
+ bool clone_object_spread =
+ expr->properties()->first()->kind() == ObjectLiteral::Property::SPREAD;
+ if (clone_object_spread) {
+ // Avoid the slow path for spreads in the following common cases:
+ // 1) `let obj = { ...source }`
+ // 2) `let obj = { ...source, override: 1 }`
+ // 3) `let obj = { ...source, ...overrides }`
+ RegisterAllocationScope register_scope(this);
+ Expression* property = expr->properties()->first()->value();
+ Register from_value = VisitForRegisterValue(property);
+
+ BytecodeLabels clone_object(zone());
+ builder()->JumpIfUndefined(clone_object.New());
+ builder()->JumpIfNull(clone_object.New());
+ builder()->ToObject(from_value);
+
+ clone_object.Bind(builder());
+ int clone_index = feedback_index(feedback_spec()->AddCloneObjectSlot());
+ builder()->CloneObject(from_value, flags, clone_index);
+ builder()->StoreAccumulatorInRegister(literal);
+ property_index++;
} else {
- entry = builder()->AllocateDeferredConstantPoolEntry();
- object_literals_.push_back(std::make_pair(expr, entry));
+ size_t entry;
+ // If constant properties is an empty fixed array, use a cached empty fixed
+ // array to ensure it's only added to the constant pool once.
+ if (expr->properties_count() == 0) {
+ entry = builder()->EmptyObjectBoilerplateDescriptionConstantPoolEntry();
+ } else {
+ entry = builder()->AllocateDeferredConstantPoolEntry();
+ object_literals_.push_back(std::make_pair(expr, entry));
+ }
+ BuildCreateObjectLiteral(literal, flags, entry);
}
- // TODO(cbruni): Directly generate runtime call for literals we cannot
- // optimize once the CreateShallowObjectLiteral stub is in sync with the TF
- // optimizations.
- builder()->CreateObjectLiteral(entry, literal_index, flags, literal);
// Store computed values into the literal.
- int property_index = 0;
AccessorTable accessor_table(zone());
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
- if (property->IsCompileTimeValue()) continue;
+ if (!clone_object_spread && property->IsCompileTimeValue()) continue;
RegisterAllocationScope inner_register_scope(this);
Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::SPREAD:
- case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
+ case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- DCHECK(!property->value()->IsCompileTimeValue());
+ DCHECK(clone_object_spread || !property->value()->IsCompileTimeValue());
V8_FALLTHROUGH;
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
@@ -2382,22 +2432,41 @@ void BytecodeGenerator::BuildArrayLiteralElementsInsertion(
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
expr->InitDepthAndFlags();
+ uint8_t flags = CreateArrayLiteralFlags::Encode(
+ expr->IsFastCloningSupported(), expr->ComputeFlags());
- // Deep-copy the literal boilerplate.
- int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
- if (expr->is_empty()) {
+ bool is_empty = expr->is_empty();
+ bool optimize_as_one_shot = ShouldOptimizeAsOneShot();
+ size_t entry;
+ if (is_empty && optimize_as_one_shot) {
+ entry = builder()->EmptyArrayBoilerplateDescriptionConstantPoolEntry();
+ } else if (!is_empty) {
+ entry = builder()->AllocateDeferredConstantPoolEntry();
+ array_literals_.push_back(std::make_pair(expr, entry));
+ }
+
+ if (optimize_as_one_shot) {
+ // Create array literal without any allocation sites
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadConstantPoolEntry(entry)
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(Smi::FromInt(flags))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kCreateArrayLiteralWithoutAllocationSite, args);
+ } else if (is_empty) {
// Empty array literal fast-path.
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
DCHECK(expr->IsFastCloningSupported());
builder()->CreateEmptyArrayLiteral(literal_index);
return;
+ } else {
+ // Deep-copy the literal boilerplate
+ int literal_index = feedback_index(feedback_spec()->AddLiteralSlot());
+ builder()->CreateArrayLiteral(entry, literal_index, flags);
}
- uint8_t flags = CreateArrayLiteralFlags::Encode(
- expr->IsFastCloningSupported(), expr->ComputeFlags());
- size_t entry = builder()->AllocateDeferredConstantPoolEntry();
- builder()->CreateArrayLiteral(entry, literal_index, flags);
- array_literals_.push_back(std::make_pair(expr, entry));
-
Register literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
// Insert all elements except the constant ones, since they are already there.
@@ -2751,6 +2820,54 @@ void BytecodeGenerator::BuildVariableAssignment(
}
}
+void BytecodeGenerator::BuildLoadNamedProperty(Property* property,
+ Register object,
+ const AstRawString* name) {
+ if (ShouldOptimizeAsOneShot()) {
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ size_t name_index = builder()->GetConstantPoolEntry(name);
+ builder()
+ ->MoveRegister(object, args[0])
+ .LoadConstantPoolEntry(name_index)
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kInlineGetProperty, args);
+ } else {
+ FeedbackSlot slot = GetCachedLoadICSlot(property->obj(), name);
+ builder()->LoadNamedProperty(object, name, feedback_index(slot));
+ }
+}
+
+void BytecodeGenerator::BuildStoreNamedProperty(Property* property,
+ Register object,
+ const AstRawString* name) {
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
+
+ if (ShouldOptimizeAsOneShot()) {
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ size_t name_index = builder()->GetConstantPoolEntry(name);
+ builder()
+ ->MoveRegister(object, args[0])
+ .StoreAccumulatorInRegister(args[2])
+ .LoadConstantPoolEntry(name_index)
+ .StoreAccumulatorInRegister(args[1])
+ .LoadLiteral(Smi::FromEnum(language_mode()))
+ .StoreAccumulatorInRegister(args[3])
+ .CallRuntime(Runtime::kSetProperty, args);
+ } else {
+ FeedbackSlot slot = GetCachedStoreICSlot(property->obj(), name);
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
+ language_mode());
+ }
+
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression() ||
(expr->op() == Token::INIT && expr->target()->IsVariableProxy() &&
@@ -2812,8 +2929,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
}
case NAMED_PROPERTY: {
- FeedbackSlot slot = GetCachedLoadICSlot(property->obj(), name);
- builder()->LoadNamedProperty(object, name, feedback_index(slot));
+ BuildLoadNamedProperty(property, object, name);
break;
}
case KEYED_PROPERTY: {
@@ -2863,17 +2979,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
}
case NAMED_PROPERTY: {
- FeedbackSlot slot = GetCachedStoreICSlot(property->obj(), name);
- Register value;
- if (!execution_result()->IsEffect()) {
- value = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(value);
- }
- builder()->StoreNamedProperty(object, name, feedback_index(slot),
- language_mode());
- if (!execution_result()->IsEffect()) {
- builder()->LoadAccumulatorWithRegister(value);
- }
+ BuildStoreNamedProperty(property, object, name);
break;
}
case KEYED_PROPERTY: {
@@ -3339,11 +3445,9 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
UNREACHABLE();
case NAMED_PROPERTY: {
builder()->SetExpressionPosition(property);
- builder()->LoadNamedProperty(
- obj, property->key()->AsLiteral()->AsRawPropertyName(),
- feedback_index(GetCachedLoadICSlot(
- property->obj(),
- property->key()->AsLiteral()->AsRawPropertyName())));
+ const AstRawString* name =
+ property->key()->AsLiteral()->AsRawPropertyName();
+ BuildLoadNamedProperty(property, obj, name);
break;
}
case KEYED_PROPERTY: {
@@ -4726,11 +4830,7 @@ void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
// Allocate and initialize a new arguments object and assign to the
// {arguments} variable.
- CreateArgumentsType type =
- is_strict(language_mode()) || !info()->has_simple_parameters()
- ? CreateArgumentsType::kUnmappedArguments
- : CreateArgumentsType::kMappedArguments;
- builder()->CreateArguments(type);
+ builder()->CreateArguments(closure_scope()->GetArgumentsType());
BuildVariableAssignment(variable, Token::ASSIGN, HoleCheckMode::kElided);
}
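For orientation, a minimal JavaScript sketch of the spread shapes targeted by the new CloneObject fast path in VisitObjectLiteral above (object literals whose first property is a spread). This is plain ECMAScript behavior used for illustration, not V8-internal API:

    // 1) plain clone, 2) clone plus literal override, 3) clone plus spread override
    const source = { a: 1, get b() { return 2; } };
    const overrides = { a: 9 };

    const copy1 = { ...source };
    const copy2 = { ...source, override: 1 };
    const copy3 = { ...source, ...overrides };

    // Getters on the source are read once and become plain data properties.
    Object.getOwnPropertyDescriptor(copy1, 'b').value;  // 2

    // Spreading null/undefined is a no-op; the JumpIfUndefined/JumpIfNull
    // checks emitted before CloneObject skip the ToObject conversion here.
    const empty = { ...null, ...undefined };            // {}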
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index c9cee39bb9..47f1f83e12 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -120,6 +120,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitPropertyLoadForRegister(Register obj, Property* expr,
Register destination);
+ void BuildLoadNamedProperty(Property* property, Register object,
+ const AstRawString* name);
+ void BuildStoreNamedProperty(Property* property, Register object,
+ const AstRawString* name);
+
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
void BuildVariableLoadForAccumulatorValue(
@@ -182,6 +187,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ZonePtrList<Expression>* elements,
bool skip_constants);
+ void BuildCreateObjectLiteral(Register literal, uint8_t flags, size_t entry);
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
@@ -277,6 +283,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void AddToEagerLiteralsIfEager(FunctionLiteral* literal);
+  // Checks if the visited expression is one-shot, i.e. executed only once. Any
+  // expression either in top-level code or in an IIFE that is not within a
+  // loop is eligible for one-shot optimizations.
+ inline bool ShouldOptimizeAsOneShot() const;
+
static constexpr ToBooleanMode ToBooleanModeFromTypeHint(TypeHint type_hint) {
return type_hint == TypeHint::kBoolean ? ToBooleanMode::kAlreadyBoolean
: ToBooleanMode::kConvertToBoolean;
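A hedged sketch of the code shapes that ShouldOptimizeAsOneShot (declared above, gated by FLAG_enable_one_shot_optimization) is meant to catch; ordinary JavaScript for illustration only:

    // Top-level statements run exactly once, so their literals take the
    // one-shot path (*WithoutAllocationSite runtime calls, no feedback slot).
    const config = { retries: 3 };
    const primes = [2, 3, 5, 7];

    // An IIFE outside any loop also runs exactly once and qualifies.
    (function init() {
      const defaults = { verbose: false };
    }());

    // Inside a loop the same literal executes repeatedly; loop_depth_ > 0
    // keeps it on the feedback-vector-based CreateObjectLiteral path.
    for (let i = 0; i < 10; i++) {
      const point = { x: i, y: 2 * i };
    }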
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index b1ae88c1ba..0e543877f7 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -251,6 +251,8 @@ namespace interpreter {
V(CreateObjectLiteral, AccumulatorUse::kNone, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8, OperandType::kRegOut) \
V(CreateEmptyObjectLiteral, AccumulatorUse::kWrite) \
+ V(CloneObject, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kFlag8, OperandType::kIdx) \
\
/* Tagged templates */ \
V(GetTemplateObject, AccumulatorUse::kWrite, OperandType::kIdx, \
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 8f558c4a90..3f3d38ce6e 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -25,6 +25,7 @@ namespace interpreter {
V(AsyncIteratorSymbol, async_iterator_symbol) \
V(ClassFieldsSymbol, class_fields_symbol) \
V(EmptyObjectBoilerplateDescription, empty_object_boilerplate_description) \
+ V(EmptyArrayBoilerplateDescription, empty_array_boilerplate_description) \
V(EmptyFixedArray, empty_fixed_array) \
V(HomeObjectSymbol, home_object_symbol) \
V(IteratorSymbol, iterator_symbol) \
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 6ea4ba628c..15e2b1f091 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -647,8 +647,8 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
}
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
- Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kConstantPoolOffset);
+ TNode<FixedArray> constant_pool = CAST(LoadObjectField(
+ BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index),
LoadSensitivity::kCritical);
}
@@ -1599,16 +1599,18 @@ void InterpreterAssembler::AbortIfRegisterCountInvalid(
}
Node* InterpreterAssembler::ExportParametersAndRegisterFile(
- Node* array, const RegListNodePair& registers,
- Node* formal_parameter_count) {
+ TNode<FixedArray> array, const RegListNodePair& registers,
+ TNode<Int32T> formal_parameter_count) {
// Store the formal parameters (without receiver) followed by the
// registers into the generator's internal parameters_and_registers field.
- formal_parameter_count = ChangeInt32ToIntPtr(formal_parameter_count);
+ TNode<IntPtrT> formal_parameter_count_intptr =
+ ChangeInt32ToIntPtr(formal_parameter_count);
Node* register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
- AbortIfRegisterCountInvalid(array, formal_parameter_count, register_count);
+ AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
+ register_count);
}
{
@@ -1620,13 +1622,14 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
Node* reg_base = IntPtrAdd(
IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
- formal_parameter_count);
+ formal_parameter_count_intptr);
Goto(&loop);
BIND(&loop);
{
Node* index = var_index.value();
- GotoIfNot(UintPtrLessThan(index, formal_parameter_count), &done_loop);
+ GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
+ &done_loop);
Node* reg_index = IntPtrSub(reg_base, index);
Node* value = LoadRegister(reg_index);
@@ -1657,7 +1660,7 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
Node* value = LoadRegister(reg_index);
- Node* array_index = IntPtrAdd(formal_parameter_count, index);
+ Node* array_index = IntPtrAdd(formal_parameter_count_intptr, index);
StoreFixedArrayElement(array, array_index, value);
var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
@@ -1669,19 +1672,20 @@ Node* InterpreterAssembler::ExportParametersAndRegisterFile(
return array;
}
-Node* InterpreterAssembler::ImportRegisterFile(Node* array,
- const RegListNodePair& registers,
- Node* formal_parameter_count) {
- formal_parameter_count = ChangeInt32ToIntPtr(formal_parameter_count);
- Node* register_count = ChangeUint32ToWord(registers.reg_count());
+Node* InterpreterAssembler::ImportRegisterFile(
+ TNode<FixedArray> array, const RegListNodePair& registers,
+ TNode<Int32T> formal_parameter_count) {
+ TNode<IntPtrT> formal_parameter_count_intptr =
+ ChangeInt32ToIntPtr(formal_parameter_count);
+ TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
if (FLAG_debug_code) {
CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
RegisterLocation(Register(0))));
- AbortIfRegisterCountInvalid(array, formal_parameter_count, register_count);
+ AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
+ register_count);
}
- Variable var_index(this, MachineType::PointerRepresentation());
- var_index.Bind(IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
// Iterate over array and write values into register file. Also erase the
// array contents to not keep them alive artificially.
@@ -1689,19 +1693,21 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array,
Goto(&loop);
BIND(&loop);
{
- Node* index = var_index.value();
+ TNode<IntPtrT> index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
- Node* array_index = IntPtrAdd(formal_parameter_count, index);
- Node* value = LoadFixedArrayElement(array, array_index);
+ TNode<IntPtrT> array_index =
+ IntPtrAdd(formal_parameter_count_intptr, index);
+ TNode<Object> value = LoadFixedArrayElement(array, array_index);
- Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+ TNode<IntPtrT> reg_index =
+ IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
StoreRegister(value, reg_index);
StoreFixedArrayElement(array, array_index,
LoadRoot(Heap::kStaleRegisterRootIndex));
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
+ var_index = IntPtrAdd(index, IntPtrConstant(1));
Goto(&loop);
}
BIND(&done_loop);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 641d553fd2..036e920837 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -103,11 +103,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// - Resume copies only the registers from the generator, the arguments
// are copied by the ResumeGenerator trampoline.
compiler::Node* ExportParametersAndRegisterFile(
- compiler::Node* array, const RegListNodePair& registers,
- compiler::Node* formal_parameter_count);
- compiler::Node* ImportRegisterFile(compiler::Node* array,
+ TNode<FixedArray> array, const RegListNodePair& registers,
+ TNode<Int32T> formal_parameter_count);
+ compiler::Node* ImportRegisterFile(TNode<FixedArray> array,
const RegListNodePair& registers,
- compiler::Node* formal_parameter_count);
+ TNode<Int32T> formal_parameter_count);
// Loads from and stores to the interpreter register file.
compiler::Node* LoadRegister(Register reg);
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 74ebaabcc7..afca2a8a32 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -19,6 +19,7 @@
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics-generator.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/module.h"
namespace v8 {
@@ -680,8 +681,8 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
BIND(&if_export);
{
- Node* regular_exports =
- LoadObjectField(module, Module::kRegularExportsOffset);
+ TNode<FixedArray> regular_exports =
+ CAST(LoadObjectField(module, Module::kRegularExportsOffset));
// The actual array index is (cell_index - 1).
Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
Node* cell = LoadFixedArrayElement(regular_exports, export_index);
@@ -691,8 +692,8 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
BIND(&if_import);
{
- Node* regular_imports =
- LoadObjectField(module, Module::kRegularImportsOffset);
+ TNode<FixedArray> regular_imports =
+ CAST(LoadObjectField(module, Module::kRegularImportsOffset));
// The actual array index is (-cell_index - 1).
Node* import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
Node* cell = LoadFixedArrayElement(regular_imports, import_index);
@@ -722,8 +723,8 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
BIND(&if_export);
{
- Node* regular_exports =
- LoadObjectField(module, Module::kRegularExportsOffset);
+ TNode<FixedArray> regular_exports =
+ CAST(LoadObjectField(module, Module::kRegularExportsOffset));
// The actual array index is (cell_index - 1).
Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
Node* cell = LoadFixedArrayElement(regular_exports, export_index);
@@ -1069,7 +1070,7 @@ IGNITION_HANDLER(BitwiseNot, InterpreterAssembler) {
// Number case.
BIND(&if_number);
TNode<Number> result =
- ChangeInt32ToTagged(Signed(Word32Not(var_word32.value())));
+ ChangeInt32ToTagged(Signed(Word32BitwiseNot(var_word32.value())));
TNode<Smi> result_type = SelectSmiConstant(
TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
BinaryOperationFeedback::kNumber);
@@ -1814,7 +1815,7 @@ IGNITION_HANDLER(TestIn, InterpreterAssembler) {
Node* object = GetAccumulator();
Node* context = GetContext();
- SetAccumulator(HasProperty(object, property, context, kHasProperty));
+ SetAccumulator(HasProperty(context, object, property, kHasProperty));
Dispatch();
}
@@ -2436,6 +2437,26 @@ IGNITION_HANDLER(CreateEmptyObjectLiteral, InterpreterAssembler) {
Dispatch();
}
+// CloneObject <source_idx> <flags> <feedback_slot>
+//
+// Allocates a new JSObject with each enumerable own property copied from
+// {source}, converting getters into data properties.
+IGNITION_HANDLER(CloneObject, InterpreterAssembler) {
+ Node* source = LoadRegisterAtOperandIndex(0);
+ Node* bytecode_flags = BytecodeOperandFlag(1);
+ Node* raw_flags =
+ DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(bytecode_flags);
+ Node* smi_flags = SmiTag(raw_flags);
+ Node* raw_slot = BytecodeOperandIdx(2);
+ Node* smi_slot = SmiTag(raw_slot);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+ Node* result = CallBuiltin(Builtins::kCloneObjectIC, context, source,
+ smi_flags, smi_slot, feedback_vector);
+ SetAccumulator(result);
+ Dispatch();
+}
+
// GetTemplateObject <descriptor_idx> <literal_idx>
//
// Creates the template to pass for tagged templates and returns it in the
@@ -2911,7 +2932,7 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
Node* feedback_vector = LoadFeedbackVector();
// Load the next key from the enumeration array.
- Node* key = LoadFixedArrayElement(cache_array, index, 0,
+ Node* key = LoadFixedArrayElement(CAST(cache_array), index, 0,
CodeStubAssembler::SMI_PARAMETERS);
// Check if we can use the for-in fast path potentially using the enum cache.
@@ -3004,8 +3025,8 @@ IGNITION_HANDLER(Illegal, InterpreterAssembler) {
// in the accumulator.
IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Node* generator = LoadRegisterAtOperandIndex(0);
- Node* array = LoadObjectField(
- generator, JSGeneratorObject::kParametersAndRegistersOffset);
+ TNode<FixedArray> array = CAST(LoadObjectField(
+ generator, JSGeneratorObject::kParametersAndRegistersOffset));
Node* closure = LoadRegister(Register::function_closure());
Node* context = GetContext();
RegListNodePair registers = GetRegisterListAtOperandIndex(1);
@@ -3013,9 +3034,9 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Node* shared =
LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
- Node* formal_parameter_count =
+ TNode<Int32T> formal_parameter_count = UncheckedCast<Int32T>(
LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
- MachineType::Uint16());
+ MachineType::Uint16()));
ExportParametersAndRegisterFile(array, registers, formal_parameter_count);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
@@ -3087,13 +3108,13 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
Node* shared =
LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
- Node* formal_parameter_count =
+ TNode<Int32T> formal_parameter_count = UncheckedCast<Int32T>(
LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
- MachineType::Uint16());
+ MachineType::Uint16()));
ImportRegisterFile(
- LoadObjectField(generator,
- JSGeneratorObject::kParametersAndRegistersOffset),
+ CAST(LoadObjectField(generator,
+ JSGeneratorObject::kParametersAndRegistersOffset)),
registers, formal_parameter_count);
// Return the generator's input_or_debug_pos in the accumulator.
@@ -3106,14 +3127,16 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
} // namespace
Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
- OperandScale operand_scale) {
+ OperandScale operand_scale,
+ int builtin_index) {
Zone zone(isolate->allocator(), ZONE_NAME);
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{}, Code::BYTECODE_HANDLER,
Bytecodes::ToString(bytecode),
FLAG_untrusted_code_mitigations
? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison);
+ : PoisoningMitigationLevel::kDontPoison,
+ 0, builtin_index);
switch (bytecode) {
#define CALL_GENERATOR(Name, ...) \
diff --git a/deps/v8/src/interpreter/interpreter-generator.h b/deps/v8/src/interpreter/interpreter-generator.h
index 3dbdcf829d..bc3793a45f 100644
--- a/deps/v8/src/interpreter/interpreter-generator.h
+++ b/deps/v8/src/interpreter/interpreter-generator.h
@@ -13,7 +13,8 @@ namespace internal {
namespace interpreter {
extern Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
- OperandScale operand_scale);
+ OperandScale operand_scale,
+ int builtin_index);
extern Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
OperandScale operand_scale);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 62785d7904..55e554e2e0 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -13,6 +13,7 @@
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator.h"
#include "src/objects/module.h"
namespace v8 {
@@ -170,18 +171,6 @@ Node* IntrinsicsGenerator::IsTypedArray(
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsJSWeakMap(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
- return IsInstanceType(input, JS_WEAK_MAP_TYPE);
-}
-
-Node* IntrinsicsGenerator::IsJSWeakSet(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
- return IsInstanceType(input, JS_WEAK_SET_TYPE);
-}
-
Node* IntrinsicsGenerator::IsSmi(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* input = __ LoadRegisterFromRegisterList(args, 0);
@@ -223,6 +212,12 @@ Node* IntrinsicsGenerator::HasProperty(
args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
+Node* IntrinsicsGenerator::GetProperty(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsStubCall(
+ args, context, Builtins::CallableFor(isolate(), Builtins::kGetProperty));
+}
+
Node* IntrinsicsGenerator::RejectPromise(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 570caca072..04f662f0df 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -26,11 +26,10 @@ namespace interpreter {
V(CreateIterResultObject, create_iter_result_object, 2) \
V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
V(HasProperty, has_property, 2) \
+ V(GetProperty, get_property, 2) \
V(IsArray, is_array, 1) \
V(IsJSProxy, is_js_proxy, 1) \
V(IsJSReceiver, is_js_receiver, 1) \
- V(IsJSWeakMap, is_js_weak_map, 1) \
- V(IsJSWeakSet, is_js_weak_set, 1) \
V(IsSmi, is_smi, 1) \
V(IsTypedArray, is_typed_array, 1) \
V(RejectPromise, reject_promise, 3) \
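The GetProperty intrinsic added above is what the one-shot BuildLoadNamedProperty path earlier in this diff reaches via Runtime::kInlineGetProperty. Roughly, and only with FLAG_enable_one_shot_optimization on:

    const settings = { theme: 'dark' };  // top-level code: runs once

    const theme = settings.theme;        // one-shot load: GetProperty intrinsic,
                                         // no LoadIC feedback slot is allocated
    settings.theme = 'light';            // one-shot store: Runtime::kSetProperty

    function render(s) {
      return s.theme;                    // may run many times: keeps the usual
    }                                    // LdaNamedProperty IC and feedback slot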
diff --git a/deps/v8/src/interpreter/setup-interpreter-internal.cc b/deps/v8/src/interpreter/setup-interpreter-internal.cc
index 2e0a7d8ca5..8f2b565c00 100644
--- a/deps/v8/src/interpreter/setup-interpreter-internal.cc
+++ b/deps/v8/src/interpreter/setup-interpreter-internal.cc
@@ -80,7 +80,11 @@ void SetupInterpreter::InstallBytecodeHandler(Isolate* isolate,
if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
size_t index = Interpreter::GetDispatchTableIndex(bytecode, operand_scale);
- Handle<Code> code = GenerateBytecodeHandler(isolate, bytecode, operand_scale);
+  // Here we explicitly mark the bytecode handler as not being a builtin by
+  // passing kNoBuiltinId as its builtin index.
+ // TODO(delphick): Use builtins version instead.
+ Handle<Code> code = GenerateBytecodeHandler(isolate, bytecode, operand_scale,
+ Builtins::kNoBuiltinId);
dispatch_table[index] = code->entry();
if (FLAG_print_builtin_size) PrintBuiltinSize(bytecode, operand_scale, code);
diff --git a/deps/v8/src/intl.cc b/deps/v8/src/intl.cc
index a4d3262d74..c8548b6d48 100644
--- a/deps/v8/src/intl.cc
+++ b/deps/v8/src/intl.cc
@@ -155,10 +155,8 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
}
}
-V8_WARN_UNUSED_RESULT Object* LocaleConvertCase(Handle<String> s,
- Isolate* isolate,
- bool is_to_upper,
- const char* lang) {
+MaybeHandle<String> LocaleConvertCase(Handle<String> s, Isolate* isolate,
+ bool is_to_upper, const char* lang) {
auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
int32_t src_length = s->length();
int32_t dest_length = src_length;
@@ -166,15 +164,16 @@ V8_WARN_UNUSED_RESULT Object* LocaleConvertCase(Handle<String> s,
Handle<SeqTwoByteString> result;
std::unique_ptr<uc16[]> sap;
- if (dest_length == 0) return ReadOnlyRoots(isolate).empty_string();
+ if (dest_length == 0) return ReadOnlyRoots(isolate).empty_string_handle();
// This is not a real loop. It'll be executed only once (no overflow) or
// twice (overflow).
for (int i = 0; i < 2; ++i) {
// Case conversion can increase the string length (e.g. sharp-S => SS) so
// that we have to handle RangeError exceptions here.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(dest_length));
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(dest_length),
+ String);
DisallowHeapAllocation no_gc;
DCHECK(s->IsFlat());
String::FlatContent flat = s->GetFlatContent();
@@ -192,22 +191,17 @@ V8_WARN_UNUSED_RESULT Object* LocaleConvertCase(Handle<String> s,
DCHECK(U_SUCCESS(status));
if (V8_LIKELY(status == U_STRING_NOT_TERMINATED_WARNING)) {
DCHECK(dest_length == result->length());
- return *result;
+ return result;
}
- if (U_SUCCESS(status)) {
- DCHECK(dest_length < result->length());
- return *Handle<SeqTwoByteString>::cast(
- SeqString::Truncate(result, dest_length));
- }
- return *s;
+ DCHECK(dest_length < result->length());
+ return SeqString::Truncate(result, dest_length);
}
// A stripped-down version of ConvertToLower that can only handle flat one-byte
// strings and does not allocate. Note that {src} could still be, e.g., a
// one-byte sliced string with a two-byte parent string.
// Called from TF builtins.
-V8_WARN_UNUSED_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
- Isolate* isolate) {
+V8_WARN_UNUSED_RESULT String* ConvertOneByteToLower(String* src, String* dst) {
DCHECK_EQ(src->length(), dst->length());
DCHECK(src->HasOnlyOneByteChars());
DCHECK(src->IsFlat());
@@ -252,8 +246,7 @@ V8_WARN_UNUSED_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
return dst;
}
-V8_WARN_UNUSED_RESULT Object* ConvertToLower(Handle<String> s,
- Isolate* isolate) {
+MaybeHandle<String> ConvertToLower(Handle<String> s, Isolate* isolate) {
if (!s->HasOnlyOneByteChars()) {
// Use a slower implementation for strings with characters beyond U+00FF.
return LocaleConvertCase(s, isolate, false, "");
@@ -275,17 +268,16 @@ V8_WARN_UNUSED_RESULT Object* ConvertToLower(Handle<String> s,
bool is_short = length < static_cast<int>(sizeof(uintptr_t));
if (is_short) {
bool is_lower_ascii = FindFirstUpperOrNonAscii(*s, length) == length;
- if (is_lower_ascii) return *s;
+ if (is_lower_ascii) return s;
}
Handle<SeqOneByteString> result =
isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
- return ConvertOneByteToLower(*s, *result, isolate);
+ return Handle<String>(ConvertOneByteToLower(*s, *result), isolate);
}
-V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
- Isolate* isolate) {
+MaybeHandle<String> ConvertToUpper(Handle<String> s, Isolate* isolate) {
int32_t length = s->length();
if (s->HasOnlyOneByteChars() && length > 0) {
Handle<SeqOneByteString> result =
@@ -305,8 +297,9 @@ V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
FastAsciiConvert<false>(reinterpret_cast<char*>(result->GetChars()),
reinterpret_cast<const char*>(src.start()),
length, &has_changed_character);
- if (index_to_first_unprocessed == length)
- return has_changed_character ? *result : *s;
+ if (index_to_first_unprocessed == length) {
+ return has_changed_character ? result : s;
+ }
// If not ASCII, we keep the result up to index_to_first_unprocessed and
// process the rest.
is_result_single_byte =
@@ -315,7 +308,7 @@ V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
} else {
DCHECK(flat.IsTwoByte());
Vector<const uint16_t> src = flat.ToUC16Vector();
- if (ToUpperFastASCII(src, result)) return *result;
+ if (ToUpperFastASCII(src, result)) return result;
is_result_single_byte = ToUpperOneByte(src, dest, &sharp_s_count);
}
}
@@ -326,13 +319,14 @@ V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
return LocaleConvertCase(s, isolate, true, "");
}
- if (sharp_s_count == 0) return *result;
+ if (sharp_s_count == 0) return result;
// We have sharp_s_count sharp-s characters, but the result is still
// in the Latin-1 range.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
- isolate->factory()->NewRawOneByteString(length + sharp_s_count));
+ isolate->factory()->NewRawOneByteString(length + sharp_s_count),
+ String);
DisallowHeapAllocation no_gc;
String::FlatContent flat = s->GetFlatContent();
if (flat.IsOneByte()) {
@@ -341,14 +335,14 @@ V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
ToUpperWithSharpS(flat.ToUC16Vector(), result);
}
- return *result;
+ return result;
}
return LocaleConvertCase(s, isolate, true, "");
}
-V8_WARN_UNUSED_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate) {
+MaybeHandle<String> ConvertCase(Handle<String> s, bool is_upper,
+ Isolate* isolate) {
return is_upper ? ConvertToUpper(s, isolate) : ConvertToLower(s, isolate);
}
diff --git a/deps/v8/src/intl.h b/deps/v8/src/intl.h
index e250e78fb0..5ec5381f40 100644
--- a/deps/v8/src/intl.h
+++ b/deps/v8/src/intl.h
@@ -30,25 +30,21 @@ enum class IcuService {
kNumberFormat,
kPluralRules,
kResourceBundle,
- kRelativeDateTimeFormatter
+ kRelativeDateTimeFormatter,
+ kListFormatter
};
const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
std::unique_ptr<uc16[]>* dest,
int32_t length);
-V8_WARN_UNUSED_RESULT Object* LocaleConvertCase(Handle<String> s,
- Isolate* isolate,
- bool is_to_upper,
- const char* lang);
-V8_WARN_UNUSED_RESULT Object* ConvertToLower(Handle<String> s,
- Isolate* isolate);
-V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
- Isolate* isolate);
-V8_WARN_UNUSED_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate);
-
-V8_WARN_UNUSED_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
- Isolate* isolate);
+MaybeHandle<String> LocaleConvertCase(Handle<String> s, Isolate* isolate,
+ bool is_to_upper, const char* lang);
+MaybeHandle<String> ConvertToLower(Handle<String> s, Isolate* isolate);
+MaybeHandle<String> ConvertToUpper(Handle<String> s, Isolate* isolate);
+MaybeHandle<String> ConvertCase(Handle<String> s, bool is_upper,
+ Isolate* isolate);
+
+V8_WARN_UNUSED_RESULT String* ConvertOneByteToLower(String* src, String* dst);
const uint8_t* ToLatin1LowerTable();
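Observable JavaScript behavior behind the ConvertToUpper/LocaleConvertCase refactor above (V8_INTL_SUPPORT builds): case conversion can change the string length, which is why the helpers allocate a raw result string and may retry once on overflow:

    'ß'.toUpperCase();            // "SS"  (one character becomes two, the sharp-S case)
    'Straße'.toUpperCase();       // "STRASSE"

    // Locale-sensitive variants funnel into LocaleConvertCase with a language
    // tag, e.g. the Turkish dotted/dotless i:
    'I'.toLocaleLowerCase('tr');  // "ı"
    'i'.toLocaleUpperCase('tr');  // "İ"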
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 7a43f1367f..017032c320 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -11,17 +11,28 @@
namespace v8 {
namespace internal {
+bool Isolate::FromWritableHeapObject(HeapObject* obj, Isolate** isolate) {
+ i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(obj);
+ if (chunk->owner()->identity() == i::RO_SPACE) {
+ *isolate = nullptr;
+ return false;
+ }
+ *isolate = chunk->heap()->isolate();
+ return true;
+}
void Isolate::set_context(Context* context) {
DCHECK(context == nullptr || context->IsContext());
thread_local_top_.context_ = context;
}
-Handle<Context> Isolate::native_context() {
+Handle<NativeContext> Isolate::native_context() {
return handle(context()->native_context(), this);
}
-Context* Isolate::raw_native_context() { return context()->native_context(); }
+NativeContext* Isolate::raw_native_context() {
+ return context()->native_context();
+}
Object* Isolate::pending_exception() {
DCHECK(has_pending_exception());
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index b33b713672..89fecce804 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -11,7 +11,7 @@
#include <sstream>
#include <unordered_map>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/context-slot-cache.h"
@@ -20,7 +20,6 @@
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/basic-block-profiler.h"
#include "src/bootstrapper.h"
#include "src/builtins/constants-table-builder.h"
#include "src/cancelable-task.h"
@@ -42,6 +41,7 @@
#include "src/messages.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/profiler/tracing-cpu-profiler.h"
@@ -156,9 +156,10 @@ void ThreadLocalTop::Initialize(Isolate* isolate) {
simulator_ = Simulator::current(isolate);
#endif
thread_id_ = ThreadId::Current();
+ thread_in_wasm_flag_address_ = reinterpret_cast<Address>(
+ trap_handler::GetThreadInWasmThreadLocalAddress());
}
-
void ThreadLocalTop::Free() {
wasm_caught_exception_ = nullptr;
// Match unmatched PopPromise calls.
@@ -1124,6 +1125,8 @@ void ReportBootstrappingException(Handle<Object> exception,
}
bool Isolate::is_catchable_by_wasm(Object* exception) {
+ // TODO(titzer): thread WASM features here, or just remove this check?
+ if (!FLAG_experimental_wasm_eh) return false;
if (!is_catchable_by_javascript(exception) || !exception->IsJSError())
return false;
HandleScope scope(this);
@@ -1307,7 +1310,7 @@ Object* Isolate::UnwindAndFindHandler() {
trap_handler::ClearThreadInWasm();
}
- if (!FLAG_experimental_wasm_eh || !is_catchable_by_wasm(exception)) {
+ if (!is_catchable_by_wasm(exception)) {
break;
}
int stack_slots = 0; // Will contain stack slot count of frame.
@@ -1735,18 +1738,15 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<WasmInstanceObject> instance(elements->WasmInstance(i), this);
uint32_t func_index =
static_cast<uint32_t>(elements->WasmFunctionIndex(i)->value());
+ wasm::WasmCode* wasm_code = reinterpret_cast<wasm::WasmCode*>(
+ elements->WasmCodeObject(i)->foreign_address());
int code_offset = elements->Offset(i)->value();
-
- // TODO(titzer): store a reference to the code object in FrameArray;
- // a second lookup here could lead to inconsistency.
- int byte_offset =
- FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- instance->module_object()->native_module()->code(func_index),
- code_offset);
-
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
+ int byte_offset =
+ FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
+ wasm_code, code_offset);
int pos = WasmModuleObject::GetSourcePosition(
handle(instance->module_object(), this), func_index, byte_offset,
is_at_number_conversion);
@@ -2216,12 +2216,12 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
abort_on_uncaught_exception_callback_ = callback;
}
-Handle<Context> Isolate::GetCallingNativeContext() {
- JavaScriptFrameIterator it(this);
- if (it.done()) return Handle<Context>::null();
- JavaScriptFrame* frame = it.frame();
- Context* context = Context::cast(frame->context());
- return Handle<Context>(context->native_context(), this);
+bool Isolate::AreWasmThreadsEnabled(Handle<Context> context) {
+ if (wasm_threads_enabled_callback()) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ return wasm_threads_enabled_callback()(api_context);
+ }
+ return FLAG_experimental_wasm_threads;
}
Handle<Context> Isolate::GetIncumbentContext() {
@@ -2486,6 +2486,7 @@ Isolate::Isolate()
language_singleton_regexp_matcher_(nullptr),
language_tag_regexp_matcher_(nullptr),
language_variant_regexp_matcher_(nullptr),
+ default_locale_(""),
#endif // V8_INTL_SUPPORT
serializer_enabled_(false),
has_fatal_error_(false),
@@ -2506,7 +2507,6 @@ Isolate::Isolate()
#endif
is_running_microtasks_(false),
use_counter_callback_(nullptr),
- basic_block_profiler_(nullptr),
cancelable_task_manager_(new CancelableTaskManager()),
abort_on_uncaught_exception_callback_(nullptr),
total_regexp_code_generated_(0) {
@@ -2608,14 +2608,14 @@ void Isolate::Deinit() {
debug()->Unload();
- wasm_engine()->TearDown();
-
if (concurrent_recompilation_enabled()) {
optimizing_compile_dispatcher_->Stop();
delete optimizing_compile_dispatcher_;
optimizing_compile_dispatcher_ = nullptr;
}
+ wasm_engine()->DeleteCompileJobsOnIsolate(this);
+
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
@@ -2630,6 +2630,7 @@ void Isolate::Deinit() {
if (sampler && sampler->IsActive()) sampler->Stop();
FreeThreadResources();
+ logger_->StopProfilerThread();
// We start with the heap tear down so that releasing managed objects does
// not cause a GC.
@@ -2647,9 +2648,6 @@ void Isolate::Deinit() {
runtime_profiler_ = nullptr;
}
- delete basic_block_profiler_;
- basic_block_profiler_ = nullptr;
-
delete heap_profiler_;
heap_profiler_ = nullptr;
@@ -2657,12 +2655,14 @@ void Isolate::Deinit() {
delete compiler_dispatcher_;
compiler_dispatcher_ = nullptr;
- // This stops cancelable tasks (i.e. concurrent masking tasks)
+ // This stops cancelable tasks (i.e. concurrent marking tasks)
cancelable_task_manager()->CancelAndWait();
heap_.TearDown();
logger_->TearDown();
+ wasm_engine_.reset();
+
if (FLAG_embedded_builtins) {
if (DefaultEmbeddedBlob() == nullptr && embedded_blob() != nullptr) {
// We own the embedded blob. Free it.
@@ -2876,6 +2876,19 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
}
}
}
+
+void PrintEmbeddedBuiltinCandidates(Isolate* isolate) {
+ CHECK(FLAG_print_embedded_builtin_candidates);
+ bool found_a_candidate = false;
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (Builtins::IsIsolateIndependent(i)) continue;
+ Code* builtin = isolate->heap()->builtin(i);
+ if (!builtin->IsIsolateIndependent(isolate)) continue;
+ if (!found_a_candidate) PrintF("Found embedded builtin candidates:\n");
+ found_a_candidate = true;
+ PrintF(" %s\n", Builtins::name(i));
+ }
+}
} // namespace
void Isolate::PrepareEmbeddedBlobForSerialization() {
@@ -2963,17 +2976,10 @@ bool Isolate::Init(StartupDeserializer* des) {
DCHECK(!heap_.HasBeenSetUp());
heap_.SetUp();
- // Setup the wasm engine. Currently, there's one per Isolate by default.
+ // Setup the wasm engine.
if (wasm_engine_ == nullptr) {
- wasm_engine_.reset(
- new wasm::WasmEngine(std::unique_ptr<wasm::WasmCodeManager>(
- new wasm::WasmCodeManager(kMaxWasmCodeMemory))));
- wasm_engine_->memory_tracker()->SetAllocationResultHistogram(
- counters()->wasm_memory_allocation_result());
- wasm_engine_->memory_tracker()->SetAddressSpaceUsageHistogram(
- counters()->wasm_address_space_usage_mb());
- wasm_engine_->code_manager()->SetModuleCodeSizeHistogram(
- counters()->wasm_module_code_size_mb());
+ wasm_engine_ = wasm::WasmEngine::GetWasmEngine();
+ wasm::WasmCodeManager::InstallSamplingGCCallback(this);
}
deoptimizer_data_ = new DeoptimizerData(heap());
@@ -3018,7 +3024,7 @@ bool Isolate::Init(StartupDeserializer* des) {
set_event_logger(Logger::DefaultEventLoggerSentinel);
}
- if (FLAG_trace_turbo || FLAG_trace_turbo_graph) {
+ if (FLAG_trace_turbo || FLAG_trace_turbo_graph || FLAG_turbo_profiling) {
PrintF("Concurrent recompilation has been disabled for tracing.\n");
} else if (OptimizingCompileDispatcher::Enabled()) {
optimizing_compile_dispatcher_ = new OptimizingCompileDispatcher(this);
@@ -3044,6 +3050,9 @@ bool Isolate::Init(StartupDeserializer* des) {
setup_delegate_ = nullptr;
if (FLAG_print_builtin_size) PrintBuiltinSizes(this);
+ if (FLAG_print_embedded_builtin_candidates) {
+ PrintEmbeddedBuiltinCandidates(this);
+ }
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
@@ -3763,7 +3772,8 @@ MaybeHandle<JSPromise> NewRejectedPromise(Isolate* isolate,
MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
Handle<Script> referrer, Handle<Object> specifier) {
- v8::Local<v8::Context> api_context = v8::Utils::ToLocal(native_context());
+ v8::Local<v8::Context> api_context =
+ v8::Utils::ToLocal(Handle<Context>(native_context()));
if (host_import_module_dynamically_callback_ == nullptr) {
Handle<Object> exception =
@@ -3802,7 +3812,8 @@ Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
if (host_meta->IsTheHole(this)) {
host_meta = factory()->NewJSObjectWithNullProto();
if (host_initialize_import_meta_object_callback_ != nullptr) {
- v8::Local<v8::Context> api_context = v8::Utils::ToLocal(native_context());
+ v8::Local<v8::Context> api_context =
+ v8::Utils::ToLocal(Handle<Context>(native_context()));
host_initialize_import_meta_object_callback_(
api_context, Utils::ToLocal(module),
v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(host_meta)));
@@ -3995,15 +4006,6 @@ void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
}
}
-
-BasicBlockProfiler* Isolate::GetOrCreateBasicBlockProfiler() {
- if (basic_block_profiler_ == nullptr) {
- basic_block_profiler_ = new BasicBlockProfiler();
- }
- return basic_block_profiler_;
-}
-
-
std::string Isolate::GetTurboCfgFileName() {
if (FLAG_trace_turbo_cfg_file == nullptr) {
std::ostringstream os;
@@ -4018,51 +4020,51 @@ std::string Isolate::GetTurboCfgFileName() {
// (number of GC since the context was detached, the context).
void Isolate::AddDetachedContext(Handle<Context> context) {
HandleScope scope(this);
- Handle<WeakCell> cell = factory()->NewWeakCell(context);
- Handle<FixedArray> detached_contexts =
- factory()->CopyFixedArrayAndGrow(factory()->detached_contexts(), 2);
- int new_length = detached_contexts->length();
- detached_contexts->set(new_length - 2, Smi::kZero);
- detached_contexts->set(new_length - 1, *cell);
+ Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
+ detached_contexts = WeakArrayList::AddToEnd(
+ this, detached_contexts, MaybeObjectHandle(Smi::kZero, this));
+ detached_contexts = WeakArrayList::AddToEnd(this, detached_contexts,
+ MaybeObjectHandle::Weak(context));
heap()->set_detached_contexts(*detached_contexts);
}
void Isolate::CheckDetachedContextsAfterGC() {
HandleScope scope(this);
- Handle<FixedArray> detached_contexts = factory()->detached_contexts();
+ Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
int length = detached_contexts->length();
if (length == 0) return;
int new_length = 0;
for (int i = 0; i < length; i += 2) {
- int mark_sweeps = Smi::ToInt(detached_contexts->get(i));
- DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
- WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
- if (!cell->cleared()) {
- detached_contexts->set(new_length, Smi::FromInt(mark_sweeps + 1));
- detached_contexts->set(new_length + 1, cell);
+ int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->ToSmi());
+ MaybeObject* context = detached_contexts->Get(i + 1);
+ DCHECK(context->IsWeakHeapObject() || context->IsClearedWeakHeapObject());
+ if (!context->IsClearedWeakHeapObject()) {
+ detached_contexts->Set(
+ new_length, MaybeObject::FromSmi(Smi::FromInt(mark_sweeps + 1)));
+ detached_contexts->Set(new_length + 1, context);
new_length += 2;
}
- counters()->detached_context_age_in_gc()->AddSample(mark_sweeps + 1);
}
+ detached_contexts->set_length(new_length);
+ while (new_length < length) {
+ detached_contexts->Set(new_length, MaybeObject::FromSmi(Smi::kZero));
+ ++new_length;
+ }
+
if (FLAG_trace_detached_contexts) {
PrintF("%d detached contexts are collected out of %d\n",
length - new_length, length);
for (int i = 0; i < new_length; i += 2) {
- int mark_sweeps = Smi::ToInt(detached_contexts->get(i));
- DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
- WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
+ int mark_sweeps = Smi::ToInt(detached_contexts->Get(i)->ToSmi());
+ MaybeObject* context = detached_contexts->Get(i + 1);
+ DCHECK(context->IsWeakHeapObject() || context->IsClearedWeakHeapObject());
if (mark_sweeps > 3) {
PrintF("detached context %p\n survived %d GCs (leak?)\n",
- static_cast<void*>(cell->value()), mark_sweeps);
+ static_cast<void*>(context), mark_sweeps);
}
}
}
- if (new_length == 0) {
- heap()->set_detached_contexts(ReadOnlyRoots(heap()).empty_fixed_array());
- } else if (new_length < length) {
- heap()->RightTrimFixedArray(*detached_contexts, length - new_length);
- }
}
double Isolate::LoadStartTimeMs() {
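AreWasmThreadsEnabled above makes shared-memory WebAssembly a per-context decision: the embedder's WasmThreadsEnabledCallback is consulted first, with --experimental-wasm-threads as the fallback. A small illustration using the standard WebAssembly API (not V8-internal):

    // Only permitted when threads are enabled for the current context.
    const mem = new WebAssembly.Memory({ initial: 1, maximum: 1, shared: true });
    mem.buffer instanceof SharedArrayBuffer;  // true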
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index d94c0fde14..e199a93ec4 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -61,7 +61,6 @@ class HeapTester;
class AccessCompilerData;
class AddressToIndexHashMap;
class AstStringConstants;
-class BasicBlockProfiler;
class Bootstrapper;
class BuiltinsConstantsTableBuilder;
class CancelableTaskManager;
@@ -168,6 +167,24 @@ class WasmEngine;
} \
} while (false)
+/**
+ * RETURN_RESULT_OR_FAILURE is used in functions with return type Object* (such
+ * as "RUNTIME_FUNCTION(...) {...}" or "BUILTIN(...) {...}" ) to return either
+ * the contents of a MaybeHandle<X>, or the "exception" sentinel value.
+ * Example usage:
+ *
+ * RUNTIME_FUNCTION(Runtime_Func) {
+ * ...
+ * RETURN_RESULT_OR_FAILURE(
+ * isolate,
+ * FunctionWithReturnTypeMaybeHandleX(...));
+ * }
+ *
+ * If inside a function with return type MaybeHandle<X>, use
+ * RETURN_ON_EXCEPTION instead.
+ * If inside a function with return type Handle<X> or Maybe<X>, use
+ * RETURN_ON_EXCEPTION_VALUE instead.
+ */
#define RETURN_RESULT_OR_FAILURE(isolate, call) \
do { \
Handle<Object> __result__; \
@@ -217,6 +234,36 @@ class WasmEngine;
return value; \
} while (false)
+/**
+ * RETURN_ON_EXCEPTION_VALUE conditionally returns the given value when the
+ * given MaybeHandle is empty. It is typically used in functions with return
+ * type Maybe<X> or Handle<X>. Example usage:
+ *
+ * Handle<X> Func() {
+ * ...
+ * RETURN_ON_EXCEPTION_VALUE(
+ * isolate,
+ * FunctionWithReturnTypeMaybeHandleX(...),
+ * Handle<X>());
+ * // code to handle non exception
+ * ...
+ * }
+ *
+ * Maybe<bool> Func() {
+ *   ...
+ *   RETURN_ON_EXCEPTION_VALUE(
+ *       isolate,
+ *       FunctionWithReturnTypeMaybeHandleX(...),
+ *       Nothing<bool>());
+ * // code to handle non exception
+ * return Just(true);
+ * }
+ *
+ * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
+ * instead.
+ * If inside a function with return type Object*, use
+ * RETURN_FAILURE_ON_EXCEPTION instead.
+ */
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
do { \
if ((call).is_null()) { \
@@ -225,6 +272,26 @@ class WasmEngine;
} \
} while (false)
+/**
+ * RETURN_FAILURE_ON_EXCEPTION conditionally returns the "exception" sentinel if
+ * the given MaybeHandle is empty; so it can only be used in functions with
+ * return type Object*, such as RUNTIME_FUNCTION(...) {...} or BUILTIN(...)
+ * {...}. Example usage:
+ *
+ * RUNTIME_FUNCTION(Runtime_Func) {
+ * ...
+ * RETURN_FAILURE_ON_EXCEPTION(
+ * isolate,
+ * FunctionWithReturnTypeMaybeHandleX(...));
+ * // code to handle non exception
+ * ...
+ * }
+ *
+ * If inside a function with return type MaybeHandle<X>, use RETURN_ON_EXCEPTION
+ * instead.
+ * If inside a function with return type Maybe<X> or Handle<X>, use
+ * RETURN_ON_EXCEPTION_VALUE instead.
+ */
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
do { \
Isolate* __isolate__ = (isolate); \
@@ -232,6 +299,26 @@ class WasmEngine;
ReadOnlyRoots(__isolate__).exception()); \
} while (false);
+/**
+ * RETURN_ON_EXCEPTION conditionally returns an empty MaybeHandle<T> if the
+ * given MaybeHandle is empty. Use it to return immediately from a function with
+ * return type MaybeHandle when an exception was thrown. Example usage:
+ *
+ * MaybeHandle<X> Func() {
+ * ...
+ * RETURN_ON_EXCEPTION(
+ * isolate,
+ * FunctionWithReturnTypeMaybeHandleY(...),
+ * X);
+ * // code to handle non exception
+ * ...
+ * }
+ *
+ * If inside a function with return type Object*, use
+ * RETURN_FAILURE_ON_EXCEPTION instead.
+ * If inside a function with return type Maybe<X> or Handle<X>, use
+ * RETURN_ON_EXCEPTION_VALUE instead.
+ */
#define RETURN_ON_EXCEPTION(isolate, call, T) \
RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
@@ -396,11 +483,13 @@ class ThreadLocalTop BASE_EMBEDDED {
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_ = nullptr;
+ // Address of the thread-local "thread in wasm" flag.
+ Address thread_in_wasm_flag_address_ = kNullAddress;
+
private:
v8::TryCatch* try_catch_handler_ = nullptr;
};
-
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
@@ -434,6 +523,7 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr) \
V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
+ V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
@@ -553,6 +643,11 @@ class Isolate : private HiddenFactory {
return isolate;
}
+ // Get the isolate that the given HeapObject lives in, returning true on
+ // success. If the object is not writable (i.e. lives in read-only space),
+ // return false.
+ inline static bool FromWritableHeapObject(HeapObject* obj, Isolate** isolate);
+
// Usually called by Init(), but can be called early e.g. to allow
// testing components that require logging but not the whole
// isolate.
@@ -626,6 +721,8 @@ class Isolate : private HiddenFactory {
inline void set_wasm_caught_exception(Object* exception);
inline void clear_wasm_caught_exception();
+ bool AreWasmThreadsEnabled(Handle<Context> context);
+
THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
inline bool has_pending_exception();
@@ -812,7 +909,7 @@ class Isolate : private HiddenFactory {
};
CatchType PredictExceptionCatcher();
- void ScheduleThrow(Object* exception);
+ V8_EXPORT_PRIVATE void ScheduleThrow(Object* exception);
// Re-set pending message, script and positions reported to the TryCatch
// back to the TLS for re-use when rethrowing.
void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
@@ -858,12 +955,8 @@ class Isolate : private HiddenFactory {
void IterateThread(ThreadVisitor* v, char* t);
// Returns the current native context.
- inline Handle<Context> native_context();
- inline Context* raw_native_context();
-
- // Returns the native context of the calling JavaScript code. That
- // is, the native context of the top-most JavaScript frame.
- Handle<Context> GetCallingNativeContext();
+ inline Handle<NativeContext> native_context();
+ inline NativeContext* raw_native_context();
Handle<Context> GetIncumbentContext();
@@ -1111,6 +1204,13 @@ class Isolate : private HiddenFactory {
return language_variant_regexp_matcher_;
}
+ const std::string& default_locale() { return default_locale_; }
+
+ void set_default_locale(const std::string& locale) {
+ DCHECK_EQ(default_locale_.length(), 0);
+ default_locale_ = locale;
+ }
+
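The DCHECK makes the cached locale write-once; a sketch of the intended call pattern (not part of the patch; `icu_locale_name` is a hypothetical std::string):

    if (isolate->default_locale().empty()) {
      isolate->set_default_locale(icu_locale_name);  // populate the cache exactly once
    }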
void set_language_tag_regexp_matchers(
icu::RegexMatcher* language_singleton_regexp_matcher,
icu::RegexMatcher* language_tag_regexp_matcher,
@@ -1279,9 +1379,6 @@ class Isolate : private HiddenFactory {
void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
void CountUsage(v8::Isolate::UseCounterFeature feature);
- BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
- BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }
-
std::string GetTurboCfgFileName();
#if V8_SFI_HAS_UNIQUE_ID
@@ -1623,6 +1720,7 @@ class Isolate : private HiddenFactory {
icu::RegexMatcher* language_singleton_regexp_matcher_;
icu::RegexMatcher* language_tag_regexp_matcher_;
icu::RegexMatcher* language_variant_regexp_matcher_;
+ std::string default_locale_;
#endif // V8_INTL_SUPPORT
// Whether the isolate has been created for snapshotting.
@@ -1714,7 +1812,6 @@ class Isolate : private HiddenFactory {
bool is_running_microtasks_;
v8::Isolate::UseCounterCallback use_counter_callback_;
- BasicBlockProfiler* basic_block_profiler_;
std::vector<Object*> partial_snapshot_cache_;
@@ -1778,9 +1875,6 @@ class Isolate : private HiddenFactory {
friend class v8::Locker;
friend class v8::SnapshotCreator;
friend class v8::Unlocker;
- friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
- friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
- const char*);
DISALLOW_COPY_AND_ASSIGN(Isolate);
};
@@ -1947,7 +2041,8 @@ class PostponeInterruptsScope : public InterruptsScope {
// PostponeInterruptsScopes.
class SafeForInterruptsScope : public InterruptsScope {
public:
- SafeForInterruptsScope(Isolate* isolate, int intercept_mask)
+ SafeForInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
: InterruptsScope(isolate, intercept_mask,
InterruptsScope::kRunInterrupts) {}
virtual ~SafeForInterruptsScope() = default;
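With the new default argument, call sites that want to allow every interrupt can drop the explicit mask; a sketch (not part of the patch):

    SafeForInterruptsScope scope(isolate);  // same as passing StackGuard::ALL_INTERRUPTS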
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 35db963d18..fe02afceea 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -408,106 +408,6 @@ DEFINE_METHOD(
);
-// For implementing reverse() on large, sparse arrays.
-function SparseReverse(array, len) {
- var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var high_counter = keys.length - 1;
- var low_counter = 0;
- while (low_counter <= high_counter) {
- var i = keys[low_counter];
- var j = keys[high_counter];
-
- var j_complement = len - j - 1;
- var low, high;
-
- if (j_complement <= i) {
- high = j;
- while (keys[--high_counter] == j) { }
- low = j_complement;
- }
- if (j_complement >= i) {
- low = i;
- while (keys[++low_counter] == i) { }
- high = len - i - 1;
- }
-
- var current_i = array[low];
- if (!IS_UNDEFINED(current_i) || low in array) {
- var current_j = array[high];
- if (!IS_UNDEFINED(current_j) || high in array) {
- array[low] = current_j;
- array[high] = current_i;
- } else {
- array[high] = current_i;
- delete array[low];
- }
- } else {
- var current_j = array[high];
- if (!IS_UNDEFINED(current_j) || high in array) {
- array[low] = current_j;
- delete array[high];
- }
- }
- }
-}
-
-function PackedArrayReverse(array, len) {
- var j = len - 1;
- for (var i = 0; i < j; i++, j--) {
- var current_i = array[i];
- var current_j = array[j];
- array[i] = current_j;
- array[j] = current_i;
- }
- return array;
-}
-
-
-function GenericArrayReverse(array, len) {
- var j = len - 1;
- for (var i = 0; i < j; i++, j--) {
- if (i in array) {
- var current_i = array[i];
- if (j in array) {
- var current_j = array[j];
- array[i] = current_j;
- array[j] = current_i;
- } else {
- array[j] = current_i;
- delete array[i];
- }
- } else {
- if (j in array) {
- var current_j = array[j];
- array[i] = current_j;
- delete array[j];
- }
- }
- }
- return array;
-}
-
-
-DEFINE_METHOD(
- GlobalArray.prototype,
- reverse() {
- var array = TO_OBJECT(this);
- var len = TO_LENGTH(array.length);
- var isArray = IS_ARRAY(array);
-
- if (UseSparseVariant(array, len, isArray, len)) {
- %NormalizeElements(array);
- SparseReverse(array, len);
- return array;
- } else if (isArray && %_HasFastPackedElements(array)) {
- return PackedArrayReverse(array, len);
- } else {
- return GenericArrayReverse(array, len);
- }
- }
-);
-
-
function ArrayShiftFallback() {
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
@@ -799,19 +699,6 @@ function InnerArraySort(array, length, comparefn) {
return array;
}
-DEFINE_METHOD(
- GlobalArray.prototype,
- sort(comparefn) {
- if (!IS_UNDEFINED(comparefn) && !IS_CALLABLE(comparefn)) {
- throw %make_type_error(kBadSortComparisonFunction, comparefn);
- }
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
- return InnerArraySort(array, length, comparefn);
- }
-);
-
DEFINE_METHOD_LEN(
GlobalArray.prototype,
@@ -853,16 +740,8 @@ DEFINE_METHOD_LEN(
}
}
// Lookup through the array.
- if (!IS_UNDEFINED(element)) {
- for (var i = max; i >= min; i--) {
- if (array[i] === element) return i;
- }
- return -1;
- }
for (var i = max; i >= min; i--) {
- if (IS_UNDEFINED(array[i]) && i in array) {
- return i;
- }
+ if (i in array && array[i] === element) return i;
}
return -1;
},
@@ -870,93 +749,6 @@ DEFINE_METHOD_LEN(
);
-// ES#sec-array.prototype.copywithin
-// (Array.prototype.copyWithin ( target, start [ , end ] )
-DEFINE_METHOD_LEN(
- GlobalArray.prototype,
- copyWithin(target, start, end) {
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- target = TO_INTEGER(target);
- var to;
- if (target < 0) {
- to = MathMax(length + target, 0);
- } else {
- to = MathMin(target, length);
- }
-
- start = TO_INTEGER(start);
- var from;
- if (start < 0) {
- from = MathMax(length + start, 0);
- } else {
- from = MathMin(start, length);
- }
-
- end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
- var final;
- if (end < 0) {
- final = MathMax(length + end, 0);
- } else {
- final = MathMin(end, length);
- }
-
- var count = MathMin(final - from, length - to);
- var direction = 1;
- if (from < to && to < (from + count)) {
- direction = -1;
- from = from + count - 1;
- to = to + count - 1;
- }
-
- while (count > 0) {
- if (from in array) {
- array[to] = array[from];
- } else {
- delete array[to];
- }
- from = from + direction;
- to = to + direction;
- count--;
- }
-
- return array;
- },
- 2 /* Set function length */
-);
-
-
-// ES6, draft 04-05-14, section 22.1.3.6
-DEFINE_METHOD_LEN(
- GlobalArray.prototype,
- fill(value, start, end) {
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- var i = IS_UNDEFINED(start) ? 0 : TO_INTEGER(start);
- var end = IS_UNDEFINED(end) ? length : TO_INTEGER(end);
-
- if (i < 0) {
- i += length;
- if (i < 0) i = 0;
- } else {
- if (i > length) i = length;
- }
-
- if (end < 0) {
- end += length;
- if (end < 0) end = 0;
- } else {
- if (end > length) end = length;
- }
-
- for (; i < end; i++)
- array[i] = value;
- return array;
- },
- 1 /* Set function length */
-);
// Set up unscopable properties on the Array.prototype object.
var unscopables = {
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
index 82c28f79e0..db4d45c563 100644
--- a/deps/v8/src/js/intl.js
+++ b/deps/v8/src/js/intl.js
@@ -26,7 +26,6 @@ var GlobalIntlNumberFormat = GlobalIntl.NumberFormat;
var GlobalIntlCollator = GlobalIntl.Collator;
var GlobalIntlPluralRules = GlobalIntl.PluralRules;
var GlobalIntlv8BreakIterator = GlobalIntl.v8BreakIterator;
-var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var GlobalArray = global.Array;
@@ -39,7 +38,6 @@ var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var StringSubstr = GlobalString.prototype.substr;
var StringSubstring = GlobalString.prototype.substring;
-var ArraySlice = GlobalArray.prototype.slice;
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
@@ -48,18 +46,10 @@ utils.Import(function(from) {
// Utilities for definitions
-macro IS_OBJECT(arg)
-(typeof(arg) === 'object')
-endmacro
-
macro NUMBER_IS_NAN(arg)
(%IS_VAR(arg) !== arg)
endmacro
-macro NUMBER_IS_FINITE(arg)
-(%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)))
-endmacro
-
// To avoid ES2015 Function name inference.
macro ANONYMOUS_FUNCTION(fn)
@@ -138,21 +128,10 @@ var AVAILABLE_LOCALES = {
'breakiterator': UNDEFINED,
'pluralrules': UNDEFINED,
'relativetimeformat': UNDEFINED,
+ 'listformat': UNDEFINED,
};
/**
- * Caches default ICU locale.
- */
-var DEFAULT_ICU_LOCALE = UNDEFINED;
-
-function GetDefaultICULocaleJS() {
- if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
- DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
- }
- return DEFAULT_ICU_LOCALE;
-}
-
-/**
* Unicode extension regular expression.
*/
var UNICODE_EXTENSION_RE = UNDEFINED;
@@ -202,46 +181,10 @@ function GetServiceRE() {
}
/**
- * Validates a language tag against bcp47 spec.
- * Actual value is assigned on first run.
- */
-var LANGUAGE_TAG_RE = UNDEFINED;
-
-function GetLanguageTagRE() {
- if (IS_UNDEFINED(LANGUAGE_TAG_RE)) {
- BuildLanguageTagREs();
- }
- return LANGUAGE_TAG_RE;
-}
-
-/**
- * Helps find duplicate variants in the language tag.
- */
-var LANGUAGE_VARIANT_RE = UNDEFINED;
-
-function GetLanguageVariantRE() {
- if (IS_UNDEFINED(LANGUAGE_VARIANT_RE)) {
- BuildLanguageTagREs();
- }
- return LANGUAGE_VARIANT_RE;
-}
-
-/**
- * Helps find duplicate singletons in the language tag.
- */
-var LANGUAGE_SINGLETON_RE = UNDEFINED;
-
-function GetLanguageSingletonRE() {
- if (IS_UNDEFINED(LANGUAGE_SINGLETON_RE)) {
- BuildLanguageTagREs();
- }
- return LANGUAGE_SINGLETON_RE;
-}
-
-/**
* Matches valid IANA time zone names.
*/
var TIMEZONE_NAME_CHECK_RE = UNDEFINED;
+var GMT_OFFSET_TIMEZONE_NAME_CHECK_RE = UNDEFINED;
function GetTimezoneNameCheckRE() {
if (IS_UNDEFINED(TIMEZONE_NAME_CHECK_RE)) {
@@ -251,6 +194,14 @@ function GetTimezoneNameCheckRE() {
return TIMEZONE_NAME_CHECK_RE;
}
+function GetGMTOffsetTimezoneNameCheckRE() {
+ if (IS_UNDEFINED(GMT_OFFSET_TIMEZONE_NAME_CHECK_RE)) {
+ GMT_OFFSET_TIMEZONE_NAME_CHECK_RE = new GlobalRegExp(
+ '^(?:ETC/GMT)(?<offset>0|[+-](?:[0-9]|1[0-4]))$');
+ }
+ return GMT_OFFSET_TIMEZONE_NAME_CHECK_RE;
+}
+
/**
* Matches valid location parts of IANA time zone names.
*/
@@ -266,89 +217,6 @@ function GetTimezoneNameLocationPartRE() {
/**
- * Returns an intersection of locales and service supported locales.
- * Parameter locales is treated as a priority list.
- */
-function supportedLocalesOf(service, locales, options) {
- if (IS_NULL(%regexp_internal_match(GetServiceRE(), service))) {
- throw %make_error(kWrongServiceType, service);
- }
-
- // Provide defaults if matcher was not specified.
- if (IS_UNDEFINED(options)) {
- options = {__proto__: null};
- } else {
- options = TO_OBJECT(options);
- }
-
- var matcher = options.localeMatcher;
- if (!IS_UNDEFINED(matcher)) {
- matcher = TO_STRING(matcher);
- if (matcher !== 'lookup' && matcher !== 'best fit') {
- throw %make_range_error(kLocaleMatcher, matcher);
- }
- } else {
- matcher = 'best fit';
- }
-
- var requestedLocales = initializeLocaleList(locales);
-
- var availableLocales = getAvailableLocalesOf(service);
-
- // Use either best fit or lookup algorithm to match locales.
- if (matcher === 'best fit') {
- return initializeLocaleList(bestFitSupportedLocalesOf(
- requestedLocales, availableLocales));
- }
-
- return initializeLocaleList(lookupSupportedLocalesOf(
- requestedLocales, availableLocales));
-}
-
-
-/**
- * Returns the subset of the provided BCP 47 language priority list for which
- * this service has a matching locale when using the BCP 47 Lookup algorithm.
- * Locales appear in the same order in the returned list as in the input list.
- */
-function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
- var matchedLocales = new InternalArray();
- for (var i = 0; i < requestedLocales.length; ++i) {
- // Remove -u- extension.
- var locale = %RegExpInternalReplace(
- GetUnicodeExtensionRE(), requestedLocales[i], '');
- do {
- if (!IS_UNDEFINED(availableLocales[locale])) {
- // Push requested locale not the resolved one.
- %_Call(ArrayPush, matchedLocales, requestedLocales[i]);
- break;
- }
- // Truncate locale if possible, if not break.
- var pos = %StringLastIndexOf(locale, '-');
- if (pos === -1) {
- break;
- }
- locale = %_Call(StringSubstring, locale, 0, pos);
- } while (true);
- }
-
- return matchedLocales;
-}
-
-
-/**
- * Returns the subset of the provided BCP 47 language priority list for which
- * this service has a matching locale when using the implementation
- * dependent algorithm.
- * Locales appear in the same order in the returned list as in the input list.
- */
-function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
- // TODO(cira): implement better best fit algorithm.
- return lookupSupportedLocalesOf(requestedLocales, availableLocales);
-}
-
-
-/**
* Returns a getOption function that extracts property value for given
* options object. If property is missing it returns defaultValue. If value
* is out of range for that property it throws RangeError.
@@ -465,7 +333,12 @@ function attemptSingleLookup(availableLocales, requestedLocale) {
var extensionMatch = %regexp_internal_match(
GetUnicodeExtensionRE(), requestedLocale);
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
- return {__proto__: null, locale: availableLocale, extension: extension};
+ return {
+ __proto__: null,
+ locale: availableLocale,
+ extension: extension,
+ localeWithExtension: availableLocale + extension,
+ };
}
return UNDEFINED;
}
@@ -489,7 +362,7 @@ function lookupMatcher(service, requestedLocales) {
}
}
- var defLocale = GetDefaultICULocaleJS();
+ var defLocale = %GetDefaultICULocale();
// While ECMA-402 returns defLocale directly, we have to check if it is
// supported, as such support is not guaranteed.
@@ -502,7 +375,8 @@ function lookupMatcher(service, requestedLocales) {
return {
__proto__: null,
locale: 'und',
- extension: ''
+ extension: '',
+ localeWithExtension: 'und',
};
}
@@ -516,61 +390,6 @@ function bestFitMatcher(service, requestedLocales) {
return lookupMatcher(service, requestedLocales);
}
-
-/**
- * Parses Unicode extension into key - value map.
- * Returns empty object if the extension string is invalid.
- * We are not concerned with the validity of the values at this point.
- * 'attribute' in RFC 6047 is not supported. Keys without explicit
- * values are assigned UNDEFINED.
- * TODO(jshin): Fix the handling of 'attribute' (in RFC 6047, but none
- * has been defined so that it's not used) and boolean keys without
- * an explicit value.
- */
-function parseExtension(extension) {
- var extensionSplit = %StringSplit(extension, '-', kMaxUint32);
-
- // Assume ['', 'u', ...] input, but don't throw.
- if (extensionSplit.length <= 2 ||
- (extensionSplit[0] !== '' && extensionSplit[1] !== 'u')) {
- return {__proto__: null};
- }
-
- // Key is {2}alphanum, value is {3,8}alphanum.
- // Some keys may not have explicit values (booleans).
- var extensionMap = {__proto__: null};
- var key = UNDEFINED;
- var value = UNDEFINED;
- for (var i = 2; i < extensionSplit.length; ++i) {
- var length = extensionSplit[i].length;
- var element = extensionSplit[i];
- if (length === 2) {
- if (!IS_UNDEFINED(key)) {
- if (!(key in extensionMap)) {
- extensionMap[key] = value;
- }
- value = UNDEFINED;
- }
- key = element;
- } else if (length >= 3 && length <= 8 && !IS_UNDEFINED(key)) {
- if (IS_UNDEFINED(value)) {
- value = element;
- } else {
- value = value + "-" + element;
- }
- } else {
- // There is a value that's too long, or that doesn't have a key.
- return {__proto__: null};
- }
- }
- if (!IS_UNDEFINED(key) && !(key in extensionMap)) {
- extensionMap[key] = value;
- }
-
- return extensionMap;
-}
-
-
/**
* Populates internalOptions object with boolean key-value pairs
* from extensionMap and options.
@@ -594,7 +413,7 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
}
if (!IS_UNDEFINED(property)) {
- defineWEProperty(outOptions, property, value);
+ %DefineWEProperty(outOptions, property, value);
}
}
@@ -696,27 +515,6 @@ function getAvailableLocalesOf(service) {
/**
- * Defines a property and sets writable and enumerable to true.
- * Configurable is false by default.
- */
-function defineWEProperty(object, property, value) {
- %object_define_property(object, property,
- {value: value, writable: true, enumerable: true});
-}
-
-
-/**
- * Adds property to an object if the value is not undefined.
- * Sets configurable descriptor to false.
- */
-function addWEPropertyIfDefined(object, property, value) {
- if (!IS_UNDEFINED(value)) {
- defineWEProperty(object, property, value);
- }
-}
-
-
-/**
* Defines a property and sets writable, enumerable and configurable to true.
*/
function defineWECProperty(object, property, value) {
@@ -774,45 +572,6 @@ function toTitleCaseTimezoneLocation(location) {
return result;
}
-/**
- * Canonicalizes the language tag, or throws in case the tag is invalid.
- * ECMA 402 9.2.1 steps 7.c ii ~ v.
- */
-function canonicalizeLanguageTag(localeID) {
- // null is typeof 'object' so we have to do extra check.
- if ((!IS_STRING(localeID) && !IS_RECEIVER(localeID)) ||
- IS_NULL(localeID)) {
- throw %make_type_error(kLanguageID);
- }
-
- var localeString = TO_STRING(localeID);
-
- // Optimize for the most common case; a 2-letter language code in the
- // canonical form/lowercase that is not one of deprecated codes
- // (in, iw, ji, jw). Don't check for ~70 of 3-letter deprecated language
- // codes. Instead, let them be handled by ICU in the slow path. Besides,
- // fast-track 'fil' (3-letter canonical code).
- if ((!IS_NULL(%regexp_internal_match(/^[a-z]{2}$/, localeString)) &&
- IS_NULL(%regexp_internal_match(/^(in|iw|ji|jw)$/, localeString))) ||
- localeString === "fil") {
- return localeString;
- }
-
- if (isStructuallyValidLanguageTag(localeString) === false) {
- throw %make_range_error(kInvalidLanguageTag, localeString);
- }
-
- // ECMA 402 6.2.3
- var tag = %CanonicalizeLanguageTag(localeString);
- // TODO(jshin): This should not happen because the structural validity
- // is already checked. If that's the case, remove this.
- if (tag === 'invalid-tag') {
- throw %make_range_error(kInvalidLanguageTag, localeString);
- }
-
- return tag;
-}
-
/**
* Returns an InternalArray where all locales are canonicalized and duplicates
@@ -825,7 +584,7 @@ function canonicalizeLocaleList(locales) {
if (!IS_UNDEFINED(locales)) {
// We allow single string localeID.
if (typeof locales === 'string') {
- %_Call(ArrayPush, seen, canonicalizeLanguageTag(locales));
+ %_Call(ArrayPush, seen, %CanonicalizeLanguageTag(locales));
return seen;
}
@@ -836,7 +595,7 @@ function canonicalizeLocaleList(locales) {
if (k in o) {
var value = o[k];
- var tag = canonicalizeLanguageTag(value);
+ var tag = %CanonicalizeLanguageTag(value);
if (%ArrayIndexOf(seen, tag, 0) === -1) {
%_Call(ArrayPush, seen, tag);
@@ -848,111 +607,16 @@ function canonicalizeLocaleList(locales) {
return seen;
}
-function initializeLocaleList(locales) {
- return freezeArray(canonicalizeLocaleList(locales));
-}
-
-/**
- * Check the structural Validity of the language tag per ECMA 402 6.2.2:
- * - Well-formed per RFC 5646 2.1
- * - There are no duplicate variant subtags
- * - There are no duplicate singletion (extension) subtags
- *
- * One extra-check is done (from RFC 5646 2.2.9): the tag is compared
- * against the list of grandfathered tags. However, subtags for
- * primary/extended language, script, region, variant are not checked
- * against the IANA language subtag registry.
- *
- * ICU is too permissible and lets invalid tags, like
- * hant-cmn-cn, through.
- *
- * Returns false if the language tag is invalid.
- */
-function isStructuallyValidLanguageTag(locale) {
- // Check if it's well-formed, including grandfadered tags.
- if (IS_NULL(%regexp_internal_match(GetLanguageTagRE(), locale))) {
- return false;
- }
-
- locale = %StringToLowerCaseIntl(locale);
-
- // Just return if it's a x- form. It's all private.
- if (%StringIndexOf(locale, 'x-', 0) === 0) {
- return true;
- }
-
- // Check if there are any duplicate variants or singletons (extensions).
-
- // Remove private use section.
- locale = %StringSplit(locale, '-x-', kMaxUint32)[0];
-
- // Skip language since it can match variant regex, so we start from 1.
- // We are matching i-klingon here, but that's ok, since i-klingon-klingon
- // is not valid and would fail LANGUAGE_TAG_RE test.
- var variants = new InternalArray();
- var extensions = new InternalArray();
- var parts = %StringSplit(locale, '-', kMaxUint32);
- for (var i = 1; i < parts.length; i++) {
- var value = parts[i];
- if (!IS_NULL(%regexp_internal_match(GetLanguageVariantRE(), value)) &&
- extensions.length === 0) {
- if (%ArrayIndexOf(variants, value, 0) === -1) {
- %_Call(ArrayPush, variants, value);
- } else {
- return false;
- }
- }
-
- if (!IS_NULL(%regexp_internal_match(GetLanguageSingletonRE(), value))) {
- if (%ArrayIndexOf(extensions, value, 0) === -1) {
- %_Call(ArrayPush, extensions, value);
- } else {
- return false;
- }
- }
- }
-
- return true;
- }
+// TODO(ftang): remove the %InstallToContext once
+// initializeLocaleList is available in C++
+// https://bugs.chromium.org/p/v8/issues/detail?id=7987
+%InstallToContext([
+ "canonicalize_locale_list", canonicalizeLocaleList
+]);
-/**
- * Builds a regular expresion that validates the language tag
- * against bcp47 spec.
- * Uses http://tools.ietf.org/html/bcp47, section 2.1, ABNF.
- * Runs on load and initializes the global REs.
- */
-function BuildLanguageTagREs() {
- var alpha = '[a-zA-Z]';
- var digit = '[0-9]';
- var alphanum = '(' + alpha + '|' + digit + ')';
- var regular = '(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|' +
- 'zh-min|zh-min-nan|zh-xiang)';
- var irregular = '(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|' +
- 'i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|' +
- 'i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)';
- var grandfathered = '(' + irregular + '|' + regular + ')';
- var privateUse = '(x(-' + alphanum + '{1,8})+)';
-
- var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
- LANGUAGE_SINGLETON_RE = new GlobalRegExp('^' + singleton + '$', 'i');
-
- var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
-
- var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
- LANGUAGE_VARIANT_RE = new GlobalRegExp('^' + variant + '$', 'i');
-
- var region = '(' + alpha + '{2}|' + digit + '{3})';
- var script = '(' + alpha + '{4})';
- var extLang = '(' + alpha + '{3}(-' + alpha + '{3}){0,2})';
- var language = '(' + alpha + '{2,3}(-' + extLang + ')?|' + alpha + '{4}|' +
- alpha + '{5,8})';
- var langTag = language + '(-' + script + ')?(-' + region + ')?(-' +
- variant + ')*(-' + extension + ')*(-' + privateUse + ')?';
-
- var languageTag =
- '^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
- LANGUAGE_TAG_RE = new GlobalRegExp(languageTag, 'i');
+function initializeLocaleList(locales) {
+ return freezeArray(canonicalizeLocaleList(locales));
}
// ECMA 402 section 8.2.1
@@ -964,140 +628,12 @@ DEFINE_METHOD(
);
/**
- * Initializes the given object so it's a valid Collator instance.
- * Useful for subclassing.
- */
-function CreateCollator(locales, options) {
- if (IS_UNDEFINED(options)) {
- options = {__proto__: null};
- }
-
- var getOption = getGetOption(options, 'collator');
-
- var internalOptions = {__proto__: null};
-
- defineWEProperty(internalOptions, 'usage', getOption(
- 'usage', 'string', ['sort', 'search'], 'sort'));
-
- var sensitivity = getOption('sensitivity', 'string',
- ['base', 'accent', 'case', 'variant']);
- if (IS_UNDEFINED(sensitivity) && internalOptions.usage === 'sort') {
- sensitivity = 'variant';
- }
- defineWEProperty(internalOptions, 'sensitivity', sensitivity);
-
- defineWEProperty(internalOptions, 'ignorePunctuation', getOption(
- 'ignorePunctuation', 'boolean', UNDEFINED, false));
-
- var locale = resolveLocale('collator', locales, options);
-
- // TODO(jshin): ICU now can take kb, kc, etc. Switch over to using ICU
- // directly. See Collator::InitializeCollator and
- // Collator::CreateICUCollator in src/objects/intl-objects.cc
- // ICU can't take kb, kc... parameters through localeID, so we need to pass
- // them as options.
- // One exception is -co- which has to be part of the extension, but only for
- // usage: sort, and its value can't be 'standard' or 'search'.
- var extensionMap = parseExtension(locale.extension);
-
- /**
- * Map of Unicode extensions to option properties, and their values and types,
- * for a collator.
- */
- var COLLATOR_KEY_MAP = {
- __proto__: null,
- 'kn': { __proto__: null, 'property': 'numeric', 'type': 'boolean'},
- 'kf': { __proto__: null, 'property': 'caseFirst', 'type': 'string',
- 'values': ['false', 'lower', 'upper']}
- };
-
- setOptions(
- options, extensionMap, COLLATOR_KEY_MAP, getOption, internalOptions);
-
- var collation = 'default';
- var extension = '';
- if (HAS_OWN_PROPERTY(extensionMap, 'co') && internalOptions.usage === 'sort') {
-
- /**
- * Allowed -u-co- values. List taken from:
- * http://unicode.org/repos/cldr/trunk/common/bcp47/collation.xml
- */
- var ALLOWED_CO_VALUES = [
- 'big5han', 'dict', 'direct', 'ducet', 'gb2312', 'phonebk', 'phonetic',
- 'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
- ];
-
- if (%ArrayIndexOf(ALLOWED_CO_VALUES, extensionMap.co, 0) !== -1) {
- extension = '-u-co-' + extensionMap.co;
- // ICU can't tell us what the collation is, so save user's input.
- collation = extensionMap.co;
- }
- } else if (internalOptions.usage === 'search') {
- extension = '-u-co-search';
- }
- defineWEProperty(internalOptions, 'collation', collation);
-
- var requestedLocale = locale.locale + extension;
-
- // We define all properties C++ code may produce, to prevent security
- // problems. If malicious user decides to redefine Object.prototype.locale
- // we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
- // %object_define_properties will either succeed defining or throw an error.
- var resolved = %object_define_properties({__proto__: null}, {
- caseFirst: {writable: true},
- collation: {value: internalOptions.collation, writable: true},
- ignorePunctuation: {writable: true},
- locale: {writable: true},
- numeric: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- sensitivity: {writable: true},
- strength: {writable: true},
- usage: {value: internalOptions.usage, writable: true}
- });
-
- var collator = %CreateCollator(requestedLocale, internalOptions, resolved);
-
- %MarkAsInitializedIntlObjectOfType(collator, COLLATOR_TYPE);
- collator[resolvedSymbol] = resolved;
-
- return collator;
-}
-
-
-/**
- * Constructs Intl.Collator object given optional locales and options
- * parameters.
- *
- * @constructor
- */
-function CollatorConstructor() {
- return IntlConstruct(this, GlobalIntlCollator, CreateCollator, new.target,
- arguments);
-}
-%SetCode(GlobalIntlCollator, CollatorConstructor);
-
-
-/**
* Collator resolvedOptions method.
*/
DEFINE_METHOD(
GlobalIntlCollator.prototype,
resolvedOptions() {
- var methodName = 'resolvedOptions';
- if(!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver, methodName, this);
- }
- var coll = %IntlUnwrapReceiver(this, COLLATOR_TYPE, GlobalIntlCollator,
- methodName, false);
- return {
- locale: coll[resolvedSymbol].locale,
- usage: coll[resolvedSymbol].usage,
- sensitivity: coll[resolvedSymbol].sensitivity,
- ignorePunctuation: coll[resolvedSymbol].ignorePunctuation,
- numeric: coll[resolvedSymbol].numeric,
- caseFirst: coll[resolvedSymbol].caseFirst,
- collation: coll[resolvedSymbol].collation
- };
+ return %CollatorResolvedOptions(this);
}
);
@@ -1111,188 +647,59 @@ DEFINE_METHOD(
DEFINE_METHOD(
GlobalIntlCollator,
supportedLocalesOf(locales) {
- return supportedLocalesOf('collator', locales, arguments[1]);
+ return %SupportedLocalesOf('collator', locales, arguments[1]);
}
);
-/**
- * When the compare method is called with two arguments x and y, it returns a
- * Number other than NaN that represents the result of a locale-sensitive
- * String comparison of x with y.
- * The result is intended to order String values in the sort order specified
- * by the effective locale and collation options computed during construction
- * of this Collator object, and will be negative, zero, or positive, depending
- * on whether x comes before y in the sort order, the Strings are equal under
- * the sort order, or x comes after y in the sort order, respectively.
- */
-function compare(collator, x, y) {
- return %InternalCompare(collator, TO_STRING(x), TO_STRING(y));
-};
-
-
-AddBoundMethod(GlobalIntlCollator, 'compare', compare, 2, COLLATOR_TYPE, false);
-
-function PluralRulesConstructor() {
- if (IS_UNDEFINED(new.target)) {
- throw %make_type_error(kConstructorNotFunction, "PluralRules");
- }
-
- var locales = arguments[0];
- var options = arguments[1];
-
- if (IS_UNDEFINED(options)) {
- options = {__proto__: null};
- }
-
- var getOption = getGetOption(options, 'pluralrules');
-
- var locale = resolveLocale('pluralrules', locales, options);
-
- var internalOptions = {__proto__: null};
- defineWEProperty(internalOptions, 'type', getOption(
- 'type', 'string', ['cardinal', 'ordinal'], 'cardinal'));
-
- SetNumberFormatDigitOptions(internalOptions, options, 0, 3);
-
- var requestedLocale = locale.locale;
- var resolved = %object_define_properties({__proto__: null}, {
- type: {value: internalOptions.type, writable: true},
- locale: {writable: true},
- maximumFractionDigits: {writable: true},
- minimumFractionDigits: {writable: true},
- minimumIntegerDigits: {writable: true},
- requestedLocale: {value: requestedLocale, writable: true},
- });
- if (HAS_OWN_PROPERTY(internalOptions, 'minimumSignificantDigits')) {
- defineWEProperty(resolved, 'minimumSignificantDigits', UNDEFINED);
- }
- if (HAS_OWN_PROPERTY(internalOptions, 'maximumSignificantDigits')) {
- defineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
- }
- defineWEProperty(resolved, 'pluralCategories', []);
- var pluralRules = %CreatePluralRules(requestedLocale, internalOptions,
- resolved);
-
- %MarkAsInitializedIntlObjectOfType(pluralRules, PLURAL_RULES_TYPE);
- pluralRules[resolvedSymbol] = resolved;
-
- return pluralRules;
-}
-%SetCode(GlobalIntlPluralRules, PluralRulesConstructor);
-
DEFINE_METHOD(
GlobalIntlPluralRules.prototype,
resolvedOptions() {
- if (!%IsInitializedIntlObjectOfType(this, PLURAL_RULES_TYPE)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Intl.PluralRules.prototype.resolvedOptions',
- this);
- }
-
- var result = {
- locale: this[resolvedSymbol].locale,
- type: this[resolvedSymbol].type,
- minimumIntegerDigits: this[resolvedSymbol].minimumIntegerDigits,
- minimumFractionDigits: this[resolvedSymbol].minimumFractionDigits,
- maximumFractionDigits: this[resolvedSymbol].maximumFractionDigits,
- };
-
- if (HAS_OWN_PROPERTY(this[resolvedSymbol], 'minimumSignificantDigits')) {
- defineWECProperty(result, 'minimumSignificantDigits',
- this[resolvedSymbol].minimumSignificantDigits);
- }
-
- if (HAS_OWN_PROPERTY(this[resolvedSymbol], 'maximumSignificantDigits')) {
- defineWECProperty(result, 'maximumSignificantDigits',
- this[resolvedSymbol].maximumSignificantDigits);
- }
-
- defineWECProperty(result, 'pluralCategories',
- %_Call(ArraySlice, this[resolvedSymbol].pluralCategories));
- return result;
+ return %PluralRulesResolvedOptions(this);
}
);
DEFINE_METHOD(
GlobalIntlPluralRules,
supportedLocalesOf(locales) {
- return supportedLocalesOf('pluralrules', locales, arguments[1]);
+ return %SupportedLocalesOf('pluralrules', locales, arguments[1]);
}
);
DEFINE_METHOD(
GlobalIntlPluralRules.prototype,
select(value) {
- if (!%IsInitializedIntlObjectOfType(this, PLURAL_RULES_TYPE)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Intl.PluralRules.prototype.select',
- this);
- }
-
return %PluralRulesSelect(this, TO_NUMBER(value) + 0);
}
);
-/**
- * Verifies that the input is a well-formed ISO 4217 currency code.
- * Don't uppercase to test. It could convert invalid code into a valid one.
- * For example \u00DFP (Eszett+P) becomes SSP.
- */
-function isWellFormedCurrencyCode(currency) {
- return typeof currency === "string" && currency.length === 3 &&
- IS_NULL(%regexp_internal_match(/[^A-Za-z]/, currency));
-}
-
-
-function defaultNumberOption(value, min, max, fallback, property) {
- if (!IS_UNDEFINED(value)) {
- value = TO_NUMBER(value);
- if (NUMBER_IS_NAN(value) || value < min || value > max) {
- throw %make_range_error(kPropertyValueOutOfRange, property);
- }
- return %math_floor(value);
- }
-
- return fallback;
-}
-
-/**
- * Returns the valid digit count for a property, or throws RangeError on
- * a value out of the range.
- */
-function getNumberOption(options, property, min, max, fallback) {
- var value = options[property];
- return defaultNumberOption(value, min, max, fallback, property);
-}
-
// ECMA 402 #sec-setnfdigitoptions
// SetNumberFormatDigitOptions ( intlObj, options, mnfdDefault, mxfdDefault )
function SetNumberFormatDigitOptions(internalOptions, options,
mnfdDefault, mxfdDefault) {
// Digit ranges.
- var mnid = getNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
- defineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
+ var mnid = %GetNumberOption(options, 'minimumIntegerDigits', 1, 21, 1);
+ %DefineWEProperty(internalOptions, 'minimumIntegerDigits', mnid);
- var mnfd = getNumberOption(options, 'minimumFractionDigits', 0, 20,
+ var mnfd = %GetNumberOption(options, 'minimumFractionDigits', 0, 20,
mnfdDefault);
- defineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
+ %DefineWEProperty(internalOptions, 'minimumFractionDigits', mnfd);
var mxfdActualDefault = MathMax(mnfd, mxfdDefault);
- var mxfd = getNumberOption(options, 'maximumFractionDigits', mnfd, 20,
+ var mxfd = %GetNumberOption(options, 'maximumFractionDigits', mnfd, 20,
mxfdActualDefault);
- defineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
+ %DefineWEProperty(internalOptions, 'maximumFractionDigits', mxfd);
var mnsd = options['minimumSignificantDigits'];
var mxsd = options['maximumSignificantDigits'];
if (!IS_UNDEFINED(mnsd) || !IS_UNDEFINED(mxsd)) {
- mnsd = defaultNumberOption(mnsd, 1, 21, 1, 'minimumSignificantDigits');
- defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
+ mnsd = %DefaultNumberOption(mnsd, 1, 21, 1, 'minimumSignificantDigits');
+ %DefineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
- mxsd = defaultNumberOption(mxsd, mnsd, 21, 21, 'maximumSignificantDigits');
- defineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
+ mxsd = %DefaultNumberOption(mxsd, mnsd, 21, 21, 'maximumSignificantDigits');
+ %DefineWEProperty(internalOptions, 'maximumSignificantDigits', mxsd);
}
}
@@ -1303,6 +710,8 @@ function SetNumberFormatDigitOptions(internalOptions, options,
function CreateNumberFormat(locales, options) {
if (IS_UNDEFINED(options)) {
options = {__proto__: null};
+ } else {
+ options = TO_OBJECT(options);
}
var getOption = getGetOption(options, 'numberformat');
@@ -1310,11 +719,11 @@ function CreateNumberFormat(locales, options) {
var locale = resolveLocale('numberformat', locales, options);
var internalOptions = {__proto__: null};
- defineWEProperty(internalOptions, 'style', getOption(
+ %DefineWEProperty(internalOptions, 'style', getOption(
'style', 'string', ['decimal', 'percent', 'currency'], 'decimal'));
var currency = getOption('currency', 'string');
- if (!IS_UNDEFINED(currency) && !isWellFormedCurrencyCode(currency)) {
+ if (!IS_UNDEFINED(currency) && !%IsWellFormedCurrencyCode(currency)) {
throw %make_range_error(kInvalidCurrencyCode, currency);
}
@@ -1327,8 +736,8 @@ function CreateNumberFormat(locales, options) {
var currencyDisplay = getOption(
'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
if (internalOptions.style === 'currency') {
- defineWEProperty(internalOptions, 'currency', %StringToUpperCaseIntl(currency));
- defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
+ %DefineWEProperty(internalOptions, 'currency', %StringToUpperCaseIntl(currency));
+ %DefineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
mnfdDefault = mxfdDefault = %CurrencyDigits(internalOptions.currency);
} else {
@@ -1340,12 +749,12 @@ function CreateNumberFormat(locales, options) {
mxfdDefault);
// Grouping.
- defineWEProperty(internalOptions, 'useGrouping', getOption(
+ %DefineWEProperty(internalOptions, 'useGrouping', getOption(
'useGrouping', 'boolean', UNDEFINED, true));
// ICU prefers options to be passed using -u- extension key/values for
// number format, so we need to build that.
- var extensionMap = parseExtension(locale.extension);
+ var extensionMap = %ParseExtension(locale.extension);
/**
* Map of Unicode extensions to option properties, and their values and types,
@@ -1373,10 +782,10 @@ function CreateNumberFormat(locales, options) {
useGrouping: {writable: true}
});
if (HAS_OWN_PROPERTY(internalOptions, 'minimumSignificantDigits')) {
- defineWEProperty(resolved, 'minimumSignificantDigits', UNDEFINED);
+ %DefineWEProperty(resolved, 'minimumSignificantDigits', UNDEFINED);
}
if (HAS_OWN_PROPERTY(internalOptions, 'maximumSignificantDigits')) {
- defineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
+ %DefineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
}
var numberFormat = %CreateNumberFormat(requestedLocale, internalOptions,
resolved);
@@ -1459,23 +868,10 @@ DEFINE_METHOD(
DEFINE_METHOD(
GlobalIntlNumberFormat,
supportedLocalesOf(locales) {
- return supportedLocalesOf('numberformat', locales, arguments[1]);
+ return %SupportedLocalesOf('numberformat', locales, arguments[1]);
}
);
-
-/**
- * Returns a String value representing the result of calling ToNumber(value)
- * according to the effective locale and the formatting options of this
- * NumberFormat.
- */
-function formatNumber(formatter, value) {
- // Spec treats -0 and +0 as 0.
- var number = TO_NUMBER(value) + 0;
-
- return %InternalNumberFormat(formatter, number);
-}
-
/**
* Returns a string that matches LDML representation of the options object.
*/
@@ -1605,13 +1001,13 @@ function fromLDMLString(ldmlString) {
function appendToDateTimeObject(options, option, match, pairs) {
if (IS_NULL(match)) {
if (!HAS_OWN_PROPERTY(options, option)) {
- defineWEProperty(options, option, UNDEFINED);
+ %DefineWEProperty(options, option, UNDEFINED);
}
return options;
}
var property = match[0];
- defineWEProperty(options, option, pairs[property]);
+ %DefineWEProperty(options, option, pairs[property]);
return options;
}
@@ -1687,7 +1083,7 @@ function CreateDateTimeFormat(locales, options) {
var locale = resolveLocale('dateformat', locales, options);
- options = toDateTimeOptions(options, 'any', 'date');
+ options = %ToDateTimeOptions(options, 'any', 'date');
var getOption = getGetOption(options, 'dateformat');
@@ -1707,7 +1103,7 @@ function CreateDateTimeFormat(locales, options) {
// ICU prefers options to be passed using -u- extension key/values, so
// we need to build that.
var internalOptions = {__proto__: null};
- var extensionMap = parseExtension(locale.extension);
+ var extensionMap = %ParseExtension(locale.extension);
/**
* Map of Unicode extensions to option properties, and their values and types,
@@ -1748,7 +1144,7 @@ function CreateDateTimeFormat(locales, options) {
{__proto__: null, skeleton: ldmlString, timeZone: tz}, resolved);
if (resolved.timeZone === "Etc/Unknown") {
- throw %make_range_error(kUnsupportedTimeZone, tz);
+ throw %make_range_error(kInvalidTimeZone, tz);
}
%MarkAsInitializedIntlObjectOfType(dateFormat, DATE_TIME_FORMAT_TYPE);
@@ -1836,33 +1232,12 @@ DEFINE_METHOD(
DEFINE_METHOD(
GlobalIntlDateTimeFormat,
supportedLocalesOf(locales) {
- return supportedLocalesOf('dateformat', locales, arguments[1]);
+ return %SupportedLocalesOf('dateformat', locales, arguments[1]);
}
);
/**
- * Returns a String value representing the result of calling ToNumber(date)
- * according to the effective locale and the formatting options of this
- * DateTimeFormat.
- */
-function formatDate(formatter, dateValue) {
- var dateMs;
- if (IS_UNDEFINED(dateValue)) {
- dateMs = %DateCurrentTime();
- } else {
- dateMs = TO_NUMBER(dateValue);
- }
-
- return %InternalDateFormat(formatter, dateMs);
-}
-
-// Length is 1 as specified in ECMA 402 v2+
-AddBoundMethod(GlobalIntlDateTimeFormat, 'format', formatDate, 1, DATE_TIME_FORMAT_TYPE,
- true);
-
-
-/**
* Returns canonical Area/Location(/Location) name, or throws an exception
* if the zone name is invalid IANA name.
*/
@@ -1882,18 +1257,28 @@ function canonicalizeTimeZoneID(tzID) {
return 'UTC';
}
- // TODO(jshin): Add support for Etc/GMT[+-]([1-9]|1[0-2])
-
// We expect only _, '-' and / beside ASCII letters.
- // All inputs should conform to Area/Location(/Location)* from now on.
- var match = %regexp_internal_match(GetTimezoneNameCheckRE(), tzID);
- if (IS_NULL(match)) throw %make_range_error(kExpectedTimezoneID, tzID);
+ // All inputs should conform to Area/Location(/Location)*, or Etc/GMT* .
+  // TODO(jshin): 1. Support 'GB-Eire', 'EST5EDT', 'ROK', 'US/*', 'NZ' and many
+ // other aliases/linked names when moving timezone validation code to C++.
+ // See crbug.com/364374 and crbug.com/v8/8007 .
+  // 2. Resolve the difference between CLDR/ICU and IANA time zone db.
+ // See http://unicode.org/cldr/trac/ticket/9892 and crbug.com/645807 .
+ let match = %regexp_internal_match(GetTimezoneNameCheckRE(), tzID);
+ if (IS_NULL(match)) {
+ let match =
+ %regexp_internal_match(GetGMTOffsetTimezoneNameCheckRE(), upperID);
+ if (!IS_NULL(match) && match.length == 2)
+ return "Etc/GMT" + match.groups.offset;
+ else
+ throw %make_range_error(kInvalidTimeZone, tzID);
+ }
- var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
+ let result = toTitleCaseTimezoneLocation(match[1]) + '/' +
toTitleCaseTimezoneLocation(match[2]);
if (!IS_UNDEFINED(match[3]) && 3 < match.length) {
- var locations = %StringSplit(match[3], '/', kMaxUint32);
+ let locations = %StringSplit(match[3], '/', kMaxUint32);
// The 1st element is empty. Starts with i=1.
for (var i = 1; i < locations.length; i++) {
result = result + '/' + toTitleCaseTimezoneLocation(locations[i]);
@@ -1916,7 +1301,7 @@ function CreateBreakIterator(locales, options) {
var internalOptions = {__proto__: null};
- defineWEProperty(internalOptions, 'type', getOption(
+ %DefineWEProperty(internalOptions, 'type', getOption(
'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
var locale = resolveLocale('breakiterator', locales, options);
@@ -1987,21 +1372,12 @@ DEFINE_METHOD(
throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
- return supportedLocalesOf('breakiterator', locales, arguments[1]);
+ return %SupportedLocalesOf('breakiterator', locales, arguments[1]);
}
);
/**
- * Adopts text to segment using the iterator. Old text, if present,
- * gets discarded.
- */
-function adoptText(iterator, text) {
- %BreakIteratorAdoptText(iterator, TO_STRING(text));
-}
-
-
-/**
* Returns index of the first break in the string and moves current pointer.
*/
function first(iterator) {
@@ -2033,8 +1409,6 @@ function breakType(iterator) {
}
-AddBoundMethod(GlobalIntlv8BreakIterator, 'adoptText', adoptText, 1,
- BREAK_ITERATOR_TYPE, false);
AddBoundMethod(GlobalIntlv8BreakIterator, 'first', first, 0,
BREAK_ITERATOR_TYPE, false);
AddBoundMethod(GlobalIntlv8BreakIterator, 'next', next, 0,
@@ -2100,103 +1474,11 @@ function cachedOrNewService(service, locales, options, defaults) {
return new savedObjects[service](locales, useOptions);
}
-function LocaleConvertCase(s, locales, isToUpper) {
- // ECMA 402 section 13.1.2 steps 1 through 12.
- var language;
- // Optimize for the most common two cases. initializeLocaleList() can handle
- // them as well, but it's rather slow accounting for over 60% of
- // toLocale{U,L}Case() and about 40% of toLocale{U,L}Case("<locale>").
- if (IS_UNDEFINED(locales)) {
- language = GetDefaultICULocaleJS();
- } else if (IS_STRING(locales)) {
- language = canonicalizeLanguageTag(locales);
- } else {
- var locales = initializeLocaleList(locales);
- language = locales.length > 0 ? locales[0] : GetDefaultICULocaleJS();
- }
-
- // StringSplit is slower than this.
- var pos = %StringIndexOf(language, '-', 0);
- if (pos !== -1) {
- language = %_Call(StringSubstring, language, 0, pos);
- }
-
- return %StringLocaleConvertCase(s, isToUpper, language);
-}
-
-/**
- * Compares this and that, and returns less than 0, 0 or greater than 0 value.
- * Overrides the built-in method.
- */
-DEFINE_METHOD(
- GlobalString.prototype,
- localeCompare(that) {
- if (IS_NULL_OR_UNDEFINED(this)) {
- throw %make_type_error(kMethodInvokedOnNullOrUndefined);
- }
-
- var locales = arguments[1];
- var options = arguments[2];
- var collator = cachedOrNewService('collator', locales, options);
- return compare(collator, this, that);
- }
-);
-
-DEFINE_METHODS_LEN(
- GlobalString.prototype,
- {
- toLocaleLowerCase(locales) {
- REQUIRE_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
- return LocaleConvertCase(TO_STRING(this), locales, false);
- }
-
- toLocaleUpperCase(locales) {
- REQUIRE_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
- return LocaleConvertCase(TO_STRING(this), locales, true);
- }
- },
- 0 /* Set function length of both methods. */
-);
-
-
-/**
- * Formats a Number object (this) using locale and options values.
- * If locale or options are omitted, defaults are used.
- */
-DEFINE_METHOD(
- GlobalNumber.prototype,
- toLocaleString() {
- if (!(this instanceof GlobalNumber) && typeof(this) !== 'number') {
- throw %make_type_error(kMethodInvokedOnWrongType, "Number");
- }
-
- var locales = arguments[0];
- var options = arguments[1];
- var numberFormat = cachedOrNewService('numberformat', locales, options);
- return formatNumber(numberFormat, this);
- }
-);
-
-
-/**
- * Returns actual formatted date or fails if date parameter is invalid.
- */
-function toLocaleDateTime(date, locales, options, required, defaults, service) {
- if (!(date instanceof GlobalDate)) {
- throw %make_type_error(kMethodInvokedOnWrongType, "Date");
- }
-
- var dateValue = TO_NUMBER(date);
- if (NUMBER_IS_NAN(dateValue)) return 'Invalid Date';
-
- var internalOptions = toDateTimeOptions(options, required, defaults);
-
- var dateFormat =
- cachedOrNewService(service, locales, options, internalOptions);
-
- return formatDate(dateFormat, date);
-}
-
+// TODO(ftang): remove the %InstallToContext once
+// cachedOrNewService is available in C++
+%InstallToContext([
+ "cached_or_new_service", cachedOrNewService
+]);
/**
* Formats a Date object (this) using locale and options values.
@@ -2208,7 +1490,7 @@ DEFINE_METHOD(
toLocaleString() {
var locales = arguments[0];
var options = arguments[1];
- return toLocaleDateTime(
+ return %ToLocaleDateTime(
this, locales, options, 'any', 'all', 'dateformatall');
}
);
@@ -2224,7 +1506,7 @@ DEFINE_METHOD(
toLocaleDateString() {
var locales = arguments[0];
var options = arguments[1];
- return toLocaleDateTime(
+ return %ToLocaleDateTime(
this, locales, options, 'date', 'date', 'dateformatdate');
}
);
@@ -2240,7 +1522,7 @@ DEFINE_METHOD(
toLocaleTimeString() {
var locales = arguments[0];
var options = arguments[1];
- return toLocaleDateTime(
+ return %ToLocaleDateTime(
this, locales, options, 'time', 'time', 'dateformattime');
}
);
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 9e5c6d72d6..8e533c38bc 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -33,9 +33,6 @@ define READ_ONLY = 1;
define DONT_ENUM = 2;
define DONT_DELETE = 4;
-# 2^53 - 1
-define kMaxSafeInteger = 9007199254740991;
-
# 2^32 - 1
define kMaxUint32 = 4294967295;
@@ -45,15 +42,12 @@ define kMaxUint32 = 4294967295;
# It will *not* generate a runtime typeof call for the most important
# values of 'bar'.
macro IS_ARRAY(arg) = (%_IsArray(arg));
-macro IS_FUNCTION(arg) = (%IsFunction(arg));
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_UNDEFINED(arg) = (arg === (void 0));
-macro IS_WEAKMAP(arg) = (%_IsJSWeakMap(arg));
-macro IS_WEAKSET(arg) = (%_IsJSWeakSet(arg));
# Macro for ES queries of the type: "Type(O) is Object."
macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 2c25b6c58a..65662c8083 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -53,7 +53,7 @@ utils.Import(function(from) {
function ValidateTypedArray(array, methodName) {
if (!IS_TYPEDARRAY(array)) throw %make_type_error(kNotTypedArray);
- if (%_ArrayBufferViewWasNeutered(array))
+ if (%ArrayBufferViewWasNeutered(array))
throw %make_type_error(kDetachedOperation, methodName);
}
@@ -67,7 +67,7 @@ DEFINE_METHOD(
var locales = arguments[0];
var options = arguments[1];
- var length = %_TypedArrayGetLength(this);
+ var length = %TypedArrayGetLength(this);
return InnerArrayToLocaleString(this, length, locales, options);
}
);
@@ -79,7 +79,7 @@ DEFINE_METHOD(
join(separator) {
ValidateTypedArray(this, "%TypedArray%.prototype.join");
- var length = %_TypedArrayGetLength(this);
+ var length = %TypedArrayGetLength(this);
return InnerArrayJoin(separator, this, length);
}
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index b1d95422b0..91a1b7201b 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -5,14 +5,128 @@
#include "src/json-stringifier.h"
#include "src/conversions.h"
+#include "src/heap/heap-inl.h"
#include "src/lookup.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/string-builder-inl.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
+class JsonStringifier BASE_EMBEDDED {
+ public:
+ explicit JsonStringifier(Isolate* isolate);
+
+ ~JsonStringifier() { DeleteArray(gap_); }
+
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Stringify(Handle<Object> object,
+ Handle<Object> replacer,
+ Handle<Object> gap);
+
+ private:
+ enum Result { UNCHANGED, SUCCESS, EXCEPTION };
+
+ bool InitializeReplacer(Handle<Object> replacer);
+ bool InitializeGap(Handle<Object> gap);
+
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> ApplyToJsonFunction(
+ Handle<Object> object, Handle<Object> key);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> ApplyReplacerFunction(
+ Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder);
+
+ // Entry point to serialize the object.
+ V8_INLINE Result SerializeObject(Handle<Object> obj) {
+ return Serialize_<false>(obj, false, factory()->empty_string());
+ }
+
+ // Serialize an array element.
+ // The index may serve as argument for the toJSON function.
+ V8_INLINE Result SerializeElement(Isolate* isolate, Handle<Object> object,
+ int i) {
+ return Serialize_<false>(object, false,
+ Handle<Object>(Smi::FromInt(i), isolate));
+ }
+
+  // Serialize an object property.
+ // The key may or may not be serialized depending on the property.
+ // The key may also serve as argument for the toJSON function.
+ V8_INLINE Result SerializeProperty(Handle<Object> object, bool deferred_comma,
+ Handle<String> deferred_key) {
+ DCHECK(!deferred_key.is_null());
+ return Serialize_<true>(object, deferred_comma, deferred_key);
+ }
+
+ template <bool deferred_string_key>
+ Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
+
+ V8_INLINE void SerializeDeferredKey(bool deferred_comma,
+ Handle<Object> deferred_key);
+
+ Result SerializeSmi(Smi* object);
+
+ Result SerializeDouble(double number);
+ V8_INLINE Result SerializeHeapNumber(Handle<HeapNumber> object) {
+ return SerializeDouble(object->value());
+ }
+
+ Result SerializeJSValue(Handle<JSValue> object);
+
+ V8_INLINE Result SerializeJSArray(Handle<JSArray> object);
+ V8_INLINE Result SerializeJSObject(Handle<JSObject> object);
+
+ Result SerializeJSProxy(Handle<JSProxy> object);
+ Result SerializeJSReceiverSlow(Handle<JSReceiver> object);
+ Result SerializeArrayLikeSlow(Handle<JSReceiver> object, uint32_t start,
+ uint32_t length);
+
+ void SerializeString(Handle<String> object);
+
+ template <typename SrcChar, typename DestChar>
+ V8_INLINE static void SerializeStringUnchecked_(
+ Vector<const SrcChar> src,
+ IncrementalStringBuilder::NoExtend<DestChar>* dest);
+
+ template <typename SrcChar, typename DestChar>
+ V8_INLINE void SerializeString_(Handle<String> string);
+
+ template <typename Char>
+ V8_INLINE static bool DoNotEscape(Char c);
+
+ V8_INLINE void NewLine();
+ V8_INLINE void Indent() { indent_++; }
+ V8_INLINE void Unindent() { indent_--; }
+ V8_INLINE void Separator(bool first);
+
+ Handle<JSReceiver> CurrentHolder(Handle<Object> value,
+ Handle<Object> inital_holder);
+
+ Result StackPush(Handle<Object> object);
+ void StackPop();
+
+ Factory* factory() { return isolate_->factory(); }
+
+ Isolate* isolate_;
+ IncrementalStringBuilder builder_;
+ Handle<String> tojson_string_;
+ Handle<JSArray> stack_;
+ Handle<FixedArray> property_list_;
+ Handle<JSReceiver> replacer_function_;
+ uc16* gap_;
+ int indent_;
+
+ static const int kJsonEscapeTableEntrySize = 8;
+ static const char* const JsonEscapeTable;
+};
+
+MaybeHandle<Object> JsonStringify(Isolate* isolate, Handle<Object> object,
+ Handle<Object> replacer, Handle<Object> gap) {
+ JsonStringifier stringifier(isolate);
+ return stringifier.Stringify(object, replacer, gap);
+}
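Callers now reach the stringifier only through this free function; a sketch of the expected call pattern (not part of the patch; `object`, `replacer` and `gap` are hypothetical handles already in scope):

    Handle<Object> result;
    if (!JsonStringify(isolate, object, replacer, gap).ToHandle(&result)) {
      // An exception is pending on the isolate; propagate the failure.
    }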
+
// Translation table to escape Latin1 characters.
// Table entries start at a multiple of 8 and are null-terminated.
const char* const JsonStringifier::JsonEscapeTable =
@@ -112,7 +226,9 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
Handle<Object> length_obj;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, length_obj,
- Object::GetLengthFromArrayLike(isolate_, replacer), false);
+ Object::GetLengthFromArrayLike(isolate_,
+ Handle<JSReceiver>::cast(replacer)),
+ false);
uint32_t length;
if (!length_obj->ToUint32(&length)) length = kMaxUInt32;
for (uint32_t i = 0; i < length; i++) {
@@ -606,7 +722,9 @@ JsonStringifier::Result JsonStringifier::SerializeJSProxy(
Handle<Object> length_object;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, length_object,
- Object::GetLengthFromArrayLike(isolate_, object), EXCEPTION);
+ Object::GetLengthFromArrayLike(isolate_,
+ Handle<JSReceiver>::cast(object)),
+ EXCEPTION);
uint32_t length;
if (!length_object->ToUint32(&length)) {
// Technically, we need to be able to handle lengths outside the
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index e9b97c7d1f..7532255c1a 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -6,117 +6,14 @@
#define V8_JSON_STRINGIFIER_H_
#include "src/objects.h"
-#include "src/string-builder.h"
namespace v8 {
namespace internal {
-class JsonStringifier BASE_EMBEDDED {
- public:
- explicit JsonStringifier(Isolate* isolate);
-
- ~JsonStringifier() { DeleteArray(gap_); }
-
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> Stringify(Handle<Object> object,
- Handle<Object> replacer,
- Handle<Object> gap);
-
- private:
- enum Result { UNCHANGED, SUCCESS, EXCEPTION };
-
- bool InitializeReplacer(Handle<Object> replacer);
- bool InitializeGap(Handle<Object> gap);
-
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> ApplyToJsonFunction(
- Handle<Object> object, Handle<Object> key);
- V8_WARN_UNUSED_RESULT MaybeHandle<Object> ApplyReplacerFunction(
- Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder);
-
- // Entry point to serialize the object.
- V8_INLINE Result SerializeObject(Handle<Object> obj) {
- return Serialize_<false>(obj, false, factory()->empty_string());
- }
-
- // Serialize an array element.
- // The index may serve as argument for the toJSON function.
- V8_INLINE Result SerializeElement(Isolate* isolate, Handle<Object> object,
- int i) {
- return Serialize_<false>(object,
- false,
- Handle<Object>(Smi::FromInt(i), isolate));
- }
-
- // Serialize a object property.
- // The key may or may not be serialized depending on the property.
- // The key may also serve as argument for the toJSON function.
- V8_INLINE Result SerializeProperty(Handle<Object> object, bool deferred_comma,
- Handle<String> deferred_key) {
- DCHECK(!deferred_key.is_null());
- return Serialize_<true>(object, deferred_comma, deferred_key);
- }
-
- template <bool deferred_string_key>
- Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
-
- V8_INLINE void SerializeDeferredKey(bool deferred_comma,
- Handle<Object> deferred_key);
-
- Result SerializeSmi(Smi* object);
-
- Result SerializeDouble(double number);
- V8_INLINE Result SerializeHeapNumber(Handle<HeapNumber> object) {
- return SerializeDouble(object->value());
- }
-
- Result SerializeJSValue(Handle<JSValue> object);
-
- V8_INLINE Result SerializeJSArray(Handle<JSArray> object);
- V8_INLINE Result SerializeJSObject(Handle<JSObject> object);
-
- Result SerializeJSProxy(Handle<JSProxy> object);
- Result SerializeJSReceiverSlow(Handle<JSReceiver> object);
- Result SerializeArrayLikeSlow(Handle<JSReceiver> object, uint32_t start,
- uint32_t length);
-
- void SerializeString(Handle<String> object);
-
- template <typename SrcChar, typename DestChar>
- V8_INLINE static void SerializeStringUnchecked_(
- Vector<const SrcChar> src,
- IncrementalStringBuilder::NoExtend<DestChar>* dest);
-
- template <typename SrcChar, typename DestChar>
- V8_INLINE void SerializeString_(Handle<String> string);
-
- template <typename Char>
- V8_INLINE static bool DoNotEscape(Char c);
-
- V8_INLINE void NewLine();
- V8_INLINE void Indent() { indent_++; }
- V8_INLINE void Unindent() { indent_--; }
- V8_INLINE void Separator(bool first);
-
- Handle<JSReceiver> CurrentHolder(Handle<Object> value,
- Handle<Object> inital_holder);
-
- Result StackPush(Handle<Object> object);
- void StackPop();
-
- Factory* factory() { return isolate_->factory(); }
-
- Isolate* isolate_;
- IncrementalStringBuilder builder_;
- Handle<String> tojson_string_;
- Handle<JSArray> stack_;
- Handle<FixedArray> property_list_;
- Handle<JSReceiver> replacer_function_;
- uc16* gap_;
- int indent_;
-
- static const int kJsonEscapeTableEntrySize = 8;
- static const char* const JsonEscapeTable;
-};
-
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> JsonStringify(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> replacer,
+ Handle<Object> gap);
} // namespace internal
} // namespace v8
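
With the stringifier class now private to json-stringifier.cc, callers go through the JsonStringify entry point declared above. A minimal sketch of a hypothetical internal call site (only the JsonStringify signature comes from the header; the surrounding helper calls and gap value are assumptions for illustration):

  // Sketch only: serialize `object` with no replacer and a two-space gap.
  Handle<Object> gap = isolate->factory()->NewStringFromAsciiChecked("  ");
  Handle<Object> json;
  if (!JsonStringify(isolate, object, isolate->factory()->undefined_value(), gap)
           .ToHandle(&json)) {
    // Serialization threw (e.g. a cyclic structure); the exception is already
    // pending on the isolate.
    return MaybeHandle<Object>();
  }
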
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 689f4ac3df..e92902cfb5 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -5,7 +5,8 @@
#include "src/keys.h"
#include "src/api-arguments-inl.h"
-#include "src/elements.h"
+#include "src/elements-inl.h"
+#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/identity-map.h"
#include "src/isolate-inl.h"
@@ -60,6 +61,10 @@ Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
return result;
}
+Handle<OrderedHashSet> KeyAccumulator::keys() {
+ return Handle<OrderedHashSet>::cast(keys_);
+}
+
void KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
AddKey(handle(key, isolate_), convert);
}
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index 5abbaac5cd..c8db24a217 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -12,6 +12,8 @@
namespace v8 {
namespace internal {
+class JSProxy;
+
enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
// This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
@@ -101,7 +103,7 @@ class KeyAccumulator final BASE_EMBEDDED {
Handle<FixedArray> keys);
bool IsShadowed(Handle<Object> key);
bool HasShadowingKeys();
- Handle<OrderedHashSet> keys() { return Handle<OrderedHashSet>::cast(keys_); }
+ Handle<OrderedHashSet> keys();
Isolate* isolate_;
// keys_ is either a Handle<OrderedHashSet> or in the case of own JSProxy
diff --git a/deps/v8/src/label.h b/deps/v8/src/label.h
index cf81e6c303..489f93d76b 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/label.h
@@ -32,8 +32,8 @@ class Label {
// In debug builds, the old Label has to be cleared in order to avoid a DCHECK
// failure in its destructor.
#ifdef DEBUG
- Label(Label&& other) { *this = std::move(other); }
- Label& operator=(Label&& other) {
+ Label(Label&& other) V8_NOEXCEPT { *this = std::move(other); }
+ Label& operator=(Label&& other) V8_NOEXCEPT {
pos_ = other.pos_;
near_link_pos_ = other.near_link_pos_;
other.Unuse();
@@ -41,8 +41,8 @@ class Label {
return *this;
}
#else
- Label(Label&&) = default;
- Label& operator=(Label&&) = default;
+ Label(Label&&) V8_NOEXCEPT = default;
+ Label& operator=(Label&&) V8_NOEXCEPT = default;
#endif
#endif
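
The V8_NOEXCEPT annotations matter mainly to standard containers: std::vector moves elements during reallocation only when the move constructor cannot throw, and otherwise falls back to copying. A standalone illustration of that effect (not V8 code):

  #include <vector>

  struct Widget {
    Widget() = default;
    Widget(const Widget&) = default;
    Widget(Widget&&) noexcept = default;  // drop noexcept and vector growth copies instead
    Widget& operator=(const Widget&) = default;
    Widget& operator=(Widget&&) noexcept = default;
  };

  int main() {
    std::vector<Widget> v(4);
    v.emplace_back();  // reallocation move-constructs the existing elements
  }
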
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index e37b4a2edf..ebed59ef2c 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -205,6 +205,17 @@ LayoutDescriptor* LayoutDescriptor::Initialize(
return layout_descriptor;
}
+int LayoutDescriptor::number_of_layout_words() {
+ return length() / kUInt32Size;
+}
+
+uint32_t LayoutDescriptor::get_layout_word(int index) const {
+ return get_uint32(index);
+}
+
+void LayoutDescriptor::set_layout_word(int index, uint32_t value) {
+ set_uint32(index, value);
+}
// LayoutDescriptorHelper is a helper class for querying whether inobject
// property at offset is Double or not.
@@ -235,6 +246,7 @@ bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
return layout_descriptor_->IsTagged(field_index);
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 684b7d5cd6..766b7d81c2 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -243,7 +243,7 @@ LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
CalculateCapacity(map, descriptors, num_descriptors);
// It must not become a fast-mode descriptor here, because otherwise it would
// already have to be a fast pointer layout descriptor, but it is in slow mode now.
- DCHECK_LT(kSmiValueSize, layout_descriptor_length);
+ DCHECK_LT(kBitsInSmiLayout, layout_descriptor_length);
// Trim, clean and reinitialize this slow-mode layout descriptor.
int new_backing_store_length =
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 0e75096197..54c8ff09bd 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -101,9 +101,10 @@ class LayoutDescriptor : public ByteArray {
SmiValuesAre32Bits() ? 32 : kSmiValueSize - 1;
static const int kBitsPerLayoutWord = 32;
- int number_of_layout_words() { return length() / kUInt32Size; }
- uint32_t get_layout_word(int index) const { return get_uint32(index); }
- void set_layout_word(int index, uint32_t value) { set_uint32(index, value); }
+
+ V8_INLINE int number_of_layout_words();
+ V8_INLINE uint32_t get_layout_word(int index) const;
+ V8_INLINE void set_layout_word(int index, uint32_t value);
V8_INLINE static Handle<LayoutDescriptor> New(Isolate* isolate, int length);
V8_INLINE static LayoutDescriptor* FromSmi(Smi* smi);
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index ce204807aa..3c29bd7eaa 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -61,9 +61,6 @@ bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
behavior);
}
-void EnsureEventLoopInitialized(v8::Platform* platform, v8::Isolate* isolate) {
-}
-
void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
double idle_time_in_seconds) {
static_cast<DefaultPlatform*>(platform)->RunIdleTasks(isolate,
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index e0a6a1234c..daaebc291a 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
#include "include/libplatform/v8-tracing.h"
@@ -41,7 +42,20 @@ v8::base::AtomicWord g_category_index = g_num_builtin_categories;
TracingController::TracingController() {}
-TracingController::~TracingController() { StopTracing(); }
+TracingController::~TracingController() {
+ StopTracing();
+
+ {
+ // Free memory for category group names allocated via strdup.
+ base::LockGuard<base::Mutex> lock(mutex_.get());
+ for (size_t i = g_category_index - 1; i >= g_num_builtin_categories; --i) {
+ const char* group = g_category_groups[i];
+ g_category_groups[i] = nullptr;
+ free(const_cast<char*>(group));
+ }
+ g_category_index = g_num_builtin_categories;
+ }
+}
void TracingController::Initialize(TraceBuffer* trace_buffer) {
trace_buffer_.reset(trace_buffer);
@@ -63,15 +77,13 @@ uint64_t TracingController::AddTraceEvent(
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags) {
- uint64_t handle = 0;
- if (mode_ != DISABLED) {
- TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
- if (trace_object) {
- trace_object->Initialize(
- phase, category_enabled_flag, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, arg_convertables, flags,
- CurrentTimestampMicroseconds(), CurrentCpuTimestampMicroseconds());
- }
+ uint64_t handle;
+ TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
+ if (trace_object) {
+ trace_object->Initialize(
+ phase, category_enabled_flag, name, scope, id, bind_id, num_args,
+ arg_names, arg_types, arg_values, arg_convertables, flags,
+ CurrentTimestampMicroseconds(), CurrentCpuTimestampMicroseconds());
}
return handle;
}
@@ -83,15 +95,13 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags, int64_t timestamp) {
- uint64_t handle = 0;
- if (mode_ != DISABLED) {
- TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
- if (trace_object) {
- trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
- bind_id, num_args, arg_names, arg_types,
- arg_values, arg_convertables, flags, timestamp,
- CurrentCpuTimestampMicroseconds());
- }
+ uint64_t handle;
+ TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
+ if (trace_object) {
+ trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, arg_convertables, flags, timestamp,
+ CurrentCpuTimestampMicroseconds());
}
return handle;
}
@@ -189,17 +199,21 @@ const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
DCHECK(!strchr(category_group, '"'));
// The g_category_groups is append only, avoid using a lock for the fast path.
- size_t current_category_index = v8::base::Acquire_Load(&g_category_index);
+ size_t category_index = base::Acquire_Load(&g_category_index);
// Search for pre-existing category group.
- for (size_t i = 0; i < current_category_index; ++i) {
+ for (size_t i = 0; i < category_index; ++i) {
if (strcmp(g_category_groups[i], category_group) == 0) {
return &g_category_group_enabled[i];
}
}
+ // Slow path. Grab the lock.
+ base::LockGuard<base::Mutex> lock(mutex_.get());
+
+ // Check the list again with lock in hand.
unsigned char* category_group_enabled = nullptr;
- size_t category_index = base::Acquire_Load(&g_category_index);
+ category_index = base::Acquire_Load(&g_category_index);
for (size_t i = 0; i < category_index; ++i) {
if (strcmp(g_category_groups[i], category_group) == 0) {
return &g_category_group_enabled[i];
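
GetCategoryGroupEnabledInternal now follows the classic double-checked pattern: scan the append-only table without the lock, then take the lock and scan again before appending. A standalone sketch of that pattern, using assumed globals that stand in for g_category_groups, g_category_index and mutex_ (not the actual V8 code):

  #include <atomic>
  #include <cstdlib>
  #include <cstring>
  #include <mutex>

  static const char* categories[256];
  static std::atomic<size_t> category_count{0};
  static std::mutex category_mutex;

  const char* GetOrAddCategory(const char* name) {
    size_t count = category_count.load(std::memory_order_acquire);
    for (size_t i = 0; i < count; ++i)            // fast path, no lock
      if (std::strcmp(categories[i], name) == 0) return categories[i];
    std::lock_guard<std::mutex> lock(category_mutex);
    count = category_count.load(std::memory_order_acquire);
    for (size_t i = 0; i < count; ++i)            // re-check with the lock held
      if (std::strcmp(categories[i], name) == 0) return categories[i];
    categories[count] = strdup(name);             // freed at shutdown, as in the destructor above
    category_count.store(count + 1, std::memory_order_release);
    return categories[count];
  }
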
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 492606475e..f374ccddaf 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -714,7 +714,8 @@ void Sampler::DoSample() {
zx_handle_t profiled_thread = platform_data()->profiled_thread();
if (profiled_thread == ZX_HANDLE_INVALID) return;
- if (zx_task_suspend(profiled_thread) != ZX_OK) return;
+ zx_handle_t suspend_token = ZX_HANDLE_INVALID;
+ if (zx_task_suspend_token(profiled_thread, &suspend_token) != ZX_OK) return;
// Wait for the target thread to become suspended, or to exit.
// TODO(wez): There is currently no suspension count for threads, so there
@@ -726,7 +727,7 @@ void Sampler::DoSample() {
profiled_thread, ZX_THREAD_SUSPENDED | ZX_THREAD_TERMINATED,
zx_deadline_after(ZX_MSEC(100)), &signals);
if (suspended != ZX_OK || (signals & ZX_THREAD_SUSPENDED) == 0) {
- zx_task_resume(profiled_thread, 0);
+ zx_handle_close(suspend_token);
return;
}
@@ -747,7 +748,7 @@ void Sampler::DoSample() {
SampleStack(state);
}
- zx_task_resume(profiled_thread, 0);
+ zx_handle_close(suspend_token);
}
// TODO(wez): Remove this once the Fuchsia SDK has rolled.
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index c5551fcac1..4cdb854731 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -9,6 +9,7 @@
#include "src/objects-inl.h"
#include "src/string-stream.h"
#include "src/utils.h"
+#include "src/vector.h"
#include "src/version.h"
namespace v8 {
@@ -87,22 +88,71 @@ Log::MessageBuilder::MessageBuilder(Log* log)
DCHECK_NOT_NULL(log_->format_buffer_);
}
+void Log::MessageBuilder::AppendString(String* str,
+ base::Optional<int> length_limit) {
+ if (str == nullptr) return;
+
+ DisallowHeapAllocation no_gc; // Ensure string stays valid.
+ int length = str->length();
+ if (length_limit) length = std::min(length, *length_limit);
+ for (int i = 0; i < length; i++) {
+ uint16_t c = str->Get(i);
+ if (c <= 0xFF) {
+ AppendCharacter(static_cast<char>(c));
+ } else {
+ // Escape non-ascii characters.
+ AppendRawFormatString("\\u%04x", c & 0xFFFF);
+ }
+ }
+}
+
+void Log::MessageBuilder::AppendString(Vector<const char> str) {
+ for (auto i = str.begin(); i < str.end(); i++) AppendCharacter(*i);
+}
+
+void Log::MessageBuilder::AppendString(const char* str) {
+ if (str == nullptr) return;
+ AppendString(str, strlen(str));
+}
+
+void Log::MessageBuilder::AppendString(const char* str, size_t length) {
+ if (str == nullptr) return;
+
+ for (size_t i = 0; i < length; i++) {
+ DCHECK_NE(str[i], '\0');
+ AppendCharacter(str[i]);
+ }
+}
-void Log::MessageBuilder::Append(const char* format, ...) {
+void Log::MessageBuilder::AppendFormatString(const char* format, ...) {
va_list args;
va_start(args, format);
- AppendVA(format, args);
+ const int length = FormatStringIntoBuffer(format, args);
va_end(args);
+ for (int i = 0; i < length; i++) {
+ DCHECK_NE(log_->format_buffer_[i], '\0');
+ AppendCharacter(log_->format_buffer_[i]);
+ }
}
-
-void Log::MessageBuilder::AppendVA(const char* format, va_list args) {
- Vector<char> buf(log_->format_buffer_, Log::kMessageBufferSize);
- int length = v8::internal::VSNPrintF(buf, format, args);
- // {length} is -1 if output was truncated.
- if (length == -1) length = Log::kMessageBufferSize;
- DCHECK_LE(length, Log::kMessageBufferSize);
- AppendStringPart(log_->format_buffer_, length);
+void Log::MessageBuilder::AppendCharacter(char c) {
+ if (c >= 32 && c <= 126) {
+ if (c == ',') {
+ // Escape commas to avoid adding column separators.
+ AppendRawFormatString("\\x2C");
+ } else if (c == '\\') {
+ AppendRawFormatString("\\\\");
+ } else {
+ // Safe, printable ascii character.
+ AppendRawCharacter(c);
+ }
+ } else if (c == '\n') {
+ // Escape newlines to avoid adding row separators.
+ AppendRawFormatString("\\n");
+ } else {
+ // Escape non-printable characters.
+ AppendRawFormatString("\\x%02x", c & 0xFF);
+ }
}
void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
@@ -111,15 +161,17 @@ void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
os << "symbol(";
if (!symbol->name()->IsUndefined()) {
os << "\"";
- AppendDetailed(String::cast(symbol->name()), false);
+ AppendSymbolNameDetails(String::cast(symbol->name()), false);
os << "\" ";
}
os << "hash " << std::hex << symbol->Hash() << std::dec << ")";
}
-void Log::MessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+void Log::MessageBuilder::AppendSymbolNameDetails(String* str,
+ bool show_impl_info) {
if (str == nullptr) return;
- DisallowHeapAllocation no_gc; // Ensure string stay valid.
+
+ DisallowHeapAllocation no_gc; // Ensure string stays valid.
OFStream& os = log_->os_;
int limit = str->length();
if (limit > 0x1000) limit = 0x1000;
@@ -129,62 +181,32 @@ void Log::MessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
if (StringShape(str).IsInternalized()) os << '#';
os << ':' << str->length() << ':';
}
- AppendStringPart(str, limit);
-}
-
-void Log::MessageBuilder::AppendString(String* str) {
- if (str == nullptr) return;
- int len = str->length();
- AppendStringPart(str, len);
-}
-
-void Log::MessageBuilder::AppendString(const char* string) {
- if (string == nullptr) return;
- for (const char* p = string; *p != '\0'; p++) {
- this->AppendCharacter(*p);
- }
+ AppendString(str, limit);
}
-void Log::MessageBuilder::AppendStringPart(String* str, int len) {
- DCHECK_LE(len, str->length());
- DisallowHeapAllocation no_gc; // Ensure string stay valid.
- // TODO(cbruni): unify escaping.
- for (int i = 0; i < len; i++) {
- uc32 c = str->Get(i);
- if (c <= 0xFF) {
- AppendCharacter(static_cast<char>(c));
- } else {
- // Escape any non-ascii range characters.
- Append("\\u%04x", c);
- }
- }
+int Log::MessageBuilder::FormatStringIntoBuffer(const char* format,
+ va_list args) {
+ Vector<char> buf(log_->format_buffer_, Log::kMessageBufferSize);
+ int length = v8::internal::VSNPrintF(buf, format, args);
+ // |length| is -1 if output was truncated.
+ if (length == -1) length = Log::kMessageBufferSize;
+ DCHECK_LE(length, Log::kMessageBufferSize);
+ DCHECK_GE(length, 0);
+ return length;
}
-void Log::MessageBuilder::AppendStringPart(const char* str, size_t len) {
- for (size_t i = 0; i < len; i++) {
- DCHECK_NE(str[i], '\0');
- this->AppendCharacter(str[i]);
+void Log::MessageBuilder::AppendRawFormatString(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ const int length = FormatStringIntoBuffer(format, args);
+ va_end(args);
+ for (int i = 0; i < length; i++) {
+ DCHECK_NE(log_->format_buffer_[i], '\0');
+ AppendRawCharacter(log_->format_buffer_[i]);
}
}
-void Log::MessageBuilder::AppendCharacter(char c) {
- OFStream& os = log_->os_;
- // A log entry (separate by commas) cannot contain commas or line-breaks.
- if (c >= 32 && c <= 126) {
- if (c == ',') {
- // Escape commas (log field separator) directly.
- os << "\\x2C";
- } else {
- // Directly append any printable ascii character.
- os << c;
- }
- } else if (c == '\n') {
- os << "\\n";
- } else {
- // Escape any non-printable characters.
- Append("\\x%02x", c);
- }
-}
+void Log::MessageBuilder::AppendRawCharacter(char c) { log_->os_ << c; }
void Log::MessageBuilder::WriteToLogFile() { log_->os_ << std::endl; }
@@ -235,7 +257,8 @@ Log::MessageBuilder& Log::MessageBuilder::operator<<<Name*>(Name* name) {
template <>
Log::MessageBuilder& Log::MessageBuilder::operator<<<LogSeparator>(
LogSeparator separator) {
- log_->os_ << ',';
+ // Skip escaping to create a new column.
+ this->AppendRawCharacter(',');
return *this;
}
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 9f530293d4..bd56aaf418 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -11,6 +11,7 @@
#include "src/allocation.h"
#include "src/base/compiler-specific.h"
+#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/ostreams.h"
@@ -19,6 +20,8 @@ namespace v8 {
namespace internal {
class Logger;
+template <typename T>
+class Vector;
enum class LogSeparator { kSeparator };
@@ -53,8 +56,8 @@ class Log {
static const char* const kLogToTemporaryFile;
static const char* const kLogToConsole;
- // Utility class for formatting log messages. It fills the message into the
- // static buffer in Log.
+ // Utility class for formatting log messages. It escapes the given messages
+ // and then appends them to the static buffer in Log.
class MessageBuilder BASE_EMBEDDED {
public:
// Create a message builder starting from position 0.
@@ -62,26 +65,15 @@ class Log {
explicit MessageBuilder(Log* log);
~MessageBuilder() { }
- // Append string data to the log message.
- void PRINTF_FORMAT(2, 3) Append(const char* format, ...);
-
- // Append string data to the log message.
- void PRINTF_FORMAT(2, 0) AppendVA(const char* format, va_list args);
-
+ void AppendString(String* str,
+ base::Optional<int> length_limit = base::nullopt);
+ void AppendString(Vector<const char> str);
+ void AppendString(const char* str);
+ void AppendString(const char* str, size_t length);
+ void PRINTF_FORMAT(2, 3) AppendFormatString(const char* format, ...);
+ void AppendCharacter(char c);
void AppendSymbolName(Symbol* symbol);
- void AppendDetailed(String* str, bool show_impl_info);
-
- // Append and escape a full string.
- void AppendString(String* source);
- void AppendString(const char* string);
-
- // Append and escpae a portion of a string.
- void AppendStringPart(String* source, int len);
- void AppendStringPart(const char* str, size_t len);
-
- void AppendCharacter(const char character);
-
// Delegate insertion to the underlying {log_}.
// All appended strings are escaped to maintain one-line log entries.
template <typename T>
@@ -94,6 +86,16 @@ class Log {
void WriteToLogFile();
private:
+ // Prints the format string into |log_->format_buffer_|. Returns the length
+ // of the result, or kMessageBufferSize if it was truncated.
+ int PRINTF_FORMAT(2, 0)
+ FormatStringIntoBuffer(const char* format, va_list args);
+
+ void AppendSymbolNameDetails(String* str, bool show_impl_info);
+
+ void PRINTF_FORMAT(2, 3) AppendRawFormatString(const char* format, ...);
+ void AppendRawCharacter(const char character);
+
Log* log_;
base::LockGuard<base::Mutex> lock_guard_;
};
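
Everything appended through the new MessageBuilder interface is escaped so a log entry stays a single comma-separated line. A standalone sketch of the escaping rule applied by AppendCharacter (illustration only, not the V8 implementation):

  #include <cstdio>
  #include <string>

  std::string EscapeLogChar(char c) {
    if (c >= 32 && c <= 126) {
      if (c == ',') return "\\x2C";   // a raw comma would start a new column
      if (c == '\\') return "\\\\";
      return std::string(1, c);       // printable ASCII passes through
    }
    if (c == '\n') return "\\n";      // a raw newline would start a new row
    char buf[8];
    std::snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned char>(c));
    return buf;
  }
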
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index f19897b27b..a3f484fd59 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -8,7 +8,7 @@
#include <memory>
#include <sstream>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -127,21 +127,10 @@ class CodeEventLogger::NameBuffer {
void AppendString(String* str) {
if (str == nullptr) return;
- int uc16_length = Min(str->length(), kUtf16BufferSize);
- String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
- uc16 c = utf16_buffer[i];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
- } else {
- int char_length = unibrow::Utf8::Length(c, previous);
- if (utf8_pos_ + char_length > kUtf8BufferSize) break;
- unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
- utf8_pos_ += char_length;
- }
- previous = c;
- }
+ int length = 0;
+ std::unique_ptr<char[]> c_str =
+ str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, &length);
+ AppendBytes(c_str.get(), length);
}
void AppendBytes(const char* bytes, int size) {
@@ -188,7 +177,6 @@ class CodeEventLogger::NameBuffer {
int utf8_pos_;
char utf8_buffer_[kUtf8BufferSize];
- uc16 utf16_buffer[kUtf16BufferSize];
};
CodeEventLogger::CodeEventLogger(Isolate* isolate)
@@ -1023,7 +1011,7 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg << name << kNext;
- msg.Append("%" V8PRIdPTR, value);
+ msg.AppendFormatString("%" V8PRIdPTR, value);
msg.WriteToLogFile();
}
@@ -1283,7 +1271,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
if (name.is_empty()) {
msg << "<unknown wasm>";
} else {
- msg.AppendStringPart(name.start(), name.length());
+ msg.AppendString(name);
}
// We have to add two extra fields that allow the tick processor to group
// events for the same wasm function, even if it gets compiled again. For
@@ -1499,7 +1487,8 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
if (base::OS::GetUserTime(&sec, &usec) != -1) {
msg << sec << kNext << usec << kNext;
}
- msg.Append("%.0f", V8::GetCurrentPlatform()->CurrentClockTimeMillis());
+ msg.AppendFormatString("%.0f",
+ V8::GetCurrentPlatform()->CurrentClockTimeMillis());
msg.WriteToLogFile();
}
@@ -1545,7 +1534,7 @@ void Logger::FunctionEvent(const char* reason, int script_id, double time_delta,
AppendFunctionMessage(msg, reason, script_id, time_delta, start_position,
end_position, &timer_);
if (function_name_length > 0) {
- msg.AppendStringPart(function_name, function_name_length);
+ msg.AppendString(function_name, function_name_length);
}
msg.WriteToLogFile();
}
@@ -2027,17 +2016,20 @@ sampler::Sampler* Logger::sampler() {
return ticker_;
}
-
-FILE* Logger::TearDown() {
- if (!is_initialized_) return nullptr;
- is_initialized_ = false;
-
- // Stop the profiler before closing the file.
+void Logger::StopProfilerThread() {
if (profiler_ != nullptr) {
profiler_->Disengage();
delete profiler_;
profiler_ = nullptr;
}
+}
+
+FILE* Logger::TearDown() {
+ if (!is_initialized_) return nullptr;
+ is_initialized_ = false;
+
+ // Stop the profiler thread before closing the file.
+ StopProfilerThread();
delete ticker_;
ticker_ = nullptr;
@@ -2239,9 +2231,6 @@ void ExistingCodeLogger::LogExistingFunction(
#endif
CALL_CODE_EVENT_HANDLER(CallbackEvent(shared->DebugName(), entry_point))
}
- } else {
- CALL_CODE_EVENT_HANDLER(CodeCreateEvent(
- tag, *code, *shared, ReadOnlyRoots(isolate_).empty_string()))
}
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 3bc54a5926..5ce7418364 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -144,6 +144,8 @@ class Logger : public CodeEventListener {
sampler::Sampler* sampler();
+ void StopProfilerThread();
+
// Frees resources acquired in SetUp.
// When a temporary file is used for the log, returns its stream descriptor,
// leaving the file open.
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/lookup-inl.h
new file mode 100644
index 0000000000..6e95d06b0f
--- /dev/null
+++ b/deps/v8/src/lookup-inl.h
@@ -0,0 +1,144 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOOKUP_INL_H_
+#define V8_LOOKUP_INL_H_
+
+#include "src/lookup.h"
+
+#include "src/handles-inl.h"
+#include "src/heap/factory-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/api-callbacks.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/map-inl.h"
+
+namespace v8 {
+namespace internal {
+
+LookupIterator::LookupIterator(Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration)
+ : LookupIterator(holder->GetIsolate(), receiver, name, holder,
+ configuration) {}
+
+LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name, Handle<JSReceiver> holder,
+ Configuration configuration)
+ : configuration_(ComputeConfiguration(configuration, name)),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(PropertyDetails::Empty()),
+ isolate_(isolate),
+ name_(isolate_->factory()->InternalizeName(name)),
+ receiver_(receiver),
+ initial_holder_(holder),
+ // kMaxUInt32 isn't a valid index.
+ index_(kMaxUInt32),
+ number_(static_cast<uint32_t>(DescriptorArray::kNotFound)) {
+#ifdef DEBUG
+ uint32_t index; // Assert that the name is not an array index.
+ DCHECK(!name->AsArrayIndex(&index));
+#endif // DEBUG
+ Start<false>();
+}
+
+LookupIterator LookupIterator::PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder, Configuration configuration) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it =
+ LookupIterator(isolate, receiver, index, holder, configuration);
+ it.name_ = name;
+ return it;
+ }
+ return LookupIterator(receiver, name, holder, configuration);
+}
+
+LookupIterator LookupIterator::PropertyOrElement(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Configuration configuration) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it = LookupIterator(isolate, receiver, index, configuration);
+ it.name_ = name;
+ return it;
+ }
+ return LookupIterator(isolate, receiver, name, configuration);
+}
+
+Handle<Name> LookupIterator::GetName() {
+ if (name_.is_null()) {
+ DCHECK(IsElement());
+ name_ = factory()->Uint32ToString(index_);
+ }
+ return name_;
+}
+
+bool LookupIterator::is_dictionary_holder() const {
+ return !holder_->HasFastProperties();
+}
+
+bool LookupIterator::ExtendingNonExtensible(Handle<JSReceiver> receiver) {
+ DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
+ return !receiver->map()->is_extensible() &&
+ (IsElement() || !name_->IsPrivate());
+}
+
+bool LookupIterator::IsCacheableTransition() {
+ DCHECK_EQ(TRANSITION, state_);
+ return transition_->IsPropertyCell() ||
+ (transition_map()->is_dictionary_map() &&
+ !GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
+ transition_map()->GetBackPointer()->IsMap();
+}
+
+void LookupIterator::UpdateProtector() {
+ if (IsElement()) return;
+ // This list must be kept in sync with
+ // CodeStubAssembler::CheckForAssociatedProtector!
+ ReadOnlyRoots roots(heap());
+ if (*name_ == roots.is_concat_spreadable_symbol() ||
+ *name_ == roots.constructor_string() || *name_ == roots.next_string() ||
+ *name_ == roots.species_symbol() || *name_ == roots.iterator_symbol() ||
+ *name_ == roots.resolve_string() || *name_ == roots.then_string()) {
+ InternalUpdateProtector();
+ }
+}
+
+LookupIterator::Configuration LookupIterator::ComputeConfiguration(
+ Configuration configuration, Handle<Name> name) {
+ return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
+}
+
+Handle<JSReceiver> LookupIterator::GetRoot(Isolate* isolate,
+ Handle<Object> receiver,
+ uint32_t index) {
+ if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ return GetRootForNonJSReceiver(isolate, receiver, index);
+}
+
+template <class T>
+Handle<T> LookupIterator::GetStoreTarget() const {
+ DCHECK(receiver_->IsJSReceiver());
+ if (receiver_->IsJSGlobalProxy()) {
+ Map* map = JSGlobalProxy::cast(*receiver_)->map();
+ if (map->has_hidden_prototype()) {
+ return handle(JSGlobalObject::cast(map->prototype()), isolate_);
+ }
+ }
+ return Handle<T>::cast(receiver_);
+}
+inline Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
+ DCHECK_EQ(INTERCEPTOR, state_);
+ InterceptorInfo* result =
+ IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
+ : GetInterceptor<false>(JSObject::cast(*holder_));
+ return handle(result, isolate_);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOOKUP_INL_H_
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 32261c91d1..fb3d1263d7 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -256,7 +256,7 @@ namespace {
bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver* holder) {
static uint32_t context_slots[] = {
-#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype) \
Context::TYPE##_ARRAY_FUN_INDEX,
TYPED_ARRAYS(TYPED_ARRAY_CONTEXT_SLOTS)
@@ -398,7 +398,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (IsElement()) {
ElementsKind kind = holder_obj->GetElementsKind();
ElementsKind to = value->OptimalElementsKind();
- if (IsHoleyOrDictionaryElementsKind(kind)) to = GetHoleyElementsKind(to);
+ if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
to = GetMoreGeneralElementsKind(kind, to);
if (kind != to) {
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 36ba78be27..c1d4dd4460 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -49,31 +49,13 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
: LookupIterator(isolate, receiver, name, GetRoot(isolate, receiver),
configuration) {}
- LookupIterator(Handle<Object> receiver, Handle<Name> name,
- Handle<JSReceiver> holder,
- Configuration configuration = DEFAULT)
- : LookupIterator(holder->GetIsolate(), receiver, name, holder,
- configuration) {}
+ inline LookupIterator(Handle<Object> receiver, Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT);
- LookupIterator(Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<JSReceiver> holder,
- Configuration configuration = DEFAULT)
- : configuration_(ComputeConfiguration(configuration, name)),
- interceptor_state_(InterceptorState::kUninitialized),
- property_details_(PropertyDetails::Empty()),
- isolate_(isolate),
- name_(isolate_->factory()->InternalizeName(name)),
- receiver_(receiver),
- initial_holder_(holder),
- // kMaxUInt32 isn't a valid index.
- index_(kMaxUInt32),
- number_(static_cast<uint32_t>(DescriptorArray::kNotFound)) {
-#ifdef DEBUG
- uint32_t index; // Assert that the name is not an array index.
- DCHECK(!name->AsArrayIndex(&index));
-#endif // DEBUG
- Start<false>();
- }
+ inline LookupIterator(Isolate* isolate, Handle<Object> receiver,
+ Handle<Name> name, Handle<JSReceiver> holder,
+ Configuration configuration = DEFAULT);
LookupIterator(Isolate* isolate, Handle<Object> receiver, uint32_t index,
Configuration configuration = DEFAULT)
@@ -96,31 +78,13 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
Start<true>();
}
- static LookupIterator PropertyOrElement(
+ static inline LookupIterator PropertyOrElement(
Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Configuration configuration = DEFAULT) {
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- LookupIterator it =
- LookupIterator(isolate, receiver, index, configuration);
- it.name_ = name;
- return it;
- }
- return LookupIterator(isolate, receiver, name, configuration);
- }
+ Configuration configuration = DEFAULT);
- static LookupIterator PropertyOrElement(
+ static inline LookupIterator PropertyOrElement(
Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<JSReceiver> holder, Configuration configuration = DEFAULT) {
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- LookupIterator it =
- LookupIterator(isolate, receiver, index, holder, configuration);
- it.name_ = name;
- return it;
- }
- return LookupIterator(receiver, name, holder, configuration);
- }
+ Handle<JSReceiver> holder, Configuration configuration = DEFAULT);
static LookupIterator PropertyOrElement(
Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
@@ -147,13 +111,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
DCHECK(!IsElement());
return name_;
}
- Handle<Name> GetName() {
- if (name_.is_null()) {
- DCHECK(IsElement());
- name_ = factory()->Uint32ToString(index_);
- }
- return name_;
- }
+ inline Handle<Name> GetName();
uint32_t index() const { return index_; }
bool IsElement() const { return index_ != kMaxUInt32; }
@@ -170,17 +128,8 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
Handle<Object> GetReceiver() const { return receiver_; }
template <class T>
- Handle<T> GetStoreTarget() const {
- DCHECK(receiver_->IsJSReceiver());
- if (receiver_->IsJSGlobalProxy()) {
- Map* map = JSGlobalProxy::cast(*receiver_)->map();
- if (map->has_hidden_prototype()) {
- return handle(JSGlobalObject::cast(map->prototype()), isolate_);
- }
- }
- return Handle<T>::cast(receiver_);
- }
- bool is_dictionary_holder() const { return !holder_->HasFastProperties(); }
+ inline Handle<T> GetStoreTarget() const;
+ inline bool is_dictionary_holder() const;
Handle<Map> transition_map() const {
DCHECK_EQ(TRANSITION, state_);
return Handle<Map>::cast(transition_);
@@ -206,23 +155,13 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
bool HasAccess() const;
/* PROPERTY */
- bool ExtendingNonExtensible(Handle<JSReceiver> receiver) {
- DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
- return !receiver->map()->is_extensible() &&
- (IsElement() || !name_->IsPrivate());
- }
+ inline bool ExtendingNonExtensible(Handle<JSReceiver> receiver);
void PrepareForDataProperty(Handle<Object> value);
void PrepareTransitionToDataProperty(Handle<JSReceiver> receiver,
Handle<Object> value,
PropertyAttributes attributes,
Object::StoreFromKeyed store_mode);
- bool IsCacheableTransition() {
- DCHECK_EQ(TRANSITION, state_);
- return transition_->IsPropertyCell() ||
- (transition_map()->is_dictionary_map() &&
- !GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
- transition_map()->GetBackPointer()->IsMap();
- }
+ inline bool IsCacheableTransition();
void ApplyTransitionToDataProperty(Handle<JSReceiver> receiver);
void ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes);
@@ -255,28 +194,11 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
Handle<Object> GetAccessors() const;
- inline Handle<InterceptorInfo> GetInterceptor() const {
- DCHECK_EQ(INTERCEPTOR, state_);
- InterceptorInfo* result =
- IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
- : GetInterceptor<false>(JSObject::cast(*holder_));
- return handle(result, isolate_);
- }
+ inline Handle<InterceptorInfo> GetInterceptor() const;
Handle<InterceptorInfo> GetInterceptorForFailedAccessCheck() const;
Handle<Object> GetDataValue() const;
void WriteDataValue(Handle<Object> value, bool initializing_store);
- inline void UpdateProtector() {
- if (IsElement()) return;
- // This list must be kept in sync with
- // CodeStubAssembler::CheckForAssociatedProtector!
- ReadOnlyRoots roots(heap());
- if (*name_ == roots.is_concat_spreadable_symbol() ||
- *name_ == roots.constructor_string() || *name_ == roots.next_string() ||
- *name_ == roots.species_symbol() || *name_ == roots.iterator_symbol() ||
- *name_ == roots.resolve_string() || *name_ == roots.then_string()) {
- InternalUpdateProtector();
- }
- }
+ inline void UpdateProtector();
// Lookup a 'cached' private property for an accessor.
// If not found returns false and leaves the LookupIterator unmodified.
@@ -350,19 +272,14 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
return number_;
}
- static Configuration ComputeConfiguration(
- Configuration configuration, Handle<Name> name) {
- return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
- }
+ static inline Configuration ComputeConfiguration(
+ Configuration configuration, Handle<Name> name);
static Handle<JSReceiver> GetRootForNonJSReceiver(
Isolate* isolate, Handle<Object> receiver, uint32_t index = kMaxUInt32);
- inline static Handle<JSReceiver> GetRoot(Isolate* isolate,
+ static inline Handle<JSReceiver> GetRoot(Isolate* isolate,
Handle<Object> receiver,
- uint32_t index = kMaxUInt32) {
- if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
- return GetRootForNonJSReceiver(isolate, receiver, index);
- }
+ uint32_t index = kMaxUInt32);
State NotFound(JSReceiver* const holder) const;
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index a0ac5d3cd0..470215a310 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -24,6 +24,20 @@ inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
} // namespace
+MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
+ : isolate_(isolate),
+ old_map_(old_map),
+ old_descriptors_(old_map->instance_descriptors(), isolate_),
+ old_nof_(old_map_->NumberOfOwnDescriptors()),
+ new_elements_kind_(old_map_->elements_kind()),
+ is_transitionable_fast_elements_kind_(
+ IsTransitionableFastElementsKind(new_elements_kind_)) {
+ // We shouldn't try to update remote objects.
+ DCHECK(!old_map->FindRootMap(isolate)
+ ->GetConstructor()
+ ->IsFunctionTemplateInfo());
+}
+
Name* MapUpdater::GetKey(int descriptor) const {
return old_descriptors_->GetKey(descriptor);
}
@@ -231,9 +245,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
state_ = kEnd;
result_map_ = handle(
JSFunction::cast(root_map_->GetConstructor())->initial_map(), isolate_);
- if (from_kind != to_kind) {
- result_map_ = Map::AsElementsKind(isolate_, result_map_, to_kind);
- }
+ result_map_ = Map::AsElementsKind(isolate_, result_map_, to_kind);
DCHECK(result_map_->is_dictionary_map());
return state_;
}
@@ -293,9 +305,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
}
// From here on, use the map with correct elements kind as root map.
- if (from_kind != to_kind) {
- root_map_ = Map::AsElementsKind(isolate_, root_map_, to_kind);
- }
+ root_map_ = Map::AsElementsKind(isolate_, root_map_, to_kind);
state_ = kAtRootMap;
return state_; // Not done yet.
}
@@ -508,7 +518,8 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
isolate_, instance_type, &next_constness, &next_representation,
&next_field_type);
- MaybeObjectHandle wrapped_type(Map::WrapFieldType(next_field_type));
+ MaybeObjectHandle wrapped_type(
+ Map::WrapFieldType(isolate_, next_field_type));
Descriptor d;
if (next_kind == kData) {
d = Descriptor::DataField(key, current_offset, next_attributes,
@@ -561,7 +572,8 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
!Map::IsInplaceGeneralizableField(
next_constness, next_representation, *next_field_type));
- MaybeObjectHandle wrapped_type(Map::WrapFieldType(next_field_type));
+ MaybeObjectHandle wrapped_type(
+ Map::WrapFieldType(isolate_, next_field_type));
Descriptor d;
if (next_kind == kData) {
DCHECK_IMPLIES(!FLAG_track_constant_fields,
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
index 5dcb018373..52be931bdf 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/map-updater.h
@@ -44,19 +44,7 @@ namespace internal {
// replace its transition tree with a new branch for the updated descriptors.
class MapUpdater {
public:
- MapUpdater(Isolate* isolate, Handle<Map> old_map)
- : isolate_(isolate),
- old_map_(old_map),
- old_descriptors_(old_map->instance_descriptors(), isolate_),
- old_nof_(old_map_->NumberOfOwnDescriptors()),
- new_elements_kind_(old_map_->elements_kind()),
- is_transitionable_fast_elements_kind_(
- IsTransitionableFastElementsKind(new_elements_kind_)) {
- // We shouldn't try to update remote objects.
- DCHECK(!old_map->FindRootMap(isolate)
- ->GetConstructor()
- ->IsFunctionTemplateInfo());
- }
+ MapUpdater(Isolate* isolate, Handle<Map> old_map);
// Prepares for reconfiguring of a property at |descriptor| to data field
// with given |attributes| and |representation|/|field_type| and
diff --git a/deps/v8/src/maybe-handles-inl.h b/deps/v8/src/maybe-handles-inl.h
new file mode 100644
index 0000000000..c9c8c88700
--- /dev/null
+++ b/deps/v8/src/maybe-handles-inl.h
@@ -0,0 +1,86 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAYBE_HANDLES_INL_H_
+#define V8_MAYBE_HANDLES_INL_H_
+
+#include "src/maybe-handles.h"
+
+#include "src/handles-inl.h"
+#include "src/objects/maybe-object-inl.h"
+
+namespace v8 {
+namespace internal {
+template <typename T>
+MaybeHandle<T>::MaybeHandle(T* object, Isolate* isolate)
+ : MaybeHandle(handle(object, isolate)) {}
+
+MaybeObjectHandle::MaybeObjectHandle()
+ : reference_type_(HeapObjectReferenceType::STRONG),
+ handle_(Handle<Object>::null()) {}
+
+MaybeObjectHandle::MaybeObjectHandle(MaybeObject* object, Isolate* isolate) {
+ HeapObject* heap_object;
+ DCHECK(!object->IsClearedWeakHeapObject());
+ if (object->ToWeakHeapObject(&heap_object)) {
+ handle_ = handle(heap_object, isolate);
+ reference_type_ = HeapObjectReferenceType::WEAK;
+ } else {
+ handle_ = handle(object->ToObject(), isolate);
+ reference_type_ = HeapObjectReferenceType::STRONG;
+ }
+}
+
+MaybeObjectHandle::MaybeObjectHandle(Handle<Object> object)
+ : reference_type_(HeapObjectReferenceType::STRONG), handle_(object) {}
+
+MaybeObjectHandle::MaybeObjectHandle(Object* object, Isolate* isolate)
+ : reference_type_(HeapObjectReferenceType::STRONG),
+ handle_(object, isolate) {}
+
+MaybeObjectHandle::MaybeObjectHandle(Object* object,
+ HeapObjectReferenceType reference_type,
+ Isolate* isolate)
+ : reference_type_(reference_type), handle_(handle(object, isolate)) {}
+
+MaybeObjectHandle::MaybeObjectHandle(Handle<Object> object,
+ HeapObjectReferenceType reference_type)
+ : reference_type_(reference_type), handle_(object) {}
+
+MaybeObjectHandle MaybeObjectHandle::Weak(Handle<Object> object) {
+ return MaybeObjectHandle(object, HeapObjectReferenceType::WEAK);
+}
+
+MaybeObjectHandle MaybeObjectHandle::Weak(Object* object, Isolate* isolate) {
+ return MaybeObjectHandle(object, HeapObjectReferenceType::WEAK, isolate);
+}
+
+MaybeObject* MaybeObjectHandle::operator*() const {
+ if (reference_type_ == HeapObjectReferenceType::WEAK) {
+ return HeapObjectReference::Weak(*handle_.ToHandleChecked());
+ } else {
+ return MaybeObject::FromObject(*handle_.ToHandleChecked());
+ }
+}
+
+MaybeObject* MaybeObjectHandle::operator->() const {
+ if (reference_type_ == HeapObjectReferenceType::WEAK) {
+ return HeapObjectReference::Weak(*handle_.ToHandleChecked());
+ } else {
+ return MaybeObject::FromObject(*handle_.ToHandleChecked());
+ }
+}
+
+Handle<Object> MaybeObjectHandle::object() const {
+ return handle_.ToHandleChecked();
+}
+
+inline MaybeObjectHandle handle(MaybeObject* object, Isolate* isolate) {
+ return MaybeObjectHandle(object, isolate);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAYBE_HANDLES_INL_H_
diff --git a/deps/v8/src/maybe-handles.h b/deps/v8/src/maybe-handles.h
new file mode 100644
index 0000000000..d4b639e18e
--- /dev/null
+++ b/deps/v8/src/maybe-handles.h
@@ -0,0 +1,120 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAYBE_HANDLES_H_
+#define V8_MAYBE_HANDLES_H_
+
+#include <type_traits>
+
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
+// into a Handle requires checking that it does not point to nullptr. This
+// ensures nullptr checks before use.
+//
+// Also note that Handles do not provide default equality comparison or hashing
+// operators on purpose. Such operators would be misleading, because intended
+// semantics is ambiguous between Handle location and object identity.
+template <typename T>
+class MaybeHandle final {
+ public:
+ V8_INLINE MaybeHandle() {}
+
+ // Constructor for handling automatic up casting from Handle.
+ // Ex. Handle<JSArray> can be passed when MaybeHandle<Object> is expected.
+ template <typename S, typename = typename std::enable_if<
+ std::is_convertible<S*, T*>::value>::type>
+ V8_INLINE MaybeHandle(Handle<S> handle)
+ : location_(reinterpret_cast<T**>(handle.location_)) {}
+
+ // Constructor for handling automatic up casting.
+ // Ex. MaybeHandle<JSArray> can be passed when Handle<Object> is expected.
+ template <typename S, typename = typename std::enable_if<
+ std::is_convertible<S*, T*>::value>::type>
+ V8_INLINE MaybeHandle(MaybeHandle<S> maybe_handle)
+ : location_(reinterpret_cast<T**>(maybe_handle.location_)) {}
+
+ V8_INLINE MaybeHandle(T* object, Isolate* isolate);
+
+ V8_INLINE void Assert() const { DCHECK_NOT_NULL(location_); }
+ V8_INLINE void Check() const { CHECK_NOT_NULL(location_); }
+
+ V8_INLINE Handle<T> ToHandleChecked() const {
+ Check();
+ return Handle<T>(location_);
+ }
+
+ // Convert to a Handle with a type that can be upcasted to.
+ template <typename S>
+ V8_INLINE bool ToHandle(Handle<S>* out) const {
+ if (location_ == nullptr) {
+ *out = Handle<T>::null();
+ return false;
+ } else {
+ *out = Handle<T>(location_);
+ return true;
+ }
+ }
+
+ // Returns the raw address where this handle is stored. This should only be
+ // used for hashing handles; do not ever try to dereference it.
+ V8_INLINE Address address() const { return bit_cast<Address>(location_); }
+
+ bool is_null() const { return location_ == nullptr; }
+
+ protected:
+ T** location_ = nullptr;
+
+ // MaybeHandles of different classes are allowed to access each
+ // other's location_.
+ template <typename>
+ friend class MaybeHandle;
+};
+
+// A handle which contains a potentially weak pointer. Keeps it alive (strongly)
+// while the MaybeObjectHandle is alive.
+class MaybeObjectHandle {
+ public:
+ inline MaybeObjectHandle();
+ inline MaybeObjectHandle(MaybeObject* object, Isolate* isolate);
+ inline MaybeObjectHandle(Object* object, Isolate* isolate);
+ inline explicit MaybeObjectHandle(Handle<Object> object);
+
+ static inline MaybeObjectHandle Weak(Object* object, Isolate* isolate);
+ static inline MaybeObjectHandle Weak(Handle<Object> object);
+
+ inline MaybeObject* operator*() const;
+ inline MaybeObject* operator->() const;
+ inline Handle<Object> object() const;
+
+ bool is_identical_to(const MaybeObjectHandle& other) const {
+ Handle<Object> this_handle;
+ Handle<Object> other_handle;
+ return reference_type_ == other.reference_type_ &&
+ handle_.ToHandle(&this_handle) ==
+ other.handle_.ToHandle(&other_handle) &&
+ this_handle.is_identical_to(other_handle);
+ }
+
+ bool is_null() const { return handle_.is_null(); }
+
+ private:
+ inline MaybeObjectHandle(Object* object,
+ HeapObjectReferenceType reference_type,
+ Isolate* isolate);
+ inline MaybeObjectHandle(Handle<Object> object,
+ HeapObjectReferenceType reference_type);
+
+ HeapObjectReferenceType reference_type_;
+ MaybeHandle<Object> handle_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAYBE_HANDLES_H_
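
The header comment above is the whole contract: a Handle converts to a MaybeHandle implicitly, but going back requires an explicit null check. A minimal usage sketch (the producing function is hypothetical):

  // Sketch only; SomethingThatCanFail stands in for any internal API that
  // returns MaybeHandle<String> and leaves an exception pending on failure.
  MaybeHandle<String> maybe = SomethingThatCanFail(isolate);
  Handle<String> str;
  if (!maybe.ToHandle(&str)) {
    return MaybeHandle<Object>();   // propagate the failure to the caller
  }
  // str is guaranteed non-null here; ToHandleChecked() would CHECK instead.
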
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 4fd27f9807..a1c228ec59 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -6,12 +6,13 @@
#include <memory>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/execution.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/objects/frame-array-inl.h"
-#include "src/string-builder.h"
+#include "src/objects/js-array-inl.h"
+#include "src/string-builder-inl.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -649,8 +650,8 @@ void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
if (array->IsWasmInterpretedFrame(frame_ix)) {
code_ = nullptr;
} else {
- code_ = wasm_instance_->module_object()->native_module()->code(
- wasm_func_index_);
+ code_ = reinterpret_cast<wasm::WasmCode*>(
+ array->WasmCodeObject(frame_ix)->foreign_address());
}
offset_ = array->Offset(frame_ix)->value();
}
@@ -1029,7 +1030,7 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
Factory* factory = isolate->factory();
Handle<String> result_string = Object::NoSideEffectsToString(isolate, arg);
MaybeHandle<String> maybe_result_string = MessageTemplate::FormatMessage(
- template_index, result_string, factory->empty_string(),
+ isolate, template_index, result_string, factory->empty_string(),
factory->empty_string());
if (!maybe_result_string.ToHandle(&result_string)) {
DCHECK(isolate->has_pending_exception());
@@ -1058,12 +1059,11 @@ const char* MessageTemplate::TemplateString(int template_index) {
}
}
-
-MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
+MaybeHandle<String> MessageTemplate::FormatMessage(Isolate* isolate,
+ int template_index,
Handle<String> arg0,
Handle<String> arg1,
Handle<String> arg2) {
- Isolate* isolate = arg0->GetIsolate();
const char* template_string = TemplateString(template_index);
if (template_string == nullptr) {
isolate->ThrowIllegalOperation();
@@ -1228,8 +1228,8 @@ Handle<String> FormatMessage(Isolate* isolate, int template_index,
isolate->native_context()->IncrementErrorsThrown();
Handle<String> msg;
- if (!MessageTemplate::FormatMessage(template_index, arg0_str, arg1_str,
- arg2_str)
+ if (!MessageTemplate::FormatMessage(isolate, template_index, arg0_str,
+ arg1_str, arg2_str)
.ToHandle(&msg)) {
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 68078bb373..030fc0b926 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -263,6 +263,7 @@ class ErrorUtils : public AllStatic {
T(Unsupported, "Not supported") \
T(WrongServiceType, "Internal error, wrong service type: %") \
T(WrongValueType, "Internal error. Wrong value type.") \
+ T(IcuError, "Internal error. Icu error.") \
/* TypeError */ \
T(ApplyNonFunction, \
"Function.prototype.apply was called on %, which is a % and not a " \
@@ -273,6 +274,7 @@ class ErrorUtils : public AllStatic {
"Derived ArrayBuffer constructor created a buffer which was too small") \
T(ArrayBufferSpeciesThis, \
"ArrayBuffer subclass returned this from species constructor") \
+ T(ArrayItemNotType, "array %[%] is not type %") \
T(AwaitNotInAsyncFunction, "await is only valid in async function") \
T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
T(BadSortComparisonFunction, \
@@ -345,6 +347,7 @@ class ErrorUtils : public AllStatic {
T(LocaleNotEmpty, \
"First argument to Intl.Locale constructor can't be empty or missing") \
T(LocaleBadParameters, "Incorrect locale information provided") \
+ T(ListFormatBadParameters, "Incorrect ListFormat information provided") \
T(MapperFunctionNonCallable, "flatMap mapper function is not callable") \
T(MethodCalledOnWrongObject, \
"Method % called on a non-object or on a wrong type of object.") \
@@ -372,6 +375,7 @@ class ErrorUtils : public AllStatic {
"% is not a function or its return value is not iterable") \
T(NotCallableOrAsyncIterable, \
"% is not a function or its return value is not async iterable") \
+ T(NotFiniteNumber, "Value need to be finite number for %()") \
T(NotIterable, "% is not iterable") \
T(NotAsyncIterable, "% is not async iterable") \
T(NotPropertyName, "% is not a valid property name") \
@@ -531,8 +535,6 @@ class ErrorUtils : public AllStatic {
T(BigIntNegativeExponent, "Exponent must be positive") \
T(BigIntTooBig, "Maximum BigInt size exceeded") \
T(DateRange, "Provided date is not in valid range.") \
- T(ExpectedTimezoneID, \
- "Expected Area/Location(/Location)* for time zone, got %") \
T(ExpectedLocation, \
"Expected letters optionally connected with underscores or hyphens for " \
"a location, got %") \
@@ -554,6 +556,7 @@ class ErrorUtils : public AllStatic {
T(InvalidWeakSetValue, "Invalid value used in weak set") \
T(InvalidStringLength, "Invalid string length") \
T(InvalidTimeValue, "Invalid time value") \
+ T(InvalidTimeZone, "Invalid time zone specified: %") \
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
T(InvalidTypedArrayIndex, "Invalid typed array index") \
T(InvalidTypedArrayLength, "Invalid typed array length: %") \
@@ -574,7 +577,6 @@ class ErrorUtils : public AllStatic {
T(ToRadixFormatRange, "toString() radix argument must be between 2 and 36") \
T(TypedArraySetOffsetOutOfBounds, "offset is out of bounds") \
T(TypedArraySetSourceTooLarge, "Source is too large") \
- T(UnsupportedTimeZone, "Unsupported time zone specified %") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
T(AmbiguousExport, \
@@ -733,6 +735,7 @@ class ErrorUtils : public AllStatic {
/* Wasm errors (currently Error) */ \
T(WasmTrapUnreachable, "unreachable") \
T(WasmTrapMemOutOfBounds, "memory access out of bounds") \
+ T(WasmTrapUnalignedAccess, "operation does not support unaligned accesses") \
T(WasmTrapDivByZero, "divide by zero") \
T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
T(WasmTrapRemByZero, "remainder by zero") \
@@ -777,7 +780,7 @@ class MessageTemplate {
static const char* TemplateString(int template_index);
- static MaybeHandle<String> FormatMessage(int template_index,
+ static MaybeHandle<String> FormatMessage(Isolate* isolate, int template_index,
Handle<String> arg0,
Handle<String> arg1,
Handle<String> arg2);
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com \ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com \ No newline at end of file
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index e9f84b6100..a0ca03f3cf 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -101,15 +101,12 @@ Address RelocInfo::target_address_address() {
// On R6 we don't move to the end of the instructions to be patched, but one
// instruction before, because if these instructions are at the end of the
// code object it can cause errors in the deserializer.
- return pc_ + (Assembler::kInstructionsFor32BitConstant - 1) *
- Assembler::kInstrSize;
+ return pc_ + (Assembler::kInstructionsFor32BitConstant - 1) * kInstrSize;
} else {
- return pc_ +
- Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize;
+ return pc_ + Assembler::kInstructionsFor32BitConstant * kInstrSize;
}
}
-
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
@@ -144,8 +141,8 @@ int Assembler::deserialization_special_target_size(
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
- Instr instr1 = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr2 = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
+ Instr instr1 = Assembler::instr_at(pc + 0 * kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
instr1 &= ~kImm16Mask;
@@ -157,16 +154,13 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
uint32_t lui_offset_u, jic_offset_u;
Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
- Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr1 | lui_offset_u);
- Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr2 | jic_offset_u);
+ Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
+ Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
// Encoded internal references are lui/ori load of 32-bit absolute address.
- Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
+ Assembler::instr_at_put(pc + 0 * kInstrSize,
instr1 | ((imm >> kLuiShift) & kImm16Mask));
- Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr2 | (imm & kImm16Mask));
+ Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
}
// Currently used only by deserializer, and all code will be flushed
@@ -180,7 +174,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
set_target_internal_reference_encoded_at(pc, target);
} else {
DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
}
@@ -204,9 +198,7 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this,
- HeapObject::cast(target));
- heap->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -225,13 +217,13 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
} else {
// Encoded internal references are lui/ori or lui/jic load of 32-bit
// absolute address.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
- Instr instr1 = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
- Instr instr2 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ Instr instr1 = Assembler::instr_at(pc_ + 0 * kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc_ + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2));
if (Assembler::IsJicOrJialc(instr2)) {
@@ -271,9 +263,10 @@ Address RelocInfo::target_off_heap_target() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
+ IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = kNullAddress;
+ Memory<Address>(pc_) = kNullAddress;
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
} else {
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index bc7dd6bdc1..2c04430509 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -554,6 +554,12 @@ bool Assembler::IsBc(Instr instr) {
return opcode == BC || opcode == BALC;
}
+bool Assembler::IsNal(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
+}
bool Assembler::IsBzc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -850,29 +856,58 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
// Check we have a branch or jump instruction.
- DCHECK(IsBranch(instr) || IsLui(instr));
+ DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
if (IsBranch(instr)) {
return AddBranchOffset(pos, instr);
- } else {
- Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
- DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
- int32_t imm;
- if (IsJicOrJialc(instr2)) {
- imm = CreateTargetAddress(instr1, instr2);
- } else {
- imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
- }
-
- if (imm == kEndOfJumpChain) {
+ } else if (IsMov(instr, t8, ra)) {
+ int32_t imm32;
+ Instr instr_lui = instr_at(pos + 2 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 3 * kInstrSize);
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
+ }
+ return pos + Assembler::kLongBranchPCOffset + imm32;
+ } else {
+ DCHECK(IsLui(instr));
+ if (IsNal(instr_at(pos + kInstrSize))) {
+ int32_t imm32;
+ Instr instr_lui = instr_at(pos + 0 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm32 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ }
+ return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
- uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
- int32_t delta = instr_address - imm;
- DCHECK(pos > delta);
- return pos - delta;
+ Instr instr1 = instr_at(pos + 0 * kInstrSize);
+ Instr instr2 = instr_at(pos + 1 * kInstrSize);
+ DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
+ int32_t imm;
+ if (IsJicOrJialc(instr2)) {
+ imm = CreateTargetAddress(instr1, instr2);
+ } else {
+ imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
+ }
+
+ if (imm == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ int32_t delta = instr_address - imm;
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
}
}
return 0;
@@ -916,8 +951,8 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} else if (IsMov(instr, t8, ra)) {
- Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
+ Instr instr_lui = instr_at(pos + 2 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 3 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
@@ -929,8 +964,16 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
Instr instr_b = BEQ;
instr_b = SetBranchOffset(pos, target_pos, instr_b);
- instr_at_put(pos, instr_b);
- instr_at_put(pos + 1 * Assembler::kInstrSize, 0);
+ Instr instr_j = instr_at(pos + 5 * kInstrSize);
+ Instr instr_branch_delay;
+
+ if (IsJump(instr_j)) {
+ instr_branch_delay = instr_at(pos + 6 * kInstrSize);
+ } else {
+ instr_branch_delay = instr_at(pos + 7 * kInstrSize);
+ }
+ instr_at_put(pos + 0 * kInstrSize, instr_b);
+ instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
} else {
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
DCHECK_EQ(imm & 3, 0);
@@ -938,31 +981,60 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
- instr_at_put(pos + 4 * Assembler::kInstrSize,
- instr_lui | ((imm >> 16) & kImm16Mask));
- instr_at_put(pos + 5 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
+ instr_at_put(pos + 2 * kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
}
} else {
- Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
- DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
- DCHECK_EQ(imm & 3, 0);
- DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
- instr1 &= ~kImm16Mask;
- instr2 &= ~kImm16Mask;
-
- if (IsJicOrJialc(instr2)) {
- uint32_t lui_offset_u, jic_offset_u;
- UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
- instr_at_put(pos + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
- instr_at_put(pos + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
+ DCHECK(IsLui(instr));
+ if (IsNal(instr_at(pos + kInstrSize))) {
+ Instr instr_lui = instr_at(pos + 0 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
+ DCHECK_EQ(imm & 3, 0);
+ if (is_int16(imm + Assembler::kLongBranchPCOffset -
+ Assembler::kBranchPCOffset)) {
+ // Optimize by converting to regular branch and link with 16-bit
+ // offset.
+ Instr instr_b = REGIMM | BGEZAL; // Branch and link.
+ instr_b = SetBranchOffset(pos, target_pos, instr_b);
+ // Correct ra register to point to one instruction after jalr from
+ // TurboAssembler::BranchAndLinkLong.
+ Instr instr_a = ADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
+ kOptimizedBranchAndLinkLongReturnOffset;
+
+ instr_at_put(pos, instr_b);
+ instr_at_put(pos + 1 * kInstrSize, instr_a);
+ } else {
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pos + 0 * kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
+ }
} else {
- instr_at_put(pos + 0 * Assembler::kInstrSize,
- instr1 | ((imm & kHiMask) >> kLuiShift));
- instr_at_put(pos + 1 * Assembler::kInstrSize,
- instr2 | (imm & kImm16Mask));
+ Instr instr1 = instr_at(pos + 0 * kInstrSize);
+ Instr instr2 = instr_at(pos + 1 * kInstrSize);
+ DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ DCHECK_EQ(imm & 3, 0);
+ DCHECK(IsLui(instr1) && (IsJicOrJialc(instr2) || IsOri(instr2)));
+ instr1 &= ~kImm16Mask;
+ instr2 &= ~kImm16Mask;
+
+ if (IsJicOrJialc(instr2)) {
+ uint32_t lui_offset_u, jic_offset_u;
+ UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
+ instr_at_put(pos + 0 * kInstrSize, instr1 | lui_offset_u);
+ instr_at_put(pos + 1 * kInstrSize, instr2 | jic_offset_u);
+ } else {
+ instr_at_put(pos + 0 * kInstrSize,
+ instr1 | ((imm & kHiMask) >> kLuiShift));
+ instr_at_put(pos + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
+ }
}
}
}
@@ -1421,6 +1493,28 @@ uint32_t Assembler::jump_address(Label* L) {
return imm;
}
+uint32_t Assembler::branch_long_offset(Label* L) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+
+ DCHECK(is_int32(static_cast<int64_t>(target_pos) -
+ static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
+ int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
+ DCHECK_EQ(offset & 3, 0);
+
+ return offset;
+}
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
@@ -2228,7 +2322,7 @@ void Assembler::sc(Register rd, const MemOperand& rs) {
}
void Assembler::lui(Register rd, int32_t j) {
- DCHECK(is_uint16(j));
+ DCHECK(is_uint16(j) || is_int16(j));
GenInstrImmediate(LUI, zero_reg, rd, j);
}
@@ -3657,8 +3751,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
} else {
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
if (IsLui(instr)) {
- Instr instr1 = instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr2 = instr_at(pc + 1 * Assembler::kInstrSize);
+ Instr instr1 = instr_at(pc + 0 * kInstrSize);
+ Instr instr2 = instr_at(pc + 1 * kInstrSize);
DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
int32_t imm;
if (IsJicOrJialc(instr2)) {
@@ -3679,13 +3773,12 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
if (IsJicOrJialc(instr2)) {
uint32_t lui_offset_u, jic_offset_u;
Assembler::UnpackTargetAddressUnsigned(imm, lui_offset_u, jic_offset_u);
- instr_at_put(pc + 0 * Assembler::kInstrSize, instr1 | lui_offset_u);
- instr_at_put(pc + 1 * Assembler::kInstrSize, instr2 | jic_offset_u);
+ instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
+ instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
- instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_at_put(pc + 0 * kInstrSize,
instr1 | ((imm >> kLuiShift) & kImm16Mask));
- instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr2 | (imm & kImm16Mask));
+ instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
}
return 2; // Number of instructions patched.
} else {
@@ -3833,26 +3926,19 @@ void Assembler::CheckTrampolinePool() {
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
{
- // printf("Generate trampoline %d\n", i);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and
- // available to be patched.
if (IsMipsArchVariant(kMips32r6)) {
bc(&after_pool);
nop();
} else {
- Label find_pc;
or_(t8, ra, zero_reg);
- bal(&find_pc);
- or_(t9, ra, zero_reg);
- bind(&find_pc);
- or_(ra, t8, zero_reg);
- lui(t8, 0);
- ori(t8, t8, 0);
- addu(t9, t9, t8);
+ nal(); // Read PC into ra register.
+ lui(t9, 0); // Branch delay slot.
+ ori(t9, t9, 0);
+ addu(t9, ra, t9);
        // Instruction jr will take or_ from the next trampoline
        // in its branch delay slot. This is the expected behavior
        // in order to decrease the size of the trampoline pool.
+ or_(ra, t8, zero_reg);
jr(t9);
}
}
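
A minimal standalone sketch of the offset arithmetic behind the new branch_long_offset()/target_at() pair above: the PC-relative offset is split across the lui/ori immediates and the target is rebuilt as pos + kLongBranchPCOffset + imm32. Constant values and helper names below are illustrative assumptions (kInstrSize = 4, kLuiShift = 16, kImm16Mask = 0xFFFF), not part of the patch.

#include <cassert>
#include <cstdint>

// Assumed values mirroring the constants referenced in the patch.
constexpr int kInstrSize = 4;
constexpr int kLongBranchPCOffset = 3 * kInstrSize;
constexpr int kLuiShift = 16;
constexpr uint32_t kImm16Mask = 0xFFFF;

// Split a PC-relative offset into lui/ori immediates, as target_at_put()
// does when patching the nal-based long-branch sequence.
void EncodeLongBranch(int pos, int target_pos, uint32_t* lui_imm,
                      uint32_t* ori_imm) {
  int32_t imm = target_pos - (pos + kLongBranchPCOffset);
  assert((imm & 3) == 0);  // Targets are instruction aligned.
  *lui_imm = (static_cast<uint32_t>(imm) >> kLuiShift) & kImm16Mask;
  *ori_imm = static_cast<uint32_t>(imm) & kImm16Mask;
}

// Rebuild the target the way target_at() does (two's complement assumed).
int DecodeLongBranch(int pos, uint32_t lui_imm, uint32_t ori_imm) {
  int32_t imm32 = static_cast<int32_t>((lui_imm << kLuiShift) | ori_imm);
  return pos + kLongBranchPCOffset + imm32;
}

int main() {
  uint32_t lui_imm, ori_imm;
  EncodeLongBranch(/*pos=*/128, /*target_pos=*/0x12340, &lui_imm, &ori_imm);
  assert(DecodeLongBranch(128, lui_imm, ori_imm) == 0x12340);
  return 0;
}
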
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index ea34e7a440..5a51522940 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -226,8 +226,8 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
};
// s7: context register
-// s3: lithium scratch
-// s4: lithium scratch2
+// s3: scratch register
+// s4: scratch register 2
#define DECLARE_REGISTER(R) \
constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
@@ -305,6 +305,7 @@ DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
#undef DECLARE_DOUBLE_REGISTER
constexpr DoubleRegister no_freg = DoubleRegister::no_reg();
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// SIMD registers.
typedef MSARegister Simd128Register;
@@ -475,8 +476,7 @@ class MemOperand : public Operand {
friend class Assembler;
};
-
-class Assembler : public AssemblerBase {
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -557,6 +557,7 @@ class Assembler : public AssemblerBase {
return branch_offset26(L) >> 2;
}
uint32_t jump_address(Label* L);
+ uint32_t branch_long_offset(Label* L);
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
@@ -606,16 +607,19 @@ class Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- // Size of an instruction.
- static constexpr int kInstrSize = sizeof(Instr);
-
// Difference between address of current opcode and target address offset.
- static constexpr int kBranchPCOffset = 4;
+ static constexpr int kBranchPCOffset = kInstrSize;
// Difference between address of current opcode and target address offset,
  // when we are generating a sequence of instructions for long relative PC
// branches
- static constexpr int kLongBranchPCOffset = 12;
+ static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
+
+  // Adjust ra register in branch delay slot of bal instruction so as to skip
+ // instructions not needed after optimization of PIC in
+ // TurboAssembler::BranchAndLink method.
+
+ static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
@@ -623,6 +627,7 @@ class Assembler : public AssemblerBase {
// are split across two consecutive instructions and don't exist separately
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
+
static constexpr int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store 32bit constant. This
@@ -650,7 +655,7 @@ class Assembler : public AssemblerBase {
static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static constexpr int kTrampolineSlotsSize =
- IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 8 * kInstrSize;
+ IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 7 * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -752,6 +757,7 @@ class Assembler : public AssemblerBase {
bltc(rs, rt, shifted_branch_offset(L));
}
void bltzal(Register rs, int16_t offset);
+ void nal() { bltzal(zero_reg, 0); }
void blezalc(Register rt, int16_t offset);
inline void blezalc(Register rt, Label* L) {
blezalc(rt, shifted_branch_offset(L));
@@ -1759,6 +1765,7 @@ class Assembler : public AssemblerBase {
static bool IsBranch(Instr instr);
static bool IsMsaBranch(Instr instr);
static bool IsBc(Instr instr);
+ static bool IsNal(Instr instr);
static bool IsBzc(Instr instr);
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 8fc1c35cc7..3da00d4748 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
@@ -250,7 +250,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push ra" instruction, followed by a call.
  // Note: on MIPS "push" is 2 instructions
const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+ Assembler::kCallTargetAddressOffset + (2 * kInstrSize);
// This should contain all kJSCallerSaved registers.
const RegList kSavedRegs =
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 4641090cbc..d6b47990f8 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -542,7 +542,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
@@ -569,7 +569,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 6478b4e7c4..54eb0a6eb0 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -1120,30 +1120,6 @@ enum MSABranchDF {
MSA_BRANCH_V
};
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cc) {
- switch (cc) {
- case Uless:
- return Ugreater;
- case Ugreater:
- return Uless;
- case Ugreater_equal:
- return Uless_equal;
- case Uless_equal:
- return Ugreater_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- }
-}
-
// ----- Coprocessor conditions.
enum FPUCondition {
@@ -1244,11 +1220,12 @@ static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
}
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
class InstructionBase {
public:
enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
// always the value of the current instruction being executed.
kPCReadOffset = 0
@@ -1707,14 +1684,14 @@ class Instruction : public InstructionGetters<InstructionBase> {
// C/C++ argument slots size.
const int kCArgSlotCount = 4;
-const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
+const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize;
const int kInvalidStackOffset = -1;
// JS argument slots size.
-const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kJSArgsSlotsSize = 0 * kInstrSize;
// Assembly builtins argument slots size.
-const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
+const int kBArgsSlotsSize = 0 * kInstrSize;
-const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+const int kBranchReturnOffset = 2 * kInstrSize;
InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index f64953de12..58fd212f78 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -237,9 +237,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS32R6
-const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
#else
-const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
@@ -252,10 +252,10 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
#ifdef _MIPS_ARCH_MIPS32R6
int kMaxEntriesBranchReach =
- (1 << (kImm26Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
+ (1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
#else
- int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2))/
- (table_entry_size_ / Assembler::kInstrSize);
+ int kMaxEntriesBranchReach =
+ (1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
#endif
if (count() <= kMaxEntriesBranchReach) {
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index d2f8ebb0ee..0049f9fa91 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -2704,7 +2704,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
UNSUPPORTED_MIPS();
}
}
- return Instruction::kInstrSize;
+ return kInstrSize;
}
@@ -2752,13 +2752,6 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
v8::internal::Decoder d(converter_, buffer);
@@ -2771,10 +2764,10 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return -1;
}
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
NameConverter converter;
- Disassembler d(converter);
+ Disassembler d(converter, unimplemented_action);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index e0d122fc49..31b5f82895 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -246,30 +246,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (argc)
- a2, // address of first argument (argv)
- a1 // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c254e4a78e..35a9959ddb 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -21,6 +21,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -3781,6 +3782,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -3793,38 +3795,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
-int TurboAssembler::CallSize(Register target, int16_t offset, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- int size = 0;
-
- if (cond == cc_always) {
- size += 1;
- } else {
- size += 3;
- }
-
- if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
-
- if (!IsMipsArchVariant(kMips32r6) && offset != 0) {
- size += 1;
- }
-
- return size * kInstrSize;
-}
-
-
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(is_int16(offset));
-#ifdef DEBUG
- int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
-#endif
-
BlockTrampolinePoolScope block_trampoline_pool(this);
- Label start;
- bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jialc(target, offset);
@@ -3847,11 +3822,6 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
-
-#ifdef DEBUG
- DCHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
-#endif
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
@@ -3859,13 +3829,7 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(is_uint16(offset));
-#ifdef DEBUG
- int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
-#endif
-
BlockTrampolinePoolScope block_trampoline_pool(this);
- Label start;
- bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
jialc(base, offset);
@@ -3890,29 +3854,12 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
-
-#ifdef DEBUG
- DCHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
-#endif
-}
-
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- int size = CallSize(t9, 0, cond, rs, rt, bd);
- if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always)
- return size + 1 * kInstrSize;
- else
- return size + 2 * kInstrSize;
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
CheckBuffer();
BlockTrampolinePoolScope block_trampoline_pool(this);
- Label start;
- bind(&start);
int32_t target_int = static_cast<int32_t>(target);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
uint32_t lui_offset, jialc_offset;
@@ -3926,15 +3873,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, 0, cond, rs, rt, bd);
}
- DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- AllowDeferredHandleDereference using_raw_address;
- return CallSize(code.address(), rmode, cond, rs, rt, bd);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
@@ -3951,6 +3889,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -3960,13 +3899,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
}
}
- Label start;
- bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
AllowDeferredHandleDereference embedding_raw_address;
Call(code.address(), rmode, cond, rs, rt, bd);
- DCHECK_EQ(CallSize(code, rmode, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
}
void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
@@ -3979,41 +3914,21 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
} else {
+ // Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
- uint32_t imm32;
- imm32 = jump_address(L);
- if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
- uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and
- // available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- lui(scratch, lui_offset);
- jic(scratch, jic_offset);
- }
- CheckBuffer();
- } else {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and
- // available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(scratch, (imm32 & kHiMask) >> kLuiShift);
- ori(scratch, scratch, (imm32 & kImm16Mask));
- }
- CheckBuffer();
- jr(scratch);
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
+ int32_t imm32;
+ imm32 = branch_long_offset(L);
+ or_(t8, ra, zero_reg);
+ nal(); // Read PC into ra register.
+ lui(t9, (imm32 & kHiMask) >> kLuiShift); // Branch delay slot.
+ ori(t9, t9, (imm32 & kImm16Mask));
+ addu(t9, ra, t9);
+ if (bdslot == USE_DELAY_SLOT) {
+ or_(ra, t8, zero_reg);
}
+ jr(t9);
+    // Emit an or_ in the branch delay slot if it's protected.
+ if (bdslot == PROTECT) or_(ra, t8, zero_reg);
}
}
@@ -4022,41 +3937,17 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
} else {
+ // Generate position independent long branch and link.
BlockTrampolinePoolScope block_trampoline_pool(this);
- uint32_t imm32;
- imm32 = jump_address(L);
- if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
- uint32_t lui_offset, jialc_offset;
- UnpackTargetAddressUnsigned(imm32, lui_offset, jialc_offset);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and
- // available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- lui(scratch, lui_offset);
- jialc(scratch, jialc_offset);
- }
- CheckBuffer();
- } else {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and
- // available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- lui(scratch, (imm32 & kHiMask) >> kLuiShift);
- ori(scratch, scratch, (imm32 & kImm16Mask));
- }
- CheckBuffer();
- jalr(scratch);
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
- }
+ int32_t imm32;
+ imm32 = branch_long_offset(L);
+ lui(t8, (imm32 & kHiMask) >> kLuiShift);
+ nal(); // Read PC into ra register.
+ ori(t8, t8, (imm32 & kImm16Mask)); // Branch delay slot.
+ addu(t8, ra, t8);
+ jalr(t8);
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
}
@@ -4755,6 +4646,15 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ PrepareCallCFunction(0, a0);
+ li(a0, Operand(static_cast<int>(reason)));
+ CallCFunction(ExternalReference::abort_with_reason(), 1);
+ return;
+ }
+
Move(a0, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -5475,7 +5375,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
- And(scratch, object, Operand(~Page::kPageAlignmentMask));
+ And(scratch, object, Operand(~kPageAlignmentMask));
lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
@@ -5505,42 +5405,20 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
UNREACHABLE();
}
-bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8,
- Register reg9, Register reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// This push on ra and the pop below together ensure that we restore the
// register ra, which is needed while computing the code start address.
push(ra);
- // The bal instruction puts the address of the current instruction into
+ // The nal instruction puts the address of the current instruction into
// the return address (ra) register, which we can use later on.
- Label current;
- bal(&current);
- nop();
+ if (IsMipsArchVariant(kMips32r6)) {
+ addiupc(ra, 1);
+ } else {
+ nal();
+ nop();
+ }
int pc = pc_offset();
- bind(&current);
li(dst, pc);
subu(dst, ra, dst);
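
The position-independent sequences above work because nal (and the other branch-and-link instructions) write the address two instructions past themselves into ra, which is why branch_long_offset() is taken relative to pos + kLongBranchPCOffset and why the bgezal fallback in target_at_put() adds kOptimizedBranchAndLinkLongReturnOffset to ra. A small standalone sketch of that return-address arithmetic; constants and the LinkAddress helper are illustrative assumptions, not part of the patch.

#include <cassert>

constexpr int kInstrSize = 4;
constexpr int kLongBranchPCOffset = 3 * kInstrSize;
constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;

// MIPS branch-and-link instructions place the address of the second
// following instruction (pc + 8) into the link register.
int LinkAddress(int branch_pos) { return branch_pos + 2 * kInstrSize; }

int main() {
  int pos = 0x100;  // Start of the emitted long-branch sequence.

  // BranchLong and BranchAndLinkLong both emit nal() as the second
  // instruction, so ra ends up at pos + kLongBranchPCOffset and the addu
  // computes target = ra + imm32 = pos + kLongBranchPCOffset + imm32.
  assert(LinkAddress(pos + kInstrSize) == pos + kLongBranchPCOffset);

  // BranchAndLinkLong places jalr at pos + 4 * kInstrSize, which links to
  // pos + 24; the optimized 16-bit bgezal at pos links to pos + 8, so
  // target_at_put() adds 4 * kInstrSize to ra to keep returns identical.
  int ra_after_jalr = LinkAddress(pos + 4 * kInstrSize);
  int ra_after_bgezal = LinkAddress(pos);
  assert(ra_after_bgezal + kOptimizedBranchAndLinkLongReturnOffset ==
         ra_after_jalr);
  return 0;
}
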
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 248dd4f905..f6c371923f 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -35,11 +35,11 @@ constexpr Register kJavaScriptCallExtraArg1Register = a2;
constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
constexpr Register kWasmInstanceRegister = a0;
// Forward declarations
-enum class AbortReason;
-class JumpTarget;
+enum class AbortReason : uint8_t;
// Reserved Register Usage Summary.
//
@@ -60,14 +60,6 @@ enum LeaveExitFrameMode {
NO_EMIT_RETURN = false
};
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
USE_DELAY_SLOT,
@@ -96,13 +88,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg5 = no_reg,
Register reg6 = no_reg);
-bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
- Register reg4 = no_reg, Register reg5 = no_reg,
- Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg, Register reg9 = no_reg,
- Register reg10 = no_reg);
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -131,7 +116,7 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -264,14 +249,9 @@ class TurboAssembler : public TurboAssemblerBase {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
- static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
@@ -1209,7 +1189,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
bind(&here);
addu(scratch, scratch, ra);
pop(ra);
- lw(scratch, MemOperand(scratch, 6 * v8::internal::Assembler::kInstrSize));
+ lw(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
}
jr(scratch);
nop(); // Branch delay slot nop.
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index d58b899755..13f5f38f0d 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -395,7 +395,7 @@ void MipsDebugger::Debug() {
} else {
// Allow si to jump over generated breakpoints.
PrintF("/!\\ Jumping over generated breakpoint.\n");
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -562,7 +562,7 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
@@ -571,7 +571,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@@ -579,7 +579,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
+ end = cur + (value * kInstrSize);
}
}
} else {
@@ -587,7 +587,7 @@ void MipsDebugger::Debug() {
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
+ end = cur + (value2 * kInstrSize);
}
}
@@ -595,7 +595,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
- cur += Instruction::kInstrSize;
+ cur += kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@@ -622,12 +622,10 @@ void MipsDebugger::Debug() {
PrintF("No flags on MIPS !\n");
} else if (strcmp(cmd, "stop") == 0) {
int32_t value;
- intptr_t stop_pc = sim_->get_pc() -
- 2 * Instruction::kInstrSize;
+ intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
- reinterpret_cast<Instruction*>(stop_pc +
- Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->IsStopInstruction(stop_instr)) {
@@ -696,20 +694,20 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
+ end = cur + (value2 * kInstrSize);
}
}
@@ -717,7 +715,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
- cur += Instruction::kInstrSize;
+ cur += kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
@@ -871,8 +869,7 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize));
+ cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@@ -3797,7 +3794,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int32_t next_pc = rs();
int32_t current_pc = get_pc();
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_pc(next_pc);
pc_modified_ = true;
@@ -3808,9 +3805,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int32_t return_addr_reg = rd_reg();
int32_t current_pc = get_pc();
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(return_addr_reg, current_pc + 2 * Instruction::kInstrSize);
+ set_register(return_addr_reg, current_pc + 2 * kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
@@ -6304,12 +6301,12 @@ void Simulator::DecodeTypeImmediate() {
[this, &next_pc, &execute_branch_delay_instruction](bool do_branch) {
execute_branch_delay_instruction = true;
int32_t current_pc = get_pc();
+ set_register(31, current_pc + 2 * kInstrSize);
if (do_branch) {
int16_t imm16 = this->instr_.Imm16Value();
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ next_pc = current_pc + (imm16 << 2) + kInstrSize;
} else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
+ next_pc = current_pc + 2 * kInstrSize;
}
};
@@ -6319,9 +6316,9 @@ void Simulator::DecodeTypeImmediate() {
int32_t current_pc = get_pc();
if (do_branch) {
int16_t imm16 = this->instr_.Imm16Value();
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ next_pc = current_pc + (imm16 << 2) + kInstrSize;
} else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
+ next_pc = current_pc + 2 * kInstrSize;
}
};
@@ -6343,9 +6340,9 @@ void Simulator::DecodeTypeImmediate() {
// pc + kInstrSize + 511 * kInstrSize]
int16_t offset = static_cast<int16_t>(imm16 << (bitsIn16Int - 10)) >>
(bitsIn16Int - 12);
- next_pc = current_pc + offset + Instruction::kInstrSize;
+ next_pc = current_pc + offset + kInstrSize;
} else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
+ next_pc = current_pc + 2 * kInstrSize;
}
};
@@ -6356,8 +6353,8 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = this->instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
- next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
- set_register(31, current_pc + Instruction::kInstrSize);
+ next_pc = current_pc + (imm << 2) + kInstrSize;
+ set_register(31, current_pc + kInstrSize);
}
};
@@ -6368,7 +6365,7 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = this->instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
- next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
+ next_pc = get_pc() + (imm << 2) + kInstrSize;
}
};
@@ -6568,7 +6565,7 @@ void Simulator::DecodeTypeImmediate() {
if (rs_reg != 0) { // BNEZC
BranchCompactHelper(rs != 0, 21);
} else { // JIALC
- set_register(31, get_pc() + Instruction::kInstrSize);
+ set_register(31, get_pc() + kInstrSize);
next_pc = rt + imm16;
}
break;
@@ -6864,7 +6861,7 @@ void Simulator::DecodeTypeImmediate() {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(get_pc() + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -6890,13 +6887,13 @@ void Simulator::DecodeTypeJump() {
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (simInstr.IsLinkingInstruction()) {
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
@@ -6937,13 +6934,10 @@ void Simulator::InstructionDecode(Instruction* instr) {
trace_buf_.start());
}
if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstrSize);
+ set_register(pc, reinterpret_cast<int32_t>(instr) + kInstrSize);
}
}
-
-
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
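
The simulator branch helpers above share one delay-slot convention: the link register receives the address after the delay slot, a taken branch target is relative to the delay-slot address, and a not-taken branch simply skips the delay slot. A tiny standalone sketch of that arithmetic; kInstrSize = 4 and the helper names are illustrative assumptions.

#include <cassert>
#include <cstdint>

constexpr int32_t kInstrSize = 4;

// Address written to register 31 by a branch-and-link (after the delay slot).
int32_t LinkAddress(int32_t pc) { return pc + 2 * kInstrSize; }

// Next pc as computed by the branch-helper lambdas above.
int32_t NextPc(int32_t pc, int16_t imm16, bool taken) {
  return taken ? pc + (static_cast<int32_t>(imm16) << 2) + kInstrSize
               : pc + 2 * kInstrSize;
}

int main() {
  int32_t pc = 0x2000;
  assert(LinkAddress(pc) == 0x2008);
  assert(NextPc(pc, /*imm16=*/4, /*taken=*/true) == 0x2014);   // pc + 16 + 4
  assert(NextPc(pc, /*imm16=*/4, /*taken=*/false) == 0x2008);  // skip slot
  return 0;
}
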
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index d505f92c9c..59ec3af7e3 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -447,7 +447,7 @@ class Simulator : public SimulatorBase {
// Compact branch guard.
void CheckForbiddenSlot(int32_t current_pc) {
Instruction* instr_after_compact_branch =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
FATAL(
"Error: Unexpected instruction 0x%08x immediately after a "
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com \ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com \ No newline at end of file
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index d0a1688367..24abf0249e 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -96,7 +96,7 @@ Address RelocInfo::target_address_address() {
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
- return pc_ + Assembler::kInstructionsFor64BitConstant * Assembler::kInstrSize;
+ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
}
@@ -128,7 +128,7 @@ int Assembler::deserialization_special_target_size(
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
// Encoded internal references are j/jal instructions.
- Instr instr = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr = Assembler::instr_at(pc + 0 * kInstrSize);
uint64_t imm28 = target & static_cast<uint64_t>(kImm28Mask);
@@ -148,7 +148,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
set_target_internal_reference_encoded_at(pc, target);
} else {
DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
}
@@ -171,11 +171,8 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
- target->IsHeapObject()) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this,
- HeapObject::cast(target));
- heap->RecordWriteIntoCode(host(), this, target);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -194,11 +191,11 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
} else {
// Encoded internal references are j/jal instructions.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
- Instr instr = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
+ Instr instr = Assembler::instr_at(pc_ + 0 * kInstrSize);
instr &= kImm26Mask;
uint64_t imm28 = instr << 2;
uint64_t segment = pc_ & ~static_cast<uint64_t>(kImm28Mask);
@@ -233,9 +230,10 @@ Address RelocInfo::target_off_heap_target() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
+ IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = kNullAddress;
+ Memory<Address>(pc_) = kNullAddress;
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
} else {
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index edb17b7b22..4abd272a5e 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -533,6 +533,12 @@ bool Assembler::IsBc(Instr instr) {
return opcode == BC || opcode == BALC;
}
+bool Assembler::IsNal(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0;
+}
bool Assembler::IsBzc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -781,34 +787,63 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
// Check we have a branch or jump instruction.
- DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
+ DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
+ IsMov(instr, t8, ra));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
return AddBranchOffset(pos, instr);
- } else if (IsLui(instr)) {
- Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
- Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
+ } else if (IsMov(instr, t8, ra)) {
+ int32_t imm32;
+ Instr instr_lui = instr_at(pos + 2 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 3 * kInstrSize);
+ DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
- DCHECK(IsOri(instr_ori2));
-
- // TODO(plind) create named constants for shift values.
- int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
- imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
- imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
- // Sign extend address;
- imm >>= 16;
-
- if (imm == kEndOfJumpChain) {
+ imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm32 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
+ }
+ return pos + Assembler::kLongBranchPCOffset + imm32;
+ } else if (IsLui(instr)) {
+ if (IsNal(instr_at(pos + kInstrSize))) {
+ int32_t imm32;
+ Instr instr_lui = instr_at(pos + 0 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm32 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ }
+ return pos + Assembler::kLongBranchPCOffset + imm32;
} else {
- uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
- DCHECK(instr_address - imm < INT_MAX);
- int delta = static_cast<int>(instr_address - imm);
- DCHECK(pos > delta);
- return pos - delta;
+ Instr instr_lui = instr_at(pos + 0 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * kInstrSize);
+ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
+ DCHECK(IsOri(instr_ori));
+ DCHECK(IsOri(instr_ori2));
+
+ // TODO(plind) create named constants for shift values.
+ int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
+ imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
+ imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
+ // Sign extend address.
+ imm >>= 16;
+
+ if (imm == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
+ DCHECK(instr_address - imm < INT_MAX);
+ int delta = static_cast<int>(instr_address - imm);
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
}
} else {
DCHECK(IsJ(instr) || IsJal(instr));
@@ -859,28 +894,57 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr = SetBranchOffset(pos, target_pos, instr);
instr_at_put(pos, instr);
} else if (IsLui(instr)) {
- Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
- Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
- DCHECK(IsOri(instr_ori));
- DCHECK(IsOri(instr_ori2));
+ if (IsNal(instr_at(pos + kInstrSize))) {
+ Instr instr_lui = instr_at(pos + 0 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
+ DCHECK(IsLui(instr_lui));
+ DCHECK(IsOri(instr_ori));
+ int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
+ DCHECK_EQ(imm & 3, 0);
+ if (is_int16(imm + Assembler::kLongBranchPCOffset -
+ Assembler::kBranchPCOffset)) {
+ // Optimize by converting to regular branch and link with 16-bit
+ // offset.
+ Instr instr_b = REGIMM | BGEZAL; // Branch and link.
+ instr_b = SetBranchOffset(pos, target_pos, instr_b);
+ // Correct ra register to point to the return address of the jalr emitted by
+ // TurboAssembler::BranchAndLinkLong (i.e. just past its delay slot).
+ Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
+ kOptimizedBranchAndLinkLongReturnOffset;
+
+ instr_at_put(pos, instr_b);
+ instr_at_put(pos + 1 * kInstrSize, instr_a);
+ } else {
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
- uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
- DCHECK_EQ(imm & 3, 0);
+ instr_at_put(pos + 0 * kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pos + 2 * kInstrSize, instr_ori | (imm & kImm16Mask));
+ }
+ } else {
+ Instr instr_lui = instr_at(pos + 0 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * kInstrSize);
+ Instr instr_ori2 = instr_at(pos + 3 * kInstrSize);
+ DCHECK(IsOri(instr_ori));
+ DCHECK(IsOri(instr_ori2));
- instr_lui &= ~kImm16Mask;
- instr_ori &= ~kImm16Mask;
- instr_ori2 &= ~kImm16Mask;
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+ DCHECK_EQ(imm & 3, 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+ instr_ori2 &= ~kImm16Mask;
- instr_at_put(pos + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> 32) & kImm16Mask));
- instr_at_put(pos + 1 * Assembler::kInstrSize,
- instr_ori | ((imm >> 16) & kImm16Mask));
- instr_at_put(pos + 3 * Assembler::kInstrSize,
- instr_ori2 | (imm & kImm16Mask));
+ instr_at_put(pos + 0 * kInstrSize,
+ instr_lui | ((imm >> 32) & kImm16Mask));
+ instr_at_put(pos + 1 * kInstrSize,
+ instr_ori | ((imm >> 16) & kImm16Mask));
+ instr_at_put(pos + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
+ }
} else if (IsMov(instr, t8, ra)) {
- Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
+ Instr instr_lui = instr_at(pos + 2 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 3 * kInstrSize);
DCHECK(IsLui(instr_lui));
DCHECK(IsOri(instr_ori));
@@ -892,8 +956,16 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
Instr instr_b = BEQ;
instr_b = SetBranchOffset(pos, target_pos, instr_b);
+ Instr instr_j = instr_at(pos + 5 * kInstrSize);
+ Instr instr_branch_delay;
+
+ if (IsJump(instr_j)) {
+ instr_branch_delay = instr_at(pos + 6 * kInstrSize);
+ } else {
+ instr_branch_delay = instr_at(pos + 7 * kInstrSize);
+ }
instr_at_put(pos, instr_b);
- instr_at_put(pos + 1 * Assembler::kInstrSize, 0);
+ instr_at_put(pos + 1 * kInstrSize, instr_branch_delay);
} else {
int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
DCHECK_EQ(imm & 3, 0);
@@ -901,10 +973,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
- instr_at_put(pos + 4 * Assembler::kInstrSize,
- instr_lui | ((imm >> 16) & kImm16Mask));
- instr_at_put(pos + 5 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
+ instr_at_put(pos + 2 * kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pos + 3 * kInstrSize, instr_ori | (imm & kImm16Mask));
}
} else if (IsJ(instr) || IsJal(instr)) {
int32_t imm28 = target_pos - pos;
@@ -989,7 +1060,7 @@ void Assembler::bind_to(Label* L, int pos) {
target_at_put(fixup_pos, pos, false);
} else {
DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
- IsEmittedConstant(instr));
+ IsEmittedConstant(instr) || IsMov(instr, t8, ra));
target_at_put(fixup_pos, pos, false);
}
}
@@ -1405,6 +1476,25 @@ uint64_t Assembler::jump_offset(Label* L) {
return static_cast<uint64_t>(imm);
}
+uint64_t Assembler::branch_long_offset(Label* L) {
+ int64_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
+ DCHECK_EQ(offset & 3, 0);
+
+ return static_cast<uint64_t>(offset);
+}
int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
@@ -2404,7 +2494,7 @@ void Assembler::scd(Register rd, const MemOperand& rs) {
}
void Assembler::lui(Register rd, int32_t j) {
- DCHECK(is_uint16(j));
+ DCHECK(is_uint16(j) || is_int16(j));
GenInstrImmediate(LUI, zero_reg, rd, j);
}
@@ -3986,9 +4076,9 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
Instr instr = instr_at(pc);
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
if (IsLui(instr)) {
- Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
- Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
- Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
+ Instr instr_lui = instr_at(pc + 0 * kInstrSize);
+ Instr instr_ori = instr_at(pc + 1 * kInstrSize);
+ Instr instr_ori2 = instr_at(pc + 3 * kInstrSize);
DCHECK(IsOri(instr_ori));
DCHECK(IsOri(instr_ori2));
// TODO(plind): symbolic names for the shifts.
@@ -4008,12 +4098,9 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
instr_ori &= ~kImm16Mask;
instr_ori2 &= ~kImm16Mask;
- instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> 32) & kImm16Mask));
- instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr_ori | (imm >> 16 & kImm16Mask));
- instr_at_put(pc + 3 * Assembler::kInstrSize,
- instr_ori2 | (imm & kImm16Mask));
+ instr_at_put(pc + 0 * kInstrSize, instr_lui | ((imm >> 32) & kImm16Mask));
+ instr_at_put(pc + 1 * kInstrSize, instr_ori | (imm >> 16 & kImm16Mask));
+ instr_at_put(pc + 3 * kInstrSize, instr_ori2 | (imm & kImm16Mask));
return 4; // Number of instructions patched.
} else if (IsJ(instr) || IsJal(instr)) {
// Regular j/jal relocation.
@@ -4185,22 +4272,17 @@ void Assembler::CheckTrampolinePool() {
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
- { // Buffer growth (and relocation) must be blocked for internal
- // references until associated instructions are emitted and available
- // to be patched.
+ {
if (kArchVariant == kMips64r6) {
bc(&after_pool);
nop();
} else {
- Label find_pc;
or_(t8, ra, zero_reg);
- bal(&find_pc);
- or_(t9, ra, zero_reg);
- bind(&find_pc);
+ nal(); // Read PC into ra register.
+ lui(t9, 0); // Branch delay slot.
+ ori(t9, t9, 0);
+ daddu(t9, ra, t9);
or_(ra, t8, zero_reg);
- lui(t8, 0);
- ori(t8, t8, 0);
- daddu(t9, t9, t8);
// Instruction jr will take or_ from the next trampoline
// in its branch delay slot. This is the expected behavior
// in order to decrease the size of the trampoline pool.
@@ -4303,7 +4385,7 @@ void Assembler::set_target_value_at(Address pc, uint64_t target,
(target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(pc, 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(pc, 4 * kInstrSize);
}
}
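Taken together, the assembler-mips64.cc hunks above drop the absolute j/jal long branches, which required INTERNAL_REFERENCE_ENCODED relocation and blocked buffer growth, in favor of a position-independent sequence built around nal. nal (bltzal zero_reg, 0) never branches, so its only effect is to load ra with the address two instructions past itself, i.e. slot start + 3 * kInstrSize; since the lui/ori pair stores target_pos - (pos + kLongBranchPCOffset) and kLongBranchPCOffset is likewise 3 * kInstrSize, adding the pair to ra lands exactly on the target. As a sketch (offsets and comments added for illustration), one pre-r6 trampoline slot from CheckTrampolinePool is:

    or_(t8, ra, zero_reg);  // pos + 0:  save ra
    nal();                  // pos + 4:  ra <- pos + 12
    lui(t9, 0);             // pos + 8:  nal's delay slot, patched later
    ori(t9, t9, 0);         // pos + 12: patched later
    daddu(t9, ra, t9);      // pos + 16: t9 <- (pos + 12) + (target - (pos + 12))
    or_(ra, t8, zero_reg);  // pos + 20: restore ra
    jr(t9);                 // pos + 24: delay slot is the next slot's or_, which
                            // is why kTrampolineSlotsSize is now 7 * kInstrSize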
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index f94db35974..868882eb4a 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -224,8 +224,8 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
};
// s7: context register
-// s3: lithium scratch
-// s4: lithium scratch2
+// s3: scratch register
+// s4: scratch register 2
#define DECLARE_REGISTER(R) \
constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
@@ -310,6 +310,7 @@ DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
#undef DECLARE_DOUBLE_REGISTER
constexpr DoubleRegister no_freg = DoubleRegister::no_reg();
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// SIMD registers.
typedef MSARegister Simd128Register;
@@ -482,8 +483,7 @@ class MemOperand : public Operand {
friend class Assembler;
};
-
-class Assembler : public AssemblerBase {
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -565,6 +565,7 @@ class Assembler : public AssemblerBase {
}
uint64_t jump_address(Label* L);
uint64_t jump_offset(Label* L);
+ uint64_t branch_long_offset(Label* L);
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
@@ -616,16 +617,19 @@ class Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- // Size of an instruction.
- static constexpr int kInstrSize = sizeof(Instr);
-
// Difference between address of current opcode and target address offset.
- static constexpr int kBranchPCOffset = 4;
+ static constexpr int kBranchPCOffset = kInstrSize;
// Difference between address of current opcode and target address offset,
// when we are generating a sequence of instructions for long relative PC
// branches
- static constexpr int kLongBranchPCOffset = 12;
+ static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
+
+ // Adjust the ra register in the branch delay slot of the bal instruction so
+ // as to skip the instructions that are no longer needed after the PIC
+ // optimization in TurboAssembler::BranchAndLinkLong.
+
+ static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
@@ -661,7 +665,7 @@ class Assembler : public AssemblerBase {
static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static constexpr int kTrampolineSlotsSize =
- kArchVariant == kMips64r6 ? 2 * kInstrSize : 8 * kInstrSize;
+ kArchVariant == kMips64r6 ? 2 * kInstrSize : 7 * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -763,6 +767,7 @@ class Assembler : public AssemblerBase {
bltc(rs, rt, shifted_branch_offset(L));
}
void bltzal(Register rs, int16_t offset);
+ void nal() { bltzal(zero_reg, 0); }
void blezalc(Register rt, int16_t offset);
inline void blezalc(Register rt, Label* L) {
blezalc(rt, shifted_branch_offset(L));
@@ -1837,6 +1842,7 @@ class Assembler : public AssemblerBase {
static bool IsBranch(Instr instr);
static bool IsMsaBranch(Instr instr);
static bool IsBc(Instr instr);
+ static bool IsNal(Instr instr);
static bool IsBzc(Instr instr);
static bool IsBeq(Instr instr);
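The new kOptimizedBranchAndLinkLongReturnOffset constant feeds the shortcut in target_at_put above: when a label bound later turns out to be within 16-bit branch range, the position-independent call sequence emitted by TurboAssembler::BranchAndLinkLong is patched down to a bal plus a delay-slot daddiu that fixes up ra, so the callee still returns past the whole original sequence. A rough layout (illustrative only; the exact encodings are in the hunks above):

    // Emitted by BranchAndLinkLong (pre-r6, PROTECT):
    //   pos + 0   lui   t8, hi16(off)
    //   pos + 4   nal                      // ra <- pos + 12
    //   pos + 8   ori   t8, t8, lo16(off)  // nal's delay slot
    //   pos + 12  daddu t8, ra, t8
    //   pos + 16  jalr  t8                 // links ra <- pos + 24
    //   pos + 20  nop                      // jalr's delay slot
    //
    // Patched by target_at_put when the target fits a 16-bit offset:
    //   pos + 0   bal    target            // links ra <- pos + 8
    //   pos + 4   daddiu ra, ra, 16        // delay slot: ra <- pos + 24, the
    //                                      // same return address as before
    //   (pos + 8 .. pos + 20 are left in place but never reached)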
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 5ed97cc004..cd02bea0f1 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -251,7 +251,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push ra" instruction, followed by a call.
// Note: on MIPS "push" is 2 instructions.
const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+ Assembler::kCallTargetAddressOffset + (2 * kInstrSize);
// This should contain all kJSCallerSaved registers.
const RegList kSavedRegs =
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index d51ead5a6c..81a6cd4342 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -543,7 +543,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
@@ -570,7 +570,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 9f3869dff2..66e0c8470e 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -1154,30 +1154,6 @@ enum MSABranchDF {
MSA_BRANCH_V
};
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cc) {
- switch (cc) {
- case Uless:
- return Ugreater;
- case Ugreater:
- return Uless;
- case Ugreater_equal:
- return Uless_equal;
- case Uless_equal:
- return Ugreater_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- }
-}
-
// ----- Coprocessor conditions.
enum FPUCondition {
@@ -1278,11 +1254,12 @@ static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
}
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
class InstructionBase {
public:
enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
// always the value of the current instruction being executed.
kPCReadOffset = 0
@@ -1767,10 +1744,10 @@ const int kCArgSlotCount = 0;
// TODO(plind): below should be based on kPointerSize
// TODO(plind): find all usages and remove the needless instructions for n64.
-const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
+const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2;
const int kInvalidStackOffset = -1;
-const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+const int kBranchReturnOffset = 2 * kInstrSize;
static const int kNegOffset = 0x00008000;
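Much of the mechanical churn in the surrounding mips64 files follows from this hunk: kInstrSize and kInstrSizeLog2 move from the InstructionBase enum to namespace-level constexpr constants, so call sites drop the class qualifier. A small illustration of the resulting change at a typical call site:

    // Disassemble a ten-instruction window starting at the simulator's pc.
    byte* cur = reinterpret_cast<byte*>(sim_->get_pc());
    // Before this patch: byte* end = cur + (10 * Instruction::kInstrSize);
    byte* end = cur + (10 * kInstrSize);  // now a plain constexpr, still 4 bytes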
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 1d3e88372e..62d25e53b9 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -235,9 +235,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
#ifdef _MIPS_ARCH_MIPS64R6
-const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
#else
-const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
@@ -249,10 +249,10 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&table_start);
#ifdef _MIPS_ARCH_MIPS64R6
int kMaxEntriesBranchReach =
- (1 << (kImm26Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
+ (1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
#else
int kMaxEntriesBranchReach =
- (1 << (kImm16Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
+ (1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
#endif
if (count() <= kMaxEntriesBranchReach) {
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 8f77a68b21..1279d25f42 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -1079,15 +1079,14 @@ int Decoder::DecodeBreakInstr(Instruction* instr) {
Format(instr, "break, code: 'code");
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
- static_cast<void*>(
- reinterpret_cast<int32_t*>(instr + Instruction::kInstrSize)),
+ static_cast<void*>(reinterpret_cast<int32_t*>(instr + kInstrSize)),
reinterpret_cast<uint64_t>(
- *reinterpret_cast<char**>(instr + Instruction::kInstrSize)));
+ *reinterpret_cast<char**>(instr + kInstrSize)));
// Size 3: the break_ instr, plus embedded 64-bit char pointer.
- return 3 * Instruction::kInstrSize;
+ return 3 * kInstrSize;
} else {
Format(instr, "break, code: 'code");
- return Instruction::kInstrSize;
+ return kInstrSize;
}
}
@@ -1897,10 +1896,9 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
default:
UNREACHABLE();
}
- return Instruction::kInstrSize;
+ return kInstrSize;
}
-
void Decoder::DecodeTypeImmediateCOP1(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case BC1:
@@ -3023,10 +3021,9 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
UNSUPPORTED_MIPS();
}
}
- return Instruction::kInstrSize;
+ return kInstrSize;
}
-
} // namespace internal
} // namespace v8
@@ -3071,13 +3068,6 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
v8::internal::Decoder d(converter_, buffer);
@@ -3090,10 +3080,10 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return -1;
}
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
NameConverter converter;
- Disassembler d(converter);
+ Disassembler d(converter, unimplemented_action);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 94058aa721..8f4fdcc905 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -246,30 +246,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (argc)
- a2, // address of first argument (argv)
- a1 // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 889d09f27e..b55b47a2ed 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -21,6 +21,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
@@ -2389,7 +2390,7 @@ void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs,
void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs,
FPURegister scratch, Register result) {
DCHECK(fs != scratch);
- DCHECK(!AreAliased(rd, result, at));
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));
Label simple_convert, done, fail;
if (result.is_valid()) {
@@ -2444,7 +2445,7 @@ void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs,
void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs,
FPURegister scratch, Register result) {
DCHECK(fs != scratch);
- DCHECK(!AreAliased(rd, result, at));
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));
Label simple_convert, done, fail;
if (result.is_valid()) {
@@ -4206,6 +4207,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -4218,32 +4220,10 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
-int TurboAssembler::CallSize(Register target, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bd) {
- int size = 0;
-
- if (cond == cc_always) {
- size += 1;
- } else {
- size += 3;
- }
-
- if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
-
- return size * kInstrSize;
-}
-
-
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
-#ifdef DEBUG
- int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
-#endif
-
BlockTrampolinePoolScope block_trampoline_pool(this);
- Label start;
- bind(&start);
if (kArchVariant == kMips64r6 && bd == PROTECT) {
if (cond == cc_always) {
jialc(target, 0);
@@ -4263,35 +4243,13 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
-
-#ifdef DEBUG
- DCHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
-#endif
-}
-
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- int size = CallSize(t9, cond, rs, rt, bd);
- return size + 4 * kInstrSize;
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- Label start;
- bind(&start);
li(t9, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
Call(t9, cond, rs, rt, bd);
- DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- return CallSize(code.address(), rmode, cond, rs, rt, bd);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
@@ -4309,6 +4267,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -4318,12 +4277,8 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
}
}
- Label start;
- bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
Call(code.address(), rmode, cond, rs, rt, bd);
- DCHECK_EQ(CallSize(code, rmode, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
}
void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
@@ -4336,17 +4291,22 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
} else {
- EmitForbiddenSlotInstruction();
+ // Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- j(L);
+ int64_t imm64;
+ imm64 = branch_long_offset(L);
+ DCHECK(is_int32(imm64));
+ or_(t8, ra, zero_reg);
+ nal(); // Read PC into ra register.
+ lui(t9, (imm64 & kHiMaskOf32) >> kLuiShift); // Branch delay slot.
+ ori(t9, t9, (imm64 & kImm16Mask));
+ daddu(t9, ra, t9);
+ if (bdslot == USE_DELAY_SLOT) {
+ or_(ra, t8, zero_reg);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
+ jr(t9);
+ // Emit an or_ in the branch delay slot if it's protected.
+ if (bdslot == PROTECT) or_(ra, t8, zero_reg);
}
}
@@ -4355,15 +4315,16 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
} else {
- EmitForbiddenSlotInstruction();
+ // Generate position independent long branch and link.
BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- jal(L);
- }
+ int64_t imm64;
+ imm64 = branch_long_offset(L);
+ DCHECK(is_int32(imm64));
+ lui(t8, (imm64 & kHiMaskOf32) >> kLuiShift);
+ nal(); // Read PC into ra register.
+ ori(t8, t8, (imm64 & kImm16Mask)); // Branch delay slot.
+ daddu(t8, ra, t8);
+ jalr(t8);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
}
@@ -5068,6 +5029,15 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ PrepareCallCFunction(0, a0);
+ li(a0, Operand(static_cast<int>(reason)));
+ CallCFunction(ExternalReference::abort_with_reason(), 1);
+ return;
+ }
+
Move(a0, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -5802,7 +5772,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
- And(scratch, object, Operand(~Page::kPageAlignmentMask));
+ And(scratch, object, Operand(~kPageAlignmentMask));
Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
@@ -5833,42 +5803,20 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
UNREACHABLE();
}
-bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8,
- Register reg9, Register reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// This push on ra and the pop below together ensure that we restore the
// register ra, which is needed while computing the code start address.
push(ra);
- // The bal instruction puts the address of the current instruction into
+ // The nal instruction puts the address of the current instruction into
// the return address (ra) register, which we can use later on.
- Label current;
- bal(&current);
- nop();
+ if (kArchVariant == kMips64r6) {
+ addiupc(ra, 1);
+ } else {
+ nal();
+ nop();
+ }
int pc = pc_offset();
- bind(&current);
li(dst, Operand(pc));
Dsubu(dst, ra, dst);
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 3636568136..7dd5761571 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -35,11 +35,11 @@ constexpr Register kJavaScriptCallExtraArg1Register = a2;
constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
constexpr Register kWasmInstanceRegister = a0;
// Forward declarations.
-enum class AbortReason;
-class JumpTarget;
+enum class AbortReason : uint8_t;
// Reserved Register Usage Summary.
//
@@ -60,14 +60,6 @@ enum LeaveExitFrameMode {
NO_EMIT_RETURN = false
};
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
USE_DELAY_SLOT,
@@ -103,13 +95,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg5 = no_reg,
Register reg6 = no_reg);
-bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
- Register reg4 = no_reg, Register reg5 = no_reg,
- Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg, Register reg9 = no_reg,
- Register reg10 = no_reg);
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -148,7 +133,7 @@ inline MemOperand CFunctionArgumentOperand(int index) {
return MemOperand(sp, offset);
}
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -282,13 +267,8 @@ class TurboAssembler : public TurboAssemblerBase {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
- static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
@@ -1286,7 +1266,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
bind(&here);
daddu(scratch, scratch, ra);
pop(ra);
- Ld(scratch, MemOperand(scratch, 6 * v8::internal::Assembler::kInstrSize));
+ Ld(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
}
jr(scratch);
nop(); // Branch delay slot nop.
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index f5231fe89f..7f57b4da28 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -360,7 +360,7 @@ void MipsDebugger::Debug() {
} else {
// Allow si to jump over generated breakpoints.
PrintF("/!\\ Jumping over generated breakpoint.\n");
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -494,7 +494,7 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
@@ -503,7 +503,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@@ -511,7 +511,7 @@ void MipsDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
+ end = cur + (value * kInstrSize);
}
}
} else {
@@ -519,7 +519,7 @@ void MipsDebugger::Debug() {
int64_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
+ end = cur + (value2 * kInstrSize);
}
}
@@ -527,7 +527,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
- cur += Instruction::kInstrSize;
+ cur += kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@@ -554,12 +554,10 @@ void MipsDebugger::Debug() {
PrintF("No flags on MIPS !\n");
} else if (strcmp(cmd, "stop") == 0) {
int64_t value;
- intptr_t stop_pc = sim_->get_pc() -
- 2 * Instruction::kInstrSize;
+ intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
- reinterpret_cast<Instruction*>(stop_pc +
- Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->IsStopInstruction(stop_instr)) {
@@ -628,20 +626,20 @@ void MipsDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int64_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
}
} else {
int64_t value1;
int64_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
+ end = cur + (value2 * kInstrSize);
}
}
@@ -649,7 +647,7 @@ void MipsDebugger::Debug() {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
buffer.start());
- cur += Instruction::kInstrSize;
+ cur += kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
@@ -802,8 +800,7 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize));
+ cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@@ -3672,7 +3669,7 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int64_t next_pc = rs();
int64_t current_pc = get_pc();
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_pc(next_pc);
pc_modified_ = true;
@@ -3683,9 +3680,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
int64_t current_pc = get_pc();
int32_t return_addr_reg = rd_reg();
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(return_addr_reg, current_pc + 2 * Instruction::kInstrSize);
+ set_register(return_addr_reg, current_pc + 2 * kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
@@ -6544,12 +6541,12 @@ void Simulator::DecodeTypeImmediate() {
[this, &next_pc, &execute_branch_delay_instruction](bool do_branch) {
execute_branch_delay_instruction = true;
int64_t current_pc = get_pc();
+ set_register(31, current_pc + 2 * kInstrSize);
if (do_branch) {
int16_t imm16 = instr_.Imm16Value();
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ next_pc = current_pc + (imm16 << 2) + kInstrSize;
} else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
+ next_pc = current_pc + 2 * kInstrSize;
}
};
@@ -6559,9 +6556,9 @@ void Simulator::DecodeTypeImmediate() {
int64_t current_pc = get_pc();
if (do_branch) {
int16_t imm16 = instr_.Imm16Value();
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ next_pc = current_pc + (imm16 << 2) + kInstrSize;
} else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
+ next_pc = current_pc + 2 * kInstrSize;
}
};
@@ -6583,9 +6580,9 @@ void Simulator::DecodeTypeImmediate() {
// pc + kInstrSize + 511 * kInstrSize]
int16_t offset = static_cast<int16_t>(imm16 << (bitsIn16Int - 10)) >>
(bitsIn16Int - 12);
- next_pc = current_pc + offset + Instruction::kInstrSize;
+ next_pc = current_pc + offset + kInstrSize;
} else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
+ next_pc = current_pc + 2 * kInstrSize;
}
};
@@ -6596,8 +6593,8 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
- next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
- set_register(31, current_pc + Instruction::kInstrSize);
+ next_pc = current_pc + (imm << 2) + kInstrSize;
+ set_register(31, current_pc + kInstrSize);
}
};
@@ -6608,7 +6605,7 @@ void Simulator::DecodeTypeImmediate() {
int32_t imm = instr_.ImmValue(bits);
imm <<= 32 - bits;
imm >>= 32 - bits;
- next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
+ next_pc = get_pc() + (imm << 2) + kInstrSize;
}
};
@@ -6814,7 +6811,7 @@ void Simulator::DecodeTypeImmediate() {
BranchCompactHelper(rs != 0, 21);
} else { // JIALC
int64_t current_pc = get_pc();
- set_register(31, current_pc + Instruction::kInstrSize);
+ set_register(31, current_pc + kInstrSize);
next_pc = rt + imm16;
}
break;
@@ -7213,7 +7210,7 @@ void Simulator::DecodeTypeImmediate() {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(get_pc() + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -7238,13 +7235,13 @@ void Simulator::DecodeTypeJump() {
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (simInstr.IsLinkingInstruction()) {
- set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
@@ -7290,8 +7287,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int64_t>(instr) +
- Instruction::kInstrSize);
+ set_register(pc, reinterpret_cast<int64_t>(instr) + kInstrSize);
}
}
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 30db7b95d6..0ed51c21e1 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -469,7 +469,7 @@ class Simulator : public SimulatorBase {
// Compact branch guard.
void CheckForbiddenSlot(int64_t current_pc) {
Instruction* instr_after_compact_branch =
- reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + kInstrSize);
if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
FATAL(
"Error: Unexpected instruction 0x%08x immediately after a "
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index d4ee72654f..122cdde5bf 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -616,10 +616,7 @@ class DataHandler::BodyDescriptor final : public BodyDescriptorBase {
static_assert(kData1Offset < kSizeWithData1,
"Field order must be in sync with this iteration code");
IteratePointers(obj, kSmiHandlerOffset, kData1Offset, v);
- if (object_size >= kSizeWithData1) {
- IterateMaybeWeakPointer(obj, kData1Offset, v);
- IteratePointers(obj, kData1Offset + kPointerSize, object_size, v);
- }
+ IterateMaybeWeakPointers(obj, kData1Offset, object_size, v);
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -729,7 +726,10 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
#endif // V8_INTL_SUPPORT
case WASM_GLOBAL_TYPE:
@@ -762,8 +762,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
return Op::template apply<Cell::BodyDescriptor>(p1, p2, p3, p4);
case PROPERTY_CELL_TYPE:
return Op::template apply<PropertyCell::BodyDescriptor>(p1, p2, p3, p4);
- case WEAK_CELL_TYPE:
- return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
case SYMBOL_TYPE:
return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3, p4);
case BYTECODE_ARRAY_TYPE:
@@ -782,6 +780,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case PRE_PARSED_SCOPE_DATA_TYPE:
return Op::template apply<PreParsedScopeData::BodyDescriptor>(p1, p2, p3,
p4);
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
+ return Op::template apply<
+ UncompiledDataWithoutPreParsedScope::BodyDescriptor>(p1, p2, p3, p4);
case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
return Op::template apply<
UncompiledDataWithPreParsedScope::BodyDescriptor>(p1, p2, p3, p4);
@@ -791,10 +792,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
case BIGINT_TYPE:
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
return ReturnType();
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case FIXED_##TYPE##_ARRAY_TYPE: \
return Op::template apply<FixedTypedArrayBase::BodyDescriptor>(p1, p2, p3, \
p4);
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h
index d6baf86e11..6277f9d8bd 100644
--- a/deps/v8/src/objects-body-descriptors.h
+++ b/deps/v8/src/objects-body-descriptors.h
@@ -109,6 +109,43 @@ class FlexibleBodyDescriptor final : public BodyDescriptorBase {
typedef FlexibleBodyDescriptor<HeapObject::kHeaderSize> StructBodyDescriptor;
+// This class describes a body of an object which has a parent class that also
+// has a body descriptor. This represents a union of the parent's body
+// descriptor, and a new descriptor for the child -- so both the parent's and
+// the child's slots are iterated. The parent must be fixed size, and its slots
+// must be disjoint from the child's.
+template <class ParentBodyDescriptor, class ChildBodyDescriptor>
+class SubclassBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ // The parent must end before the child's start offset, to make sure that
+ // their slots are disjoint.
+ STATIC_ASSERT(ParentBodyDescriptor::kSize <=
+ ChildBodyDescriptor::kStartOffset);
+
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return ParentBodyDescriptor::IsValidSlot(map, obj, offset) ||
+ ChildBodyDescriptor::IsValidSlot(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, ObjectVisitor* v) {
+ ParentBodyDescriptor::IterateBody(map, obj, v);
+ ChildBodyDescriptor::IterateBody(map, obj, v);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ ParentBodyDescriptor::IterateBody(map, obj, object_size, v);
+ ChildBodyDescriptor::IterateBody(map, obj, object_size, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ // The child should know its full size.
+ return ChildBodyDescriptor::SizeOf(map, object);
+ }
+};
+
} // namespace internal
} // namespace v8
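A hypothetical use of the new SubclassBodyDescriptor, built on the FixedBodyDescriptor<start, end, size> helper declared earlier in this header; kParentSize and kChildSize are placeholder constants invented for this sketch:

    // Parent body: tagged slots from the header up to kParentSize.
    typedef FixedBodyDescriptor<HeapObject::kHeaderSize, kParentSize, kParentSize>
        ParentBody;
    // Child body: an extra run of tagged slots appended right after the parent.
    typedef FixedBodyDescriptor<kParentSize, kChildSize, kChildSize> ChildBody;
    // Union of both: iterates the parent's slots, then the child's, and reports
    // the child's (full) size; the STATIC_ASSERT holds because
    // ParentBody::kSize <= ChildBody::kStartOffset.
    typedef SubclassBodyDescriptor<ParentBody, ChildBody> BodyDescriptor;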
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 8f149c8788..3ce26f95c9 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -15,17 +15,24 @@
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-collator-inl.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/literal-objects-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-locale-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/js-relative-time-format-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/maybe-object.h"
@@ -196,7 +203,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
FeedbackVector::cast(this)->FeedbackVectorVerify(isolate);
break;
-#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype) \
case FIXED_##TYPE##_ARRAY_TYPE: \
Fixed##Type##Array::cast(this)->FixedTypedArrayVerify(isolate); \
break;
@@ -217,10 +224,12 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
JSObject::cast(this)->JSObjectVerify(isolate);
break;
+ case WASM_MODULE_TYPE:
+ WasmModuleObject::cast(this)->WasmModuleObjectVerify(isolate);
+ break;
case WASM_INSTANCE_TYPE:
WasmInstanceObject::cast(this)->WasmInstanceObjectVerify(isolate);
break;
@@ -257,9 +266,6 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case PROPERTY_CELL_TYPE:
PropertyCell::cast(this)->PropertyCellVerify(isolate);
break;
- case WEAK_CELL_TYPE:
- WeakCell::cast(this)->WeakCellVerify(isolate);
- break;
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayVerify(isolate);
break;
@@ -350,9 +356,18 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
CodeDataContainer::cast(this)->CodeDataContainerVerify(isolate);
break;
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ JSCollator::cast(this)->JSCollatorVerify(isolate);
+ break;
+ case JS_INTL_LIST_FORMAT_TYPE:
+ JSListFormat::cast(this)->JSListFormatVerify(isolate);
+ break;
case JS_INTL_LOCALE_TYPE:
JSLocale::cast(this)->JSLocaleVerify(isolate);
break;
+ case JS_INTL_PLURAL_RULES_TYPE:
+ JSPluralRules::cast(this)->JSPluralRulesVerify(isolate);
+ break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatVerify(isolate);
break;
@@ -436,8 +451,7 @@ void FixedTypedArray<Traits>::FixedTypedArrayVerify(Isolate* isolate) {
}
}
-
-bool JSObject::ElementsAreSafeToExamine() {
+bool JSObject::ElementsAreSafeToExamine() const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
return reinterpret_cast<Map*>(elements()) !=
@@ -933,9 +947,21 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
CHECK(feedback_cell()->IsFeedbackCell());
CHECK(code()->IsCode());
CHECK(map()->is_callable());
+ Handle<JSFunction> function(this, isolate);
+ LookupIterator it(isolate, function, isolate->factory()->prototype_string(),
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
if (has_prototype_slot()) {
VerifyObjectField(isolate, kPrototypeOrInitialMapOffset);
}
+
+ if (has_prototype_property()) {
+ CHECK(it.IsFound());
+ CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ CHECK(it.GetAccessors()->IsAccessorInfo());
+ } else {
+ CHECK(!it.IsFound() || it.state() != LookupIterator::ACCESSOR ||
+ !it.GetAccessors()->IsAccessorInfo());
+ }
}
void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
@@ -943,9 +969,8 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
VerifyObjectField(isolate, kFunctionDataOffset);
VerifyObjectField(isolate, kOuterScopeInfoOrFeedbackMetadataOffset);
- VerifyObjectField(isolate, kFunctionIdentifierOrDebugInfoOffset);
+ VerifyObjectField(isolate, kScriptOrDebugInfoOffset);
VerifyObjectField(isolate, kNameOrScopeInfoOffset);
- VerifyObjectField(isolate, kScriptOffset);
Object* value = name_or_scope_info();
CHECK(value == kNoSharedNameSentinel || value->IsString() ||
@@ -960,8 +985,8 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
HasUncompiledDataWithPreParsedScope() ||
HasUncompiledDataWithoutPreParsedScope());
- CHECK(function_identifier_or_debug_info()->IsUndefined(isolate) ||
- HasBuiltinFunctionId() || HasInferredName() || HasDebugInfo());
+ CHECK(script_or_debug_info()->IsUndefined(isolate) ||
+ script_or_debug_info()->IsScript() || HasDebugInfo());
if (!is_compiled()) {
CHECK(!HasFeedbackMetadata());
@@ -1000,6 +1025,7 @@ void JSGlobalProxy::JSGlobalProxyVerify(Isolate* isolate) {
CHECK(IsJSGlobalProxy());
JSObjectVerify(isolate);
VerifyObjectField(isolate, JSGlobalProxy::kNativeContextOffset);
+ CHECK(map()->is_access_check_needed());
// Make sure that this object has no properties or elements.
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@@ -1070,11 +1096,6 @@ void PropertyCell::PropertyCellVerify(Isolate* isolate) {
VerifyObjectField(isolate, kValueOffset);
}
-void WeakCell::WeakCellVerify(Isolate* isolate) {
- CHECK(IsWeakCell());
- VerifyObjectField(isolate, kValueOffset);
-}
-
void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
CHECK(IsCodeDataContainer());
VerifyObjectField(isolate, kNextCodeLinkOffset);
@@ -1180,8 +1201,7 @@ void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
void JSArrayIterator::JSArrayIteratorVerify(Isolate* isolate) {
CHECK(IsJSArrayIterator());
JSObjectVerify(isolate);
- CHECK(iterated_object()->IsJSReceiver() ||
- iterated_object()->IsUndefined(isolate));
+ CHECK(iterated_object()->IsJSReceiver());
CHECK_GE(next_index()->Number(), 0);
CHECK_LE(next_index()->Number(), kMaxSafeInteger);
@@ -1409,6 +1429,7 @@ void JSRegExpStringIterator::JSRegExpStringIteratorVerify(Isolate* isolate) {
void JSProxy::JSProxyVerify(Isolate* isolate) {
CHECK(IsJSProxy());
+ CHECK(map()->GetConstructor()->IsJSFunction());
VerifyPointer(isolate, target());
VerifyPointer(isolate, handler());
if (!IsRevoked()) {
@@ -1534,7 +1555,8 @@ void Module::ModuleVerify(Isolate* isolate) {
void PrototypeInfo::PrototypeInfoVerify(Isolate* isolate) {
CHECK(IsPrototypeInfo());
- CHECK(weak_cell()->IsWeakCell() || weak_cell()->IsUndefined(isolate));
+ Object* module_ns = module_namespace();
+ CHECK(module_ns->IsJSModuleNamespace() || module_ns->IsUndefined(isolate));
if (prototype_users()->IsWeakArrayList()) {
PrototypeUsers::Verify(WeakArrayList::cast(prototype_users()));
} else {
@@ -1643,13 +1665,16 @@ void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
CHECK(wrapper_code()->kind() == Code::JS_TO_WASM_FUNCTION ||
wrapper_code()->kind() == Code::C_WASM_ENTRY);
VerifyObjectField(isolate, kInstanceOffset);
+ VerifySmiField(kJumpTableOffsetOffset);
VerifySmiField(kFunctionIndexOffset);
}
void WasmModuleObject::WasmModuleObjectVerify(Isolate* isolate) {
CHECK(IsWasmModuleObject());
VerifyObjectField(isolate, kNativeModuleOffset);
+ CHECK(managed_native_module()->IsForeign());
VerifyObjectField(isolate, kExportWrappersOffset);
+ CHECK(export_wrappers()->IsFixedArray());
VerifyObjectField(isolate, kScriptOffset);
VerifyObjectField(isolate, kAsmJsOffsetTableOffset);
VerifyObjectField(isolate, kBreakPointInfosOffset);
@@ -1665,10 +1690,10 @@ void DataHandler::DataHandlerVerify(Isolate* isolate) {
VerifyMaybeObjectField(isolate, kData1Offset);
}
if (data_count >= 2) {
- VerifyObjectField(isolate, kData2Offset);
+ VerifyMaybeObjectField(isolate, kData2Offset);
}
if (data_count >= 3) {
- VerifyObjectField(isolate, kData3Offset);
+ VerifyMaybeObjectField(isolate, kData3Offset);
}
}
@@ -1783,16 +1808,17 @@ void Script::ScriptVerify(Isolate* isolate) {
}
void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
- FixedArray::cast(this)->FixedArrayVerify(isolate);
+ WeakFixedArray::cast(this)->WeakFixedArrayVerify(isolate);
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < length(); i++) {
- Object* e = FixedArray::get(i);
- if (e->IsWeakCell()) {
- if (!WeakCell::cast(e)->cleared()) {
- Map::cast(WeakCell::cast(e)->value())->DictionaryMapVerify(isolate);
- }
+ MaybeObject* e = WeakFixedArray::Get(i);
+ HeapObject* heap_object;
+ if (e->ToWeakHeapObject(&heap_object)) {
+ Map::cast(heap_object)->DictionaryMapVerify(isolate);
} else {
- CHECK(e->IsUndefined(isolate));
+ CHECK(e->IsClearedWeakHeapObject() ||
+ (e->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsUndefined(isolate)));
}
}
}
@@ -1801,7 +1827,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
void DebugInfo::DebugInfoVerify(Isolate* isolate) {
CHECK(IsDebugInfo());
VerifyPointer(isolate, shared());
- VerifyPointer(isolate, function_identifier());
+ VerifyPointer(isolate, script());
VerifyPointer(isolate, original_bytecode_array());
VerifyPointer(isolate, break_points());
}
@@ -1828,12 +1854,14 @@ void PreParsedScopeData::PreParsedScopeDataVerify(Isolate* isolate) {
void UncompiledDataWithPreParsedScope::UncompiledDataWithPreParsedScopeVerify(
Isolate* isolate) {
CHECK(IsUncompiledDataWithPreParsedScope());
+ VerifyPointer(isolate, inferred_name());
VerifyPointer(isolate, pre_parsed_scope_data());
}
void UncompiledDataWithoutPreParsedScope::
UncompiledDataWithoutPreParsedScopeVerify(Isolate* isolate) {
CHECK(IsUncompiledDataWithoutPreParsedScope());
+ VerifyPointer(isolate, inferred_name());
}
void InterpreterData::InterpreterDataVerify(Isolate* isolate) {
@@ -1843,7 +1871,23 @@ void InterpreterData::InterpreterDataVerify(Isolate* isolate) {
}
#ifdef V8_INTL_SUPPORT
+void JSCollator::JSCollatorVerify(Isolate* isolate) {
+ CHECK(IsJSCollator());
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kICUCollatorOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
+ VerifyObjectField(isolate, kBoundCompareOffset);
+}
+
+void JSListFormat::JSListFormatVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kLocaleOffset);
+ VerifyObjectField(isolate, kFormatterOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
+}
+
void JSLocale::JSLocaleVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
VerifyObjectField(isolate, kLanguageOffset);
VerifyObjectField(isolate, kScriptOffset);
VerifyObjectField(isolate, kRegionOffset);
@@ -1858,11 +1902,20 @@ void JSLocale::JSLocaleVerify(Isolate* isolate) {
VerifyObjectField(isolate, kNumberingSystemOffset);
}
+void JSPluralRules::JSPluralRulesVerify(Isolate* isolate) {
+ CHECK(IsJSPluralRules());
+ JSObjectVerify(isolate);
+ VerifyObjectField(isolate, kLocaleOffset);
+ VerifyObjectField(isolate, kTypeOffset);
+ VerifyObjectField(isolate, kICUPluralRulesOffset);
+ VerifyObjectField(isolate, kICUDecimalFormatOffset);
+}
+
void JSRelativeTimeFormat::JSRelativeTimeFormatVerify(Isolate* isolate) {
+ JSObjectVerify(isolate);
VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kStyleOffset);
- VerifyObjectField(isolate, kNumericOffset);
VerifyObjectField(isolate, kFormatterOffset);
+ VerifyObjectField(isolate, kFlagsOffset);
}
#endif // V8_INTL_SUPPORT
@@ -1910,16 +1963,16 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
break;
}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
- { info->number_of_objects_with_fast_elements_++;
- FixedArrayBase* e = FixedArrayBase::cast(elements());
- info->number_of_fast_used_elements_ += e->length();
- break;
- }
+ {
+ info->number_of_objects_with_fast_elements_++;
+ FixedArrayBase* e = FixedArrayBase::cast(elements());
+ info->number_of_fast_used_elements_ += e->length();
+ break;
+ }
case DICTIONARY_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS: {
NumberDictionary* dict = element_dictionary();
@@ -2055,17 +2108,13 @@ bool TransitionsAccessor::IsConsistentWithBackPointers() {
// Estimates if there is a path from the object to a context.
// This function is not precise, and can return false even if
// there is a path to a context.
-bool CanLeak(Object* obj, Heap* heap, bool skip_weak_cell) {
+bool CanLeak(Object* obj, Heap* heap) {
if (!obj->IsHeapObject()) return false;
- if (obj->IsWeakCell()) {
- if (skip_weak_cell) return false;
- return CanLeak(WeakCell::cast(obj)->value(), heap, skip_weak_cell);
- }
if (obj->IsCell()) {
- return CanLeak(Cell::cast(obj)->value(), heap, skip_weak_cell);
+ return CanLeak(Cell::cast(obj)->value(), heap);
}
if (obj->IsPropertyCell()) {
- return CanLeak(PropertyCell::cast(obj)->value(), heap, skip_weak_cell);
+ return CanLeak(PropertyCell::cast(obj)->value(), heap);
}
if (obj->IsContext()) return true;
if (obj->IsMap()) {
@@ -2076,17 +2125,16 @@ bool CanLeak(Object* obj, Heap* heap, bool skip_weak_cell) {
}
return true;
}
- return CanLeak(HeapObject::cast(obj)->map(), heap, skip_weak_cell);
+ return CanLeak(HeapObject::cast(obj)->map(), heap);
}
void Code::VerifyEmbeddedObjects(Isolate* isolate, VerifyMode mode) {
if (kind() == OPTIMIZED_FUNCTION) return;
Heap* heap = isolate->heap();
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- bool skip_weak_cell = (mode == kNoContextSpecificPointers) ? false : true;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
Object* target = it.rinfo()->target_object();
- DCHECK(!CanLeak(target, heap, skip_weak_cell));
+ DCHECK(!CanLeak(target, heap));
}
}
diff --git a/deps/v8/src/objects-definitions.h b/deps/v8/src/objects-definitions.h
index 5e922a487c..3b0a379632 100644
--- a/deps/v8/src/objects-definitions.h
+++ b/deps/v8/src/objects-definitions.h
@@ -166,7 +166,6 @@ namespace internal {
V(STORE_HANDLER_TYPE) \
V(UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
V(UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
- V(WEAK_CELL_TYPE) \
V(WEAK_ARRAY_LIST_TYPE) \
\
V(JS_PROXY_TYPE) \
@@ -217,7 +216,10 @@ namespace internal {
#ifdef V8_INTL_SUPPORT
#define INSTANCE_TYPE_LIST(V) \
INSTANCE_TYPE_LIST_BEFORE_INTL(V) \
+ V(JS_INTL_COLLATOR_TYPE) \
+ V(JS_INTL_LIST_FORMAT_TYPE) \
V(JS_INTL_LOCALE_TYPE) \
+ V(JS_INTL_PLURAL_RULES_TYPE) \
V(JS_INTL_RELATIVE_TIME_FORMAT_TYPE) \
INSTANCE_TYPE_LIST_AFTER_INTL(V)
#else
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index e0a9a8becf..05b2b9653f 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -29,9 +29,11 @@
#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup-cache-inl.h"
-#include "src/lookup.h"
+#include "src/lookup-inl.h"
+#include "src/maybe-handles-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/js-proxy-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/regexp-match-info.h"
@@ -40,7 +42,7 @@
#include "src/objects/templates.h"
#include "src/property-details.h"
#include "src/property.h"
-#include "src/prototype.h"
+#include "src/prototype-inl.h"
#include "src/roots-inl.h"
#include "src/transitions-inl.h"
#include "src/v8memory.h"
@@ -71,100 +73,58 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}
+namespace InstanceTypeChecker {
+
+// Define type checkers for classes with single instance type.
+INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER);
+
+#define TYPED_ARRAY_INSTANCE_TYPE_CHECKER(Type, type, TYPE, ctype) \
+ INSTANCE_TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
+TYPED_ARRAYS(TYPED_ARRAY_INSTANCE_TYPE_CHECKER)
+#undef TYPED_ARRAY_INSTANCE_TYPE_CHECKER
+
+#define STRUCT_INSTANCE_TYPE_CHECKER(NAME, Name, name) \
+ INSTANCE_TYPE_CHECKER(Name, NAME##_TYPE)
+STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER)
+#undef STRUCT_INSTANCE_TYPE_CHECKER
+
+// Define type checkers for classes with ranges of instance types.
+#define INSTANCE_TYPE_CHECKER_RANGE(type, first_instance_type, \
+ last_instance_type) \
+ V8_INLINE bool Is##type(InstanceType instance_type) { \
+ return instance_type >= first_instance_type && \
+ instance_type <= last_instance_type; \
+ }
+INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE);
+#undef INSTANCE_TYPE_CHECKER_RANGE
+
+V8_INLINE bool IsFixedArrayBase(InstanceType instance_type) {
+ return IsFixedArray(instance_type) || IsFixedDoubleArray(instance_type) ||
+ IsFixedTypedArrayBase(instance_type);
+}
+
+V8_INLINE bool IsHeapObject(InstanceType instance_type) { return true; }
+
+V8_INLINE bool IsInternalizedString(InstanceType instance_type) {
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ return (instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+ (kStringTag | kInternalizedTag);
+}
+
+V8_INLINE bool IsJSObject(InstanceType instance_type) {
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type >= FIRST_JS_OBJECT_TYPE;
+}
+
+} // namespace InstanceTypeChecker
+
// TODO(v8:7786): For instance types that have a single map instance on the
// roots, and when that map is embedded in the binary, compare against the map
// pointer rather than looking up the instance type.
-TYPE_CHECKER(AllocationSite, ALLOCATION_SITE_TYPE)
-TYPE_CHECKER(BigInt, BIGINT_TYPE)
-TYPE_CHECKER(ObjectBoilerplateDescription, OBJECT_BOILERPLATE_DESCRIPTION_TYPE)
-TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
-TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
-TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
-TYPE_CHECKER(CallHandlerInfo, CALL_HANDLER_INFO_TYPE)
-TYPE_CHECKER(Cell, CELL_TYPE)
-TYPE_CHECKER(Code, CODE_TYPE)
-TYPE_CHECKER(CodeDataContainer, CODE_DATA_CONTAINER_TYPE)
-TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
-TYPE_CHECKER(EphemeronHashTable, EPHEMERON_HASH_TABLE_TYPE)
-TYPE_CHECKER(FeedbackCell, FEEDBACK_CELL_TYPE)
-TYPE_CHECKER(FeedbackMetadata, FEEDBACK_METADATA_TYPE)
-TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
-TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(FixedArrayOfWeakCells, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
-TYPE_CHECKER(Foreign, FOREIGN_TYPE)
-TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-TYPE_CHECKER(GlobalDictionary, GLOBAL_DICTIONARY_TYPE)
-TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
-TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
-TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
-TYPE_CHECKER(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE)
-TYPE_CHECKER(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)
-TYPE_CHECKER(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE)
-TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
-TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
-TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
-TYPE_CHECKER(JSDate, JS_DATE_TYPE)
-TYPE_CHECKER(JSError, JS_ERROR_TYPE)
-TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
-TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
-TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
-TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
-TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
-TYPE_CHECKER(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE)
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
-TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
-TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
-TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
-TYPE_CHECKER(Map, MAP_TYPE)
-TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
-TYPE_CHECKER(NameDictionary, NAME_DICTIONARY_TYPE)
-TYPE_CHECKER(NativeContext, NATIVE_CONTEXT_TYPE)
-TYPE_CHECKER(NumberDictionary, NUMBER_DICTIONARY_TYPE)
-TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(OrderedHashMap, ORDERED_HASH_MAP_TYPE)
-TYPE_CHECKER(OrderedHashSet, ORDERED_HASH_SET_TYPE)
-TYPE_CHECKER(PreParsedScopeData, PRE_PARSED_SCOPE_DATA_TYPE)
-TYPE_CHECKER(PropertyArray, PROPERTY_ARRAY_TYPE)
-TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
-TYPE_CHECKER(PropertyDescriptorObject, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(ScopeInfo, SCOPE_INFO_TYPE)
-TYPE_CHECKER(ScriptContextTable, SCRIPT_CONTEXT_TABLE_TYPE)
-TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(SimpleNumberDictionary, SIMPLE_NUMBER_DICTIONARY_TYPE)
-TYPE_CHECKER(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE)
-TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
-TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
-TYPE_CHECKER(StringTable, STRING_TABLE_TYPE)
-TYPE_CHECKER(Symbol, SYMBOL_TYPE)
-TYPE_CHECKER(TemplateObjectDescription, TUPLE2_TYPE)
-TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
-TYPE_CHECKER(UncompiledDataWithoutPreParsedScope,
- UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE)
-TYPE_CHECKER(UncompiledDataWithPreParsedScope,
- UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE)
-TYPE_CHECKER(WasmGlobalObject, WASM_GLOBAL_TYPE)
-TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
-TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
-TYPE_CHECKER(WasmModuleObject, WASM_MODULE_TYPE)
-TYPE_CHECKER(WasmTableObject, WASM_TABLE_TYPE)
-TYPE_CHECKER(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
-TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
-
-#ifdef V8_INTL_SUPPORT
-TYPE_CHECKER(JSLocale, JS_INTL_LOCALE_TYPE)
-TYPE_CHECKER(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE)
-#endif // V8_INTL_SUPPORT
-
-#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
- TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
+INSTANCE_TYPE_CHECKERS(TYPE_CHECKER);
+
+#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype) \
+ TYPE_CHECKER(Fixed##Type##Array)
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER
@@ -173,22 +133,6 @@ bool HeapObject::IsUncompiledData() const {
IsUncompiledDataWithPreParsedScope();
}
-bool HeapObject::IsFixedArrayBase() const {
- return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
-}
-
-bool HeapObject::IsFixedArray() const {
- InstanceType instance_type = map()->instance_type();
- return instance_type >= FIRST_FIXED_ARRAY_TYPE &&
- instance_type <= LAST_FIXED_ARRAY_TYPE;
-}
-
-bool HeapObject::IsWeakFixedArray() const {
- InstanceType instance_type = map()->instance_type();
- return instance_type >= FIRST_WEAK_FIXED_ARRAY_TYPE &&
- instance_type <= LAST_WEAK_FIXED_ARRAY_TYPE;
-}
-
bool HeapObject::IsSloppyArgumentsElements() const {
return IsFixedArrayExact();
}
@@ -202,6 +146,10 @@ bool HeapObject::IsJSGeneratorObject() const {
IsJSAsyncGeneratorObject();
}
+bool HeapObject::IsDataHandler() const {
+ return IsLoadHandler() || IsStoreHandler();
+}
+
bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
bool HeapObject::IsExternal(Isolate* isolate) const {
@@ -259,14 +207,6 @@ bool HeapObject::IsNullOrUndefined() const {
return IsNullOrUndefined(GetReadOnlyRoots());
}
-bool HeapObject::IsString() const {
- return map()->instance_type() < FIRST_NONSTRING_TYPE;
-}
-
-bool HeapObject::IsName() const {
- return map()->instance_type() <= LAST_NAME_TYPE;
-}
-
bool HeapObject::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
@@ -288,13 +228,6 @@ bool HeapObject::IsTemplateInfo() const {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
-bool HeapObject::IsInternalizedString() const {
- uint32_t type = map()->instance_type();
- STATIC_ASSERT(kNotInternalizedTag != 0);
- return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
- (kStringTag | kInternalizedTag);
-}
-
bool HeapObject::IsConsString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsCons();
@@ -353,51 +286,19 @@ bool HeapObject::IsFiller() const {
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
-bool HeapObject::IsFixedTypedArrayBase() const {
- InstanceType instance_type = map()->instance_type();
- return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
- instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
-}
-
bool HeapObject::IsJSReceiver() const {
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
-bool HeapObject::IsJSObject() const {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return map()->IsJSObjectMap();
-}
-
bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
-bool HeapObject::IsJSMapIterator() const {
- InstanceType instance_type = map()->instance_type();
- STATIC_ASSERT(JS_MAP_KEY_ITERATOR_TYPE + 1 == JS_MAP_KEY_VALUE_ITERATOR_TYPE);
- STATIC_ASSERT(JS_MAP_KEY_VALUE_ITERATOR_TYPE + 1 ==
- JS_MAP_VALUE_ITERATOR_TYPE);
- return (instance_type >= JS_MAP_KEY_ITERATOR_TYPE &&
- instance_type <= JS_MAP_VALUE_ITERATOR_TYPE);
-}
-
-bool HeapObject::IsJSSetIterator() const {
- InstanceType instance_type = map()->instance_type();
- return (instance_type == JS_SET_VALUE_ITERATOR_TYPE ||
- instance_type == JS_SET_KEY_VALUE_ITERATOR_TYPE);
-}
-
bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
}
bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
-bool HeapObject::IsMicrotask() const {
- InstanceType instance_type = map()->instance_type();
- return (instance_type >= FIRST_MICROTASK_TYPE &&
- instance_type <= LAST_MICROTASK_TYPE);
-}
-
bool HeapObject::IsPromiseReactionJobTask() const {
return IsPromiseFulfillReactionJobTask() || IsPromiseRejectReactionJobTask();
}
@@ -446,23 +347,12 @@ bool HeapObject::IsTemplateList() const {
}
bool HeapObject::IsDependentCode() const {
- if (!IsFixedArrayExact()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a dependent codes array.
+ if (!IsWeakFixedArray()) return false;
+ // There's actually no way to see the difference between a weak fixed array
+ // and a dependent codes array.
return true;
}
-bool HeapObject::IsContext() const {
- int instance_type = map()->instance_type();
- return instance_type >= FIRST_CONTEXT_TYPE &&
- instance_type <= LAST_CONTEXT_TYPE;
-}
-
-template <>
-inline bool Is<JSFunction>(Object* obj) {
- return obj->IsJSFunction();
-}
-
bool HeapObject::IsAbstractCode() const {
return IsBytecodeArray() || IsCode();
}
@@ -500,18 +390,6 @@ bool HeapObject::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
-bool HeapObject::IsHashTable() const {
- int instance_type = map()->instance_type();
- return instance_type >= FIRST_HASH_TABLE_TYPE &&
- instance_type <= LAST_HASH_TABLE_TYPE;
-}
-
-bool HeapObject::IsDictionary() const {
- int instance_type = map()->instance_type();
- return instance_type >= FIRST_DICTIONARY_TYPE &&
- instance_type <= LAST_DICTIONARY_TYPE;
-}
-
bool HeapObject::IsStringSet() const { return IsHashTable(); }
bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
@@ -543,12 +421,6 @@ Maybe<bool> Object::IsArray(Handle<Object> object) {
return JSProxy::IsArray(Handle<JSProxy>::cast(object));
}
-bool HeapObject::IsJSGlobalProxy() const {
- bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
- DCHECK(!result || map()->is_access_check_needed());
- return result;
-}
-
bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
bool HeapObject::IsAccessCheckNeeded() const {
@@ -576,9 +448,7 @@ bool HeapObject::IsStruct() const {
bool Object::Is##Name() const { \
return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
} \
- bool HeapObject::Is##Name() const { \
- return map()->instance_type() == NAME##_TYPE; \
- }
+ TYPE_CHECKER(Name)
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
@@ -609,6 +479,7 @@ CAST_ACCESSOR(BigInt)
CAST_ACCESSOR(ObjectBoilerplateDescription)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(ArrayBoilerplateDescription)
+CAST_ACCESSOR(DataHandler)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(EphemeronHashTable)
CAST_ACCESSOR(EnumCache)
@@ -617,17 +488,14 @@ CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(JSAsyncFromSyncIterator)
-CAST_ACCESSOR(JSAsyncGeneratorObject)
CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSValue)
@@ -659,7 +527,6 @@ CAST_ACCESSOR(Struct)
CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
-CAST_ACCESSOR(WeakCell)
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
@@ -1001,19 +868,6 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyRoots(MemoryChunk::FromHeapObject(this)->heap());
}
-Heap* HeapObject::GetHeap() const {
- Heap* heap = MemoryChunk::FromAddress(
- reinterpret_cast<Address>(const_cast<HeapObject*>(this)))
- ->heap();
- SLOW_DCHECK(heap != nullptr);
- return heap;
-}
-
-
-Isolate* HeapObject::GetIsolate() const {
- return GetHeap()->isolate();
-}
-
Heap* NeverReadOnlySpaceObject::GetHeap() const {
MemoryChunk* chunk =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this));
@@ -1043,8 +897,7 @@ void HeapObject::set_map(Map* value) {
if (value != nullptr) {
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
- Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
- this, nullptr, value);
+ MarkingBarrier(this, nullptr, value);
}
}
@@ -1063,8 +916,7 @@ void HeapObject::synchronized_set_map(Map* value) {
if (value != nullptr) {
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
- Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
- this, nullptr, value);
+ MarkingBarrier(this, nullptr, value);
}
}
@@ -1085,8 +937,7 @@ void HeapObject::set_map_after_allocation(Map* value, WriteBarrierMode mode) {
DCHECK_NOT_NULL(value);
// TODO(1600) We are passing nullptr as a slot because maps can never be on
// evacuation candidate.
- Heap::FromWritableHeapObject(this)->incremental_marking()->RecordWrite(
- this, nullptr, value);
+ MarkingBarrier(this, nullptr, value);
}
}
@@ -1162,7 +1013,7 @@ void AllocationSite::Initialize() {
set_pretenure_data(0);
set_pretenure_create_count(0);
set_dependent_code(
- DependentCode::cast(GetReadOnlyRoots().empty_fixed_array()),
+ DependentCode::cast(GetReadOnlyRoots().empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
}
@@ -1402,7 +1253,7 @@ void JSObject::SetMapAndElements(Handle<JSObject> object,
void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, value, mode);
}
@@ -1465,34 +1316,6 @@ void PropertyCell::set_property_details(PropertyDetails details) {
set_property_details_raw(details.AsSmi());
}
-
-Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
-
-
-void WeakCell::clear() {
- // Either the garbage collector is clearing the cell or we are simply
- // initializing the root empty weak cell.
- DCHECK(Heap::FromWritableHeapObject(this)->gc_state() == Heap::MARK_COMPACT ||
- this == GetReadOnlyRoots().empty_weak_cell());
- WRITE_FIELD(this, kValueOffset, Smi::kZero);
-}
-
-
-void WeakCell::initialize(HeapObject* val) {
- WRITE_FIELD(this, kValueOffset, val);
- // We just have to execute the generational barrier here because we never
- // mark through a weak cell and collect evacuation candidates when we process
- // all weak cells.
- Heap* heap = Heap::FromWritableHeapObject(this);
- WriteBarrierMode mode =
- heap->incremental_marking()->marking_state()->IsBlack(this)
- ? UPDATE_WRITE_BARRIER
- : UPDATE_WEAK_WRITE_BARRIER;
- CONDITIONAL_WRITE_BARRIER(heap, this, kValueOffset, val, mode);
-}
-
-bool WeakCell::cleared() const { return value() == Smi::kZero; }
-
int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
int JSObject::GetHeaderSize(const Map* map) {
@@ -1560,7 +1383,7 @@ void JSObject::SetEmbedderField(int index, Object* value) {
// to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(this, offset, value);
}
void JSObject::SetEmbedderField(int index, Smi* value) {
@@ -1605,7 +1428,7 @@ void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
if (index.is_inobject()) {
int offset = index.offset();
WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(GetHeap(), this, offset, value);
+ WRITE_BARRIER(this, offset, value);
} else {
property_array()->set(index.outobject_array_index(), value);
}
@@ -1681,7 +1504,7 @@ Object* JSObject::InObjectPropertyAtPut(int index,
// Adjust for the number of properties stored in the object.
int offset = GetInObjectPropertyOffset(index);
WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
return value;
}
@@ -1748,7 +1571,7 @@ void PropertyArray::set(int index, Object* value) {
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+ WRITE_BARRIER(this, offset, value);
}
int RegExpMatchInfo::NumberOfCaptureRegisters() {
@@ -1847,8 +1670,7 @@ void PropertyArray::set(int index, Object* value, WriteBarrierMode mode) {
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
- value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
}
Object** PropertyArray::data_start() {
@@ -2210,7 +2032,6 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrBytecodeOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
-DEFINE_DEOPT_ELEMENT_ACCESSORS(WeakCellCache, Object)
DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
DEFINE_DEOPT_ENTRY_ACCESSORS(BytecodeOffsetRaw, Smi)
@@ -2261,8 +2082,7 @@ FreeSpace* FreeSpace::next() {
(!Heap::FromWritableHeapObject(this)->deserialization_complete() &&
map() == nullptr));
DCHECK_LE(kNextOffset + kPointerSize, relaxed_read_size());
- return reinterpret_cast<FreeSpace*>(
- Memory::Address_at(address() + kNextOffset));
+ return reinterpret_cast<FreeSpace*>(Memory<Address>(address() + kNextOffset));
}
@@ -2535,8 +2355,7 @@ Code* JSFunction::code() { return Code::cast(READ_FIELD(this, kCodeOffset)); }
void JSFunction::set_code(Code* value) {
DCHECK(!Heap::InNewSpace(value));
WRITE_FIELD(this, kCodeOffset, value);
- GetHeap()->incremental_marking()->RecordWrite(
- this, HeapObject::RawField(this, kCodeOffset), value);
+ MarkingBarrier(this, HeapObject::RawField(this, kCodeOffset), value);
}
@@ -2577,10 +2396,7 @@ bool JSFunction::has_context() const {
return READ_FIELD(this, kContextOffset)->IsContext();
}
-JSObject* JSFunction::global_proxy() {
- return context()->global_proxy();
-}
-
+JSGlobalProxy* JSFunction::global_proxy() { return context()->global_proxy(); }
Context* JSFunction::native_context() { return context()->native_context(); }
@@ -2588,7 +2404,7 @@ Context* JSFunction::native_context() { return context()->native_context(); }
void JSFunction::set_context(Object* value) {
DCHECK(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
- WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
+ WRITE_BARRIER(this, kContextOffset, value);
}
ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
@@ -2619,11 +2435,19 @@ bool JSFunction::has_prototype() {
return map()->has_non_instance_prototype() || has_instance_prototype();
}
+bool JSFunction::has_prototype_property() {
+ return (has_prototype_slot() && IsConstructor()) ||
+ IsGeneratorFunction(shared()->kind());
+}
+
+bool JSFunction::PrototypeRequiresRuntimeLookup() {
+ return !has_prototype_property() || map()->has_non_instance_prototype();
+}
Object* JSFunction::instance_prototype() {
DCHECK(has_instance_prototype());
if (has_initial_map()) return initial_map()->prototype();
- // When there is no initial map and the prototype is a JSObject, the
+ // When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
return prototype_or_initial_map();
}
@@ -2631,7 +2455,7 @@ Object* JSFunction::instance_prototype() {
Object* JSFunction::prototype() {
DCHECK(has_prototype());
- // If the function's prototype property has been set to a non-JSObject
+ // If the function's prototype property has been set to a non-JSReceiver
// value, that value is stored in the constructor field of the map.
if (map()->has_non_instance_prototype()) {
Object* prototype = map()->GetConstructor();
@@ -2648,11 +2472,6 @@ bool JSFunction::is_compiled() {
return code()->builtin_index() != Builtins::kCompileLazy;
}
-ACCESSORS(JSProxy, target, Object, kTargetOffset)
-ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
-
-bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
-
// static
bool Foreign::IsNormalized(Object* value) {
if (value == Smi::kZero) return true;
@@ -2672,36 +2491,9 @@ void SmallOrderedHashTable<Derived>::SetDataEntry(int entry, int relative_index,
Object* value) {
Address entry_offset = GetDataEntryOffset(entry, relative_index);
RELAXED_WRITE_FIELD(this, entry_offset, value);
- WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
- static_cast<int>(entry_offset), value);
-}
-
-ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
-ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
-ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
-ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
-SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
-SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
-ACCESSORS(JSGeneratorObject, parameters_and_registers, FixedArray,
- kParametersAndRegistersOffset)
-
-bool JSGeneratorObject::is_suspended() const {
- DCHECK_LT(kGeneratorExecuting, 0);
- DCHECK_LT(kGeneratorClosed, 0);
- return continuation() >= 0;
+ WRITE_BARRIER(this, static_cast<int>(entry_offset), value);
}
-bool JSGeneratorObject::is_closed() const {
- return continuation() == kGeneratorClosed;
-}
-
-bool JSGeneratorObject::is_executing() const {
- return continuation() == kGeneratorExecuting;
-}
-
-ACCESSORS(JSAsyncGeneratorObject, queue, HeapObject, kQueueOffset)
-SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting, kIsAwaitingOffset)
-
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -2724,7 +2516,7 @@ SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
-ElementsKind JSObject::GetElementsKind() {
+ElementsKind JSObject::GetElementsKind() const {
ElementsKind kind = map()->elements_kind();
#if VERIFY_HEAP && DEBUG
FixedArrayBase* fixed_array =
@@ -2816,7 +2608,7 @@ bool JSObject::HasFixedTypedArrayElements() {
return map()->has_fixed_typed_array_elements();
}
-#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
bool JSObject::HasFixed##Type##Elements() { \
HeapObject* array = elements(); \
DCHECK_NOT_NULL(array); \
@@ -3319,8 +3111,7 @@ Object* Object::GetHash() {
return receiver->GetIdentityHash(isolate);
}
-Handle<Object> ObjectHashTableShape::AsHandle(Isolate* isolate,
- Handle<Object> key) {
+Handle<Object> ObjectHashTableShape::AsHandle(Handle<Object> key) {
return key;
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index aeadc2d398..d76c036ba9 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -14,15 +14,24 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-collator-inl.h"
+#endif // V8_INTL_SUPPORT
+#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-locale-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/js-relative-time-format-inl.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/literal-objects-inl.h"
@@ -159,12 +168,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
FreeSpace::cast(this)->FreeSpacePrint(os);
break;
-#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- case Fixed##Type##Array::kInstanceType: \
- Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(os); \
+#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
+ case Fixed##Type##Array::kInstanceType: \
+ Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(os); \
break;
- TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
+ TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
#undef PRINT_FIXED_TYPED_ARRAY
case FILLER_TYPE:
@@ -180,10 +189,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
// TODO(titzer): debug printing for more wasm objects
case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
- case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
JSObject::cast(this)->JSObjectPrint(os);
break;
+ case WASM_MODULE_TYPE:
+ WasmModuleObject::cast(this)->WasmModuleObjectPrint(os);
+ break;
case WASM_INSTANCE_TYPE:
WasmInstanceObject::cast(this)->WasmInstanceObjectPrint(os);
break;
@@ -285,9 +296,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case PROPERTY_CELL_TYPE:
PropertyCell::cast(this)->PropertyCellPrint(os);
break;
- case WEAK_CELL_TYPE:
- WeakCell::cast(this)->WeakCellPrint(os);
- break;
case JS_ARRAY_BUFFER_TYPE:
JSArrayBuffer::cast(this)->JSArrayBufferPrint(os);
break;
@@ -301,9 +309,18 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
JSDataView::cast(this)->JSDataViewPrint(os);
break;
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ JSCollator::cast(this)->JSCollatorPrint(os);
+ break;
+ case JS_INTL_LIST_FORMAT_TYPE:
+ JSListFormat::cast(this)->JSListFormatPrint(os);
+ break;
case JS_INTL_LOCALE_TYPE:
JSLocale::cast(this)->JSLocalePrint(os);
break;
+ case JS_INTL_PLURAL_RULES_TYPE:
+ JSPluralRules::cast(this)->JSPluralRulesPrint(os);
+ break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
JSRelativeTimeFormat::cast(this)->JSRelativeTimeFormatPrint(os);
break;
@@ -574,10 +591,10 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
break;
}
-#define PRINT_ELEMENTS(Type, type, TYPE, elementType, size) \
- case TYPE##_ELEMENTS: { \
- DoPrintElements<Fixed##Type##Array>(os, elements()); \
- break; \
+#define PRINT_ELEMENTS(Type, type, TYPE, elementType) \
+ case TYPE##_ELEMENTS: { \
+ DoPrintElements<Fixed##Type##Array>(os, elements()); \
+ break; \
}
TYPED_ARRAYS(PRINT_ELEMENTS)
#undef PRINT_ELEMENTS
@@ -802,12 +819,12 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
layout_descriptor()->ShortPrint(os);
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ Isolate* isolate;
// Read-only maps can't have transitions, which is fortunate because we need
// the isolate to iterate over the transitions.
- if (chunk->owner()->identity() != RO_SPACE) {
+ if (Isolate::FromWritableHeapObject(this, &isolate)) {
DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(chunk->heap()->isolate(), this, &no_gc);
+ TransitionsAccessor transitions(isolate, this, &no_gc);
int nof_transitions = transitions.NumberOfTransitions();
if (nof_transitions > 0) {
os << "\n - transitions #" << nof_transitions << ": ";
@@ -894,7 +911,7 @@ void PrintWeakArrayElements(std::ostream& os, T* array) {
if (previous_index != i - 1) {
ss << '-' << (i - 1);
}
- os << std::setw(12) << ss.str() << ": " << MaybeObjectBrief(previous_value);
+ os << std::setw(12) << ss.str() << ": " << Brief(previous_value);
previous_index = i;
previous_value = value;
}
@@ -1043,7 +1060,7 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
if (entry_size > 0) os << " {";
for (int i = 0; i < entry_size; i++) {
int index = GetIndex(slot) + i;
- os << "\n [" << index << "]: " << MaybeObjectBrief(get(index));
+ os << "\n [" << index << "]: " << Brief(get(index));
}
if (entry_size > 0) os << "\n }";
}
@@ -1095,7 +1112,8 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kStoreKeyedStrict:
- case FeedbackSlotKind::kStoreInArrayLiteral: {
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ case FeedbackSlotKind::kCloneObject: {
os << ICState2String(StateFromFeedback());
break;
}
@@ -1506,18 +1524,6 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
-void WeakCell::WeakCellPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "WeakCell");
- if (cleared()) {
- os << "\n - cleared";
- } else {
- os << "\n - value: " << Brief(value());
- }
- os << "\n";
-}
-
-
void Code::CodePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Code");
os << "\n";
@@ -1666,10 +1672,10 @@ void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) { // NOLINT
void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PrototypeInfo");
- os << "\n - weak cell: " << Brief(weak_cell());
+ os << "\n - module namespace: " << Brief(module_namespace());
os << "\n - prototype users: " << Brief(prototype_users());
os << "\n - registry slot: " << registry_slot();
- os << "\n - object create map: " << MaybeObjectBrief(object_create_map());
+ os << "\n - object create map: " << Brief(object_create_map());
os << "\n - should_be_fast_map: " << should_be_fast_map();
os << "\n";
}
@@ -1762,14 +1768,17 @@ void WasmExportedFunctionData::WasmExportedFunctionDataPrint(
}
void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "WasmModuleObject");
- JSObjectPrintBody(os, this);
+ HeapObject::PrintHeader(os, "WasmModuleObject");
os << "\n - module: " << module();
os << "\n - native module: " << native_module();
os << "\n - export wrappers: " << Brief(export_wrappers());
os << "\n - script: " << Brief(script());
- os << "\n - asm_js_offset_table: " << Brief(asm_js_offset_table());
- os << "\n - breakpoint_infos: " << Brief(breakpoint_infos());
+ if (has_asm_js_offset_table()) {
+ os << "\n - asm_js_offset_table: " << Brief(asm_js_offset_table());
+ }
+ if (has_breakpoint_infos()) {
+ os << "\n - breakpoint_infos: " << Brief(breakpoint_infos());
+ }
os << "\n";
}
@@ -1780,7 +1789,7 @@ void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
os << "\n - validity_cell: " << Brief(validity_cell());
int data_count = data_field_count();
if (data_count >= 1) {
- os << "\n - data1: " << MaybeObjectBrief(data1());
+ os << "\n - data1: " << Brief(data1());
}
if (data_count >= 2) {
os << "\n - data2: " << Brief(data2());
@@ -1798,7 +1807,7 @@ void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
os << "\n - validity_cell: " << Brief(validity_cell());
int data_count = data_field_count();
if (data_count >= 1) {
- os << "\n - data1: " << MaybeObjectBrief(data1());
+ os << "\n - data1: " << Brief(data1());
}
if (data_count >= 2) {
os << "\n - data2: " << Brief(data2());
@@ -1944,6 +1953,23 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
}
#ifdef V8_INTL_SUPPORT
+void JSCollator::JSCollatorPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSCollator");
+ os << "\n - usage: " << JSCollator::UsageToString(usage());
+ os << "\n - icu collator: " << Brief(icu_collator());
+ os << "\n - bound compare: " << Brief(bound_compare());
+ os << "\n";
+}
+
+void JSListFormat::JSListFormatPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSListFormat");
+ os << "\n - locale: " << Brief(locale());
+ os << "\n - style: " << StyleAsString();
+ os << "\n - type: " << TypeAsString();
+ os << "\n - formatter: " << Brief(formatter());
+ os << "\n";
+}
+
void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSLocale");
os << "\n - language: " << Brief(language());
@@ -1960,6 +1986,16 @@ void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void JSPluralRules::JSPluralRulesPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "JSPluralRules");
+ JSObjectPrint(os);
+ os << "\n - locale: " << Brief(locale());
+ os << "\n - type: " << Brief(type());
+ os << "\n - icu plural rules: " << Brief(icu_plural_rules());
+ os << "\n - icu decimal format: " << Brief(icu_decimal_format());
+ os << "\n";
+}
+
void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSRelativeTimeFormat");
@@ -2046,7 +2082,7 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
os << "\n - flags: " << flags();
os << "\n - debugger_hints: " << debugger_hints();
os << "\n - shared: " << Brief(shared());
- os << "\n - function_identifier: " << Brief(function_identifier());
+ os << "\n - script: " << Brief(script());
os << "\n - original bytecode array: " << Brief(original_bytecode_array());
os << "\n - break_points: ";
break_points()->FixedArrayPrint(os);
@@ -2396,11 +2432,11 @@ void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
//
// The following functions are used by our gdb macros.
//
-extern void _v8_internal_Print_Object(void* object) {
+V8_EXPORT_PRIVATE extern void _v8_internal_Print_Object(void* object) {
reinterpret_cast<i::Object*>(object)->Print();
}
-extern void _v8_internal_Print_Code(void* object) {
+V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
i::Address address = reinterpret_cast<i::Address>(object);
i::Isolate* isolate = i::Isolate::Current();
@@ -2435,7 +2471,8 @@ extern void _v8_internal_Print_Code(void* object) {
#endif // ENABLE_DISASSEMBLER
}
-extern void _v8_internal_Print_LayoutDescriptor(void* object) {
+V8_EXPORT_PRIVATE extern void _v8_internal_Print_LayoutDescriptor(
+ void* object) {
i::Object* o = reinterpret_cast<i::Object*>(object);
if (!o->IsLayoutDescriptor()) {
printf("Please provide a layout descriptor\n");
@@ -2444,12 +2481,12 @@ extern void _v8_internal_Print_LayoutDescriptor(void* object) {
}
}
-extern void _v8_internal_Print_StackTrace() {
+V8_EXPORT_PRIVATE extern void _v8_internal_Print_StackTrace() {
i::Isolate* isolate = i::Isolate::Current();
isolate->PrintStack(stdout);
}
-extern void _v8_internal_Print_TransitionTree(void* object) {
+V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
i::Object* o = reinterpret_cast<i::Object*>(object);
if (!o->IsMap()) {
printf("Please provide a valid Map\n");
@@ -2457,7 +2494,7 @@ extern void _v8_internal_Print_TransitionTree(void* object) {
#if defined(DEBUG) || defined(OBJECT_PRINT)
i::DisallowHeapAllocation no_gc;
i::Map* map = reinterpret_cast<i::Map*>(object);
- i::TransitionsAccessor transitions(map->GetIsolate(), map, &no_gc);
+ i::TransitionsAccessor transitions(i::Isolate::Current(), map, &no_gc);
transitions.PrintTransitionTree();
#endif
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 0aac39d304..d4af74b2bd 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -46,7 +46,7 @@
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/log.h"
-#include "src/lookup.h"
+#include "src/lookup-inl.h"
#include "src/macro-assembler.h"
#include "src/map-updater.h"
#include "src/messages.h"
@@ -59,13 +59,20 @@
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-collator.h"
+#endif // V8_INTL_SUPPORT
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-list-format.h"
#include "src/objects/js-locale.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-regexp-string-iterator.h"
#ifdef V8_INTL_SUPPORT
+#include "src/objects/js-plural-rules.h"
#include "src/objects/js-relative-time-format.h"
#endif // V8_INTL_SUPPORT
#include "src/objects/literal-objects-inl.h"
@@ -81,7 +88,7 @@
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/snapshot.h"
#include "src/source-position-table.h"
-#include "src/string-builder.h"
+#include "src/string-builder-inl.h"
#include "src/string-search.h"
#include "src/string-stream.h"
#include "src/unicode-cache-inl.h"
@@ -866,13 +873,13 @@ MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
isolate, array, length);
} else if (object->IsJSTypedArray()) {
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(object);
- uint32_t length = array->length_value();
+ size_t length = array->length_value();
if (array->WasNeutered() ||
- length > static_cast<uint32_t>(FixedArray::kMaxLength)) {
+ length > static_cast<size_t>(FixedArray::kMaxLength)) {
return MaybeHandle<FixedArray>();
}
return array->GetElementsAccessor()->CreateListFromArrayLike(
- isolate, array, length);
+ isolate, array, static_cast<uint32_t>(length));
}
}
return MaybeHandle<FixedArray>();
@@ -950,11 +957,11 @@ MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
// static
MaybeHandle<Object> Object::GetLengthFromArrayLike(Isolate* isolate,
- Handle<Object> object) {
+ Handle<JSReceiver> object) {
Handle<Object> val;
- Handle<Object> key = isolate->factory()->length_string();
+ Handle<Name> key = isolate->factory()->length_string();
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, val, Runtime::GetObjectProperty(isolate, object, key), Object);
+ isolate, val, JSReceiver::GetProperty(isolate, object, key), Object);
return Object::ToLength(isolate, val);
}
@@ -1335,6 +1342,27 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
return result;
}
+// 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
+// Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
+MaybeHandle<JSObject> JSObject::ObjectCreate(Isolate* isolate,
+ Handle<Object> prototype) {
+ // Generate the map with the specified {prototype} based on the Object
+ // function's initial map from the current native context.
+ // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+ // slack tracking for Object.create.
+ Handle<Map> map =
+ Map::GetObjectCreateMap(isolate, Handle<HeapObject>::cast(prototype));
+
+ // Actually allocate the object.
+ Handle<JSObject> object;
+ if (map->is_dictionary_map()) {
+ object = isolate->factory()->NewSlowJSObjectFromMap(map);
+ } else {
+ object = isolate->factory()->NewJSObjectFromMap(map);
+ }
+ return object;
+}
+
void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
DCHECK(object->HasSmiOrObjectElements() ||
object->HasFastStringWrapperElements());
@@ -1416,8 +1444,14 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_MODULE_NAMESPACE_TYPE:
return JSModuleNamespace::kHeaderSize;
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ return JSCollator::kSize;
+ case JS_INTL_LIST_FORMAT_TYPE:
+ return JSListFormat::kSize;
case JS_INTL_LOCALE_TYPE:
return JSLocale::kSize;
+ case JS_INTL_PLURAL_RULES_TYPE:
+ return JSPluralRules::kSize;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
return JSRelativeTimeFormat::kSize;
#endif // V8_INTL_SUPPORT
@@ -1539,7 +1573,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
if (info->replace_on_access() && receiver->IsJSReceiver()) {
RETURN_ON_EXCEPTION(isolate,
Accessors::ReplaceAccessorWithDataProperty(
- isolate, receiver, holder, name, result),
+ receiver, holder, name, result),
Object);
}
return reboxed_result;
@@ -1588,8 +1622,7 @@ Address CallHandlerInfo::redirected_callback() const {
return ExternalReference::Create(&fun, type).address();
}
-bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
- Handle<AccessorInfo> info,
+bool AccessorInfo::IsCompatibleReceiverMap(Handle<AccessorInfo> info,
Handle<Map> map) {
if (!info->HasExpectedReceiverType()) return true;
if (!map->IsJSObjectMap()) return false;
@@ -2482,30 +2515,22 @@ void Object::ShortPrint(std::ostream& os) { os << Brief(this); }
void MaybeObject::ShortPrint(FILE* out) {
OFStream os(out);
- os << MaybeObjectBrief(this);
+ os << Brief(this);
}
void MaybeObject::ShortPrint(StringStream* accumulator) {
std::ostringstream os;
- os << MaybeObjectBrief(this);
+ os << Brief(this);
accumulator->Add(os.str().c_str());
}
-void MaybeObject::ShortPrint(std::ostream& os) { os << MaybeObjectBrief(this); }
+void MaybeObject::ShortPrint(std::ostream& os) { os << Brief(this); }
-std::ostream& operator<<(std::ostream& os, const Brief& v) {
- if (v.value->IsSmi()) {
- Smi::cast(v.value)->SmiPrint(os);
- } else {
- // TODO(svenpanne) Const-correct HeapObjectShortPrint!
- HeapObject* obj = const_cast<HeapObject*>(HeapObject::cast(v.value));
- obj->HeapObjectShortPrint(os);
- }
- return os;
-}
+Brief::Brief(const Object* v)
+ : value(MaybeObject::FromObject(const_cast<Object*>(v))) {}
-std::ostream& operator<<(std::ostream& os, const MaybeObjectBrief& v) {
- // TODO(marja): const-correct this the same way as the Object* version.
+std::ostream& operator<<(std::ostream& os, const Brief& v) {
+ // TODO(marja): const-correct HeapObjectShortPrint.
MaybeObject* maybe_object = const_cast<MaybeObject*>(v.value);
Smi* smi;
HeapObject* heap_object;
@@ -2573,7 +2598,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
DisallowHeapAllocation no_allocation;
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
- DCHECK(!this->IsExternalString());
+ DCHECK(this->SupportsExternalization());
DCHECK(!resource->IsCompressible());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -2588,11 +2613,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kShortSize) return false;
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (chunk->owner()->identity() == RO_SPACE) return false;
- Heap* heap = chunk->heap();
+ if (!Isolate::FromWritableHeapObject(this, &isolate)) return false;
+ Heap* heap = isolate->heap();
bool is_one_byte = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
@@ -2642,7 +2667,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
- self->set_resource(resource);
+ self->SetResource(isolate, resource);
heap->RegisterExternalString(this);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
return true;
@@ -2653,7 +2678,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
DisallowHeapAllocation no_allocation;
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
- DCHECK(!this->IsExternalString());
+ DCHECK(this->SupportsExternalization());
DCHECK(!resource->IsCompressible());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -2673,11 +2698,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kShortSize) return false;
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(this);
+ Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (chunk->owner()->identity() == RO_SPACE) return false;
- Heap* heap = chunk->heap();
+ if (!Isolate::FromWritableHeapObject(this, &isolate)) return false;
+ Heap* heap = isolate->heap();
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
@@ -2716,12 +2741,31 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalOneByteString* self = ExternalOneByteString::cast(this);
- self->set_resource(resource);
+ self->SetResource(isolate, resource);
heap->RegisterExternalString(this);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
return true;
}
+bool String::SupportsExternalization() {
+ if (this->IsThinString()) {
+ return i::ThinString::cast(this)->actual()->SupportsExternalization();
+ }
+
+ Isolate* isolate;
+ // RO_SPACE strings cannot be externalized.
+ if (!Isolate::FromWritableHeapObject(this, &isolate)) {
+ return false;
+ }
+
+ // Already an external string.
+ if (StringShape(this).IsExternal()) {
+ return false;
+ }
+
+ return !isolate->heap()->IsInGCPostProcessing();
+}
+
void String::StringShortPrint(StringStream* accumulator, bool show_details) {
int len = length();
if (len > kMaxShortPrintLength) {
@@ -3071,9 +3115,6 @@ VisitorId Map::GetVisitorId(Map* map) {
case PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
- case WEAK_CELL_TYPE:
- return kVisitWeakCell;
-
case TRANSITION_ARRAY_TYPE:
return kVisitTransitionArray;
@@ -3111,6 +3152,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case PRE_PARSED_SCOPE_DATA_TYPE:
return kVisitPreParsedScopeData;
+ case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
+ return kVisitUncompiledDataWithoutPreParsedScope;
+
case UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE:
return kVisitUncompiledDataWithPreParsedScope;
@@ -3143,7 +3187,10 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_REGEXP_TYPE:
case JS_REGEXP_STRING_ITERATOR_TYPE:
#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
case JS_INTL_LOCALE_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
#endif // V8_INTL_SUPPORT
case WASM_GLOBAL_TYPE:
@@ -3164,7 +3211,6 @@ VisitorId Map::GetVisitorId(Map* map) {
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case FEEDBACK_METADATA_TYPE:
- case UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE:
return kVisitDataObject;
case BIGINT_TYPE:
@@ -3289,7 +3335,7 @@ bool JSObject::IsUnmodifiedApiObject(Object** o) {
HeapObject* heap_object = HeapObject::cast(object);
if (!object->IsJSObject()) return false;
JSObject* js_object = JSObject::cast(object);
- if (!js_object->WasConstructedFromApiFunction()) return false;
+ if (!js_object->IsApiWrapper()) return false;
Object* maybe_constructor = js_object->map()->GetConstructor();
if (!maybe_constructor->IsJSFunction()) return false;
JSFunction* constructor = JSFunction::cast(maybe_constructor);
@@ -3433,13 +3479,13 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case FREE_SPACE_TYPE:
os << "<FreeSpace[" << FreeSpace::cast(this)->size() << "]>";
break;
-#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype) \
case FIXED_##TYPE##_ARRAY_TYPE: \
os << "<Fixed" #Type "Array[" << Fixed##Type##Array::cast(this)->length() \
<< "]>"; \
break;
- TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
+ TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
#undef TYPED_ARRAY_SHORT_PRINT
case PRE_PARSED_SCOPE_DATA_TYPE: {
@@ -3578,15 +3624,6 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << '>';
break;
}
- case WEAK_CELL_TYPE: {
- os << "<WeakCell value= ";
- HeapStringAllocator allocator;
- StringStream accumulator(&allocator);
- WeakCell::cast(this)->value()->ShortPrint(&accumulator);
- os << accumulator.ToCString().get();
- os << '>';
- break;
- }
case CALL_HANDLER_INFO_TYPE: {
CallHandlerInfo* info = CallHandlerInfo::cast(this);
os << "<CallHandlerInfo ";
@@ -3676,7 +3713,7 @@ String* JSReceiver::class_name() {
if (IsJSSet()) return roots.Set_string();
if (IsJSSetIterator()) return roots.SetIterator_string();
if (IsJSTypedArray()) {
-#define SWITCH_KIND(Type, type, TYPE, ctype, size) \
+#define SWITCH_KIND(Type, type, TYPE, ctype) \
if (map()->elements_kind() == TYPE##_ELEMENTS) { \
return roots.Type##Array_string(); \
}
@@ -3778,8 +3815,9 @@ void HeapObject::RehashBasedOnMap(Isolate* isolate) {
}
}
-// static
-Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
+namespace {
+std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
+ Handle<JSReceiver> receiver) {
Isolate* isolate = receiver->GetIsolate();
// If the object was instantiated simply with base == new.target, the
@@ -3794,37 +3832,61 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
String* name = constructor->shared()->DebugName();
if (name->length() != 0 &&
!name->Equals(ReadOnlyRoots(isolate).Object_string())) {
- return handle(name, isolate);
+ return std::make_pair(handle(constructor, isolate),
+ handle(name, isolate));
}
} else if (maybe_constructor->IsFunctionTemplateInfo()) {
FunctionTemplateInfo* info =
FunctionTemplateInfo::cast(maybe_constructor);
if (info->class_name()->IsString()) {
- return handle(String::cast(info->class_name()), isolate);
+ return std::make_pair(
+ MaybeHandle<JSFunction>(),
+ handle(String::cast(info->class_name()), isolate));
}
}
}
Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
receiver, isolate->factory()->to_string_tag_symbol());
- if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
+ if (maybe_tag->IsString())
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ Handle<String>::cast(maybe_tag));
PrototypeIterator iter(isolate, receiver);
- if (iter.IsAtEnd()) return handle(receiver->class_name(), isolate);
+ if (iter.IsAtEnd()) {
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ handle(receiver->class_name(), isolate));
+ }
+
Handle<JSReceiver> start = PrototypeIterator::GetCurrent<JSReceiver>(iter);
LookupIterator it(receiver, isolate->factory()->constructor_string(), start,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
- Handle<String> result = isolate->factory()->Object_string();
if (maybe_constructor->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(*maybe_constructor);
String* name = constructor->shared()->DebugName();
- if (name->length() > 0) result = handle(name, isolate);
+
+ if (name->length() != 0 &&
+ !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
+ return std::make_pair(handle(constructor, isolate),
+ handle(name, isolate));
+ }
}
- return result.is_identical_to(isolate->factory()->Object_string())
- ? handle(receiver->class_name(), isolate)
- : result;
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ handle(receiver->class_name(), isolate));
+}
+} // anonymous namespace
+
+// static
+MaybeHandle<JSFunction> JSReceiver::GetConstructor(
+ Handle<JSReceiver> receiver) {
+ return GetConstructorHelper(receiver).first;
+}
+
+// static
+Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
+ return GetConstructorHelper(receiver).second;
}
Handle<Context> JSReceiver::GetCreationContext() {
@@ -3852,9 +3914,9 @@ Handle<Context> JSReceiver::GetCreationContext() {
}
// static
-MaybeObjectHandle Map::WrapFieldType(Handle<FieldType> type) {
+MaybeObjectHandle Map::WrapFieldType(Isolate* isolate, Handle<FieldType> type) {
if (type->IsClass()) {
- return MaybeObjectHandle::Weak(type->AsClass());
+ return MaybeObjectHandle::Weak(type->AsClass(), isolate);
}
return MaybeObjectHandle(type);
}
@@ -3898,7 +3960,7 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
isolate, map->instance_type(), &constness, &representation, &type);
}
- MaybeObjectHandle wrapped_type = WrapFieldType(type);
+ MaybeObjectHandle wrapped_type = WrapFieldType(isolate, type);
DCHECK_IMPLIES(!FLAG_track_constant_fields,
constness == PropertyConstness::kMutable);
@@ -3926,7 +3988,8 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
PropertyConstness::kConst, representation, flag);
} else {
// Allocate new instance descriptors with (name, constant) added.
- Descriptor d = Descriptor::DataConstant(name, 0, constant, attributes);
+ Descriptor d =
+ Descriptor::DataConstant(isolate, name, 0, constant, attributes);
Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
return new_map;
}
@@ -4523,8 +4586,8 @@ Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
? details.field_index()
: new_map->NumberOfFields();
Descriptor d = Descriptor::DataField(
- handle(descriptors->GetKey(modify_index), isolate), field_index,
- attributes, Representation::Tagged());
+ isolate, handle(descriptors->GetKey(modify_index), isolate),
+ field_index, attributes, Representation::Tagged());
descriptors->Replace(modify_index, &d);
if (details.location() != kField) {
new_map->AccountAddedPropertyField();
@@ -4583,7 +4646,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray* new_descriptors,
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
- isolate->heap()->incremental_marking()->RecordWrites(to_replace);
+ MarkingBarrierForElements(isolate->heap(), to_replace);
Map* current = this;
while (current->instance_descriptors() == to_replace) {
Object* next = current->GetBackPointer();
@@ -4741,7 +4804,7 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
PropertyDetails details = descriptors->GetDetails(modify_index);
Handle<Name> name(descriptors->GetKey(modify_index), isolate);
- MaybeObjectHandle wrapped_type(WrapFieldType(new_field_type));
+ MaybeObjectHandle wrapped_type(WrapFieldType(isolate, new_field_type));
field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
new_representation, wrapped_type);
field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
@@ -5364,7 +5427,7 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
- isolate->heap()->incremental_marking()->RecordWrites(*descriptors);
+ MarkingBarrierForElements(isolate->heap(), *descriptors);
Map* current = *map;
while (current->instance_descriptors() == *descriptors) {
@@ -5559,15 +5622,6 @@ bool Map::IsMapInArrayPrototypeChain(Isolate* isolate) const {
return false;
}
-Handle<WeakCell> Map::WeakCellForMap(Isolate* isolate, Handle<Map> map) {
- if (map->weak_cell_cache()->IsWeakCell()) {
- return Handle<WeakCell>(WeakCell::cast(map->weak_cell_cache()), isolate);
- }
- Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(map);
- map->set_weak_cell_cache(*weak_cell);
- return weak_cell;
-}
-
static Handle<Map> AddMissingElementsTransitions(Isolate* isolate,
Handle<Map> map,
ElementsKind to_kind) {
@@ -5629,7 +5683,7 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
DCHECK(!map->IsUndefined(isolate));
// Check if we can go back in the elements kind transition chain.
- if (IsHoleyOrDictionaryElementsKind(from_kind) &&
+ if (IsHoleyElementsKind(from_kind) &&
to_kind == GetPackedElementsKind(from_kind) &&
map->GetBackPointer()->IsMap() &&
Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
@@ -6343,8 +6397,8 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
- Handle<FixedArray> array(
- isolate->factory()->NewFixedArray(kEntries, TENURED));
+ Handle<WeakFixedArray> array(
+ isolate->factory()->NewWeakFixedArray(kEntries, TENURED));
return Handle<NormalizedMapCache>::cast(array);
}
@@ -6352,35 +6406,26 @@ Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
PropertyNormalizationMode mode) {
DisallowHeapAllocation no_gc;
- Object* value = FixedArray::get(GetIndex(fast_map));
- if (!value->IsWeakCell() || WeakCell::cast(value)->cleared()) {
+ MaybeObject* value = WeakFixedArray::Get(GetIndex(fast_map));
+ HeapObject* heap_object;
+ if (!value->ToWeakHeapObject(&heap_object)) {
return MaybeHandle<Map>();
}
- Map* normalized_map = Map::cast(WeakCell::cast(value)->value());
+ Map* normalized_map = Map::cast(heap_object);
if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
return MaybeHandle<Map>();
}
return handle(normalized_map, GetIsolate());
}
-void NormalizedMapCache::Set(Handle<Map> fast_map, Handle<Map> normalized_map,
- Handle<WeakCell> normalized_map_weak_cell) {
+void NormalizedMapCache::Set(Handle<Map> fast_map, Handle<Map> normalized_map) {
DisallowHeapAllocation no_gc;
DCHECK(normalized_map->is_dictionary_map());
- DCHECK_EQ(normalized_map_weak_cell->value(), *normalized_map);
- FixedArray::set(GetIndex(fast_map), *normalized_map_weak_cell);
-}
-
-
-void NormalizedMapCache::Clear() {
- int entries = length();
- for (int i = 0; i != entries; i++) {
- set_undefined(i);
- }
+ WeakFixedArray::Set(GetIndex(fast_map),
+ HeapObjectReference::Weak(*normalized_map));
}
-
void JSObject::NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties,
@@ -7385,6 +7430,16 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
return Just(true);
}
+// static
+Maybe<bool> JSReceiver::CreateDataProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Name> key,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, key,
+ LookupIterator::OWN);
+ return CreateDataProperty(&it, value, should_throw);
+}
// static
Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
@@ -8088,9 +8143,9 @@ bool JSObject::ReferencesObject(Object* obj) {
switch (kind) {
// Raw pixels and external arrays do not reference other
// objects.
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -8843,8 +8898,7 @@ bool JSObject::HasEnumerableElements() {
}
return false;
}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -9294,10 +9348,8 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
reinterpret_cast<void*>(new_map->address()),
Map::kDependentCodeOffset));
}
- STATIC_ASSERT(Map::kWeakCellCacheOffset ==
- Map::kDependentCodeOffset + kPointerSize);
STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
- Map::kWeakCellCacheOffset + kPointerSize);
+ Map::kDependentCodeOffset + kPointerSize);
int offset = Map::kPrototypeValidityCellOffset + kPointerSize;
DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
reinterpret_cast<void*>(new_map->address() + offset),
@@ -9307,8 +9359,7 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
} else {
new_map = Map::CopyNormalized(isolate, fast_map, mode);
if (use_cache) {
- Handle<WeakCell> cell = Map::WeakCellForMap(isolate, new_map);
- cache->Set(fast_map, new_map, cell);
+ cache->Set(fast_map, new_map);
isolate->counters()->maps_normalized()->Increment();
}
if (FLAG_trace_maps) {
@@ -10198,6 +10249,38 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
return descriptors;
}
+// Create a new descriptor array with only enumerable, configurable, writable
+// data properties, but identical field locations.
+Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
+ Isolate* isolate, Handle<DescriptorArray> src, int enumeration_index,
+ int slack) {
+ if (enumeration_index + slack == 0) {
+ return isolate->factory()->empty_descriptor_array();
+ }
+
+ int size = enumeration_index;
+ Handle<DescriptorArray> descriptors =
+ DescriptorArray::Allocate(isolate, size, slack);
+
+ for (int i = 0; i < size; ++i) {
+ Name* key = src->GetKey(i);
+ PropertyDetails details = src->GetDetails(i);
+
+ SLOW_DCHECK(!key->IsPrivateField() && details.IsEnumerable() &&
+ details.kind() == kData);
+
+ // Ensure the ObjectClone property details are NONE, and that none of the
+ // source details contained DONT_ENUM.
+ PropertyDetails new_details(kData, NONE, details.location(),
+ details.constness(), details.representation(),
+ details.field_index());
+ descriptors->Set(i, key, src->GetValue(i), new_details);
+ }
+
+ descriptors->Sort();
+
+ return descriptors;
+}
bool DescriptorArray::IsEqualUpTo(DescriptorArray* desc, int nof_descriptors) {
for (int i = 0; i < nof_descriptors; i++) {
@@ -10317,79 +10400,6 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
}
#endif
-// static
-void FixedArrayOfWeakCells::Set(Isolate* isolate,
- Handle<FixedArrayOfWeakCells> array, int index,
- Handle<HeapObject> value) {
- DCHECK(array->IsEmptySlot(index)); // Don't overwrite anything.
- Handle<WeakCell> cell =
- value->IsMap() ? Map::WeakCellForMap(isolate, Handle<Map>::cast(value))
- : isolate->factory()->NewWeakCell(value);
- Handle<FixedArray>::cast(array)->set(index + kFirstIndex, *cell);
- array->set_last_used_index(index);
-}
-
-
-// static
-Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Add(
- Isolate* isolate, Handle<Object> maybe_array, Handle<HeapObject> value,
- int* assigned_index) {
- Handle<FixedArrayOfWeakCells> array =
- (maybe_array.is_null() || !maybe_array->IsFixedArrayOfWeakCells())
- ? Allocate(isolate, 1, Handle<FixedArrayOfWeakCells>::null())
- : Handle<FixedArrayOfWeakCells>::cast(maybe_array);
- // Try to store the new entry if there's room. Optimize for consecutive
- // accesses.
- int first_index = array->last_used_index();
- int length = array->Length();
- if (length > 0) {
- for (int i = first_index;;) {
- if (array->IsEmptySlot((i))) {
- FixedArrayOfWeakCells::Set(isolate, array, i, value);
- if (assigned_index != nullptr) *assigned_index = i;
- return array;
- }
- i = (i + 1) % length;
- if (i == first_index) break;
- }
- }
-
- // No usable slot found, grow the array.
- int new_length = length == 0 ? 1 : length + (length >> 1) + 4;
- Handle<FixedArrayOfWeakCells> new_array =
- Allocate(isolate, new_length, array);
- FixedArrayOfWeakCells::Set(isolate, new_array, length, value);
- if (assigned_index != nullptr) *assigned_index = length;
- return new_array;
-}
-
-template <class CompactionCallback>
-void FixedArrayOfWeakCells::Compact(Isolate* isolate) {
- FixedArray* array = FixedArray::cast(this);
- int new_length = kFirstIndex;
- for (int i = kFirstIndex; i < array->length(); i++) {
- Object* element = array->get(i);
- if (element->IsSmi()) continue;
- if (WeakCell::cast(element)->cleared()) continue;
- Object* value = WeakCell::cast(element)->value();
- CompactionCallback::Callback(value, i - kFirstIndex,
- new_length - kFirstIndex);
- array->set(new_length++, element);
- }
- array->Shrink(isolate, new_length);
- set_last_used_index(0);
-}
-
-void FixedArrayOfWeakCells::Iterator::Reset(Object* maybe_array) {
- if (maybe_array->IsFixedArrayOfWeakCells()) {
- list_ = FixedArrayOfWeakCells::cast(maybe_array);
- index_ = 0;
-#ifdef DEBUG
- last_used_index_ = list_->last_used_index();
-#endif // DEBUG
- }
-}
-
void JSObject::PrototypeRegistryCompactionCallback(HeapObject* value,
int old_index,
int new_index) {
@@ -10401,51 +10411,6 @@ void JSObject::PrototypeRegistryCompactionCallback(HeapObject* value,
proto_info->set_registry_slot(new_index);
}
-template void FixedArrayOfWeakCells::Compact<
- FixedArrayOfWeakCells::NullCallback>(Isolate* isolate);
-
-bool FixedArrayOfWeakCells::Remove(Handle<HeapObject> value) {
- if (Length() == 0) return false;
- // Optimize for the most recently added element to be removed again.
- int first_index = last_used_index();
- for (int i = first_index;;) {
- if (Get(i) == *value) {
- Clear(i);
- // Users of FixedArrayOfWeakCells should make sure that there are no
- // duplicates.
- return true;
- }
- i = (i + 1) % Length();
- if (i == first_index) return false;
- }
- UNREACHABLE();
-}
-
-
-// static
-Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Allocate(
- Isolate* isolate, int size, Handle<FixedArrayOfWeakCells> initialize_from) {
- DCHECK_LE(0, size);
- Handle<FixedArray> result =
- isolate->factory()->NewUninitializedFixedArray(size + kFirstIndex);
- int index = 0;
- if (!initialize_from.is_null()) {
- DCHECK(initialize_from->Length() <= size);
- Handle<FixedArray> raw_source = Handle<FixedArray>::cast(initialize_from);
- // Copy the entries without compacting, since the PrototypeInfo relies on
- // the index of the entries not to change.
- while (index < raw_source->length()) {
- result->set(index, raw_source->get(index));
- index++;
- }
- }
- while (index < result->length()) {
- result->set(index, Smi::kZero);
- index++;
- }
- return Handle<FixedArrayOfWeakCells>::cast(result);
-}
-
// static
Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
Handle<Object> obj) {
@@ -10532,8 +10497,8 @@ Handle<WeakArrayList> WeakArrayList::AddToEnd(Isolate* isolate,
MaybeObjectHandle value) {
int length = array->length();
array = EnsureSpace(isolate, array, length + 1);
- // Check that GC didn't remove elements from the array.
- DCHECK_EQ(array->length(), length);
+ // Reload length; GC might have removed elements from the array.
+ length = array->length();
array->Set(length, *value);
array->set_length(length + 1);
return array;
@@ -10544,17 +10509,46 @@ bool WeakArrayList::IsFull() { return length() == capacity(); }
// static
Handle<WeakArrayList> WeakArrayList::EnsureSpace(Isolate* isolate,
Handle<WeakArrayList> array,
- int length) {
+ int length,
+ PretenureFlag pretenure) {
int capacity = array->capacity();
if (capacity < length) {
int new_capacity = length;
new_capacity = new_capacity + Max(new_capacity / 2, 2);
int grow_by = new_capacity - capacity;
- array = isolate->factory()->CopyWeakArrayListAndGrow(array, grow_by);
+ array =
+ isolate->factory()->CopyWeakArrayListAndGrow(array, grow_by, pretenure);
}
return array;
}
+int WeakArrayList::CountLiveWeakReferences() const {
+ int live_weak_references = 0;
+ for (int i = 0; i < length(); i++) {
+ if (Get(i)->IsWeakHeapObject()) {
+ ++live_weak_references;
+ }
+ }
+ return live_weak_references;
+}
+
+bool WeakArrayList::RemoveOne(MaybeObjectHandle value) {
+ if (length() == 0) return false;
+ // Optimize for the most recently added element to be removed again.
+ int last_index = length() - 1;
+ for (int i = last_index; i >= 0; --i) {
+ if (Get(i) == *value) {
+ // Move the last element into this slot (or no-op, if this is the
+ // last slot).
+ Set(i, Get(last_index));
+ Set(last_index, HeapObjectReference::ClearedValue());
+ set_length(last_index);
+ return true;
+ }
+ }
+ return false;
+}
+
// static
Handle<WeakArrayList> PrototypeUsers::Add(Isolate* isolate,
Handle<WeakArrayList> array,
@@ -10604,18 +10598,12 @@ Handle<WeakArrayList> PrototypeUsers::Add(Isolate* isolate,
}
WeakArrayList* PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
- CompactionCallback callback) {
+ CompactionCallback callback,
+ PretenureFlag pretenure) {
if (array->length() == 0) {
return *array;
}
- // Count the amount of live references.
- int new_length = kFirstIndex;
- for (int i = kFirstIndex; i < array->length(); i++) {
- MaybeObject* element = array->Get(i);
- if (element->IsSmi()) continue;
- if (element->IsClearedWeakHeapObject()) continue;
- ++new_length;
- }
+ int new_length = kFirstIndex + array->CountLiveWeakReferences();
if (new_length == array->length()) {
return *array;
}
@@ -10623,7 +10611,7 @@ WeakArrayList* PrototypeUsers::Compact(Handle<WeakArrayList> array, Heap* heap,
Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
heap->isolate(),
handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
- new_length);
+ new_length, pretenure);
// Allocation might have caused GC and turned some of the elements into
// cleared weak heap objects. Count the number of live objects again.
int copy_to = kFirstIndex;
@@ -10672,14 +10660,16 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
Handle<FrameArray> FrameArray::AppendWasmFrame(
Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
int wasm_function_index, wasm::WasmCode* code, int offset, int flags) {
+ Isolate* isolate = wasm_instance->GetIsolate();
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
- Handle<FrameArray> array =
- EnsureSpace(wasm_instance->GetIsolate(), in, new_length);
+ Handle<FrameArray> array = EnsureSpace(isolate, in, new_length);
+ // The {code} will be {nullptr} for interpreted wasm frames.
+ Handle<Foreign> code_foreign =
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(code));
array->SetWasmInstance(frame_count, *wasm_instance);
array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
- // The {code} will be {nullptr} for interpreted wasm frames.
- array->SetIsWasmInterpreterFrame(frame_count, Smi::FromInt(code == nullptr));
+ array->SetWasmCodeObject(frame_count, *code_foreign);
array->SetOffset(frame_count, Smi::FromInt(offset));
array->SetFlags(frame_count, Smi::FromInt(flags));
array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
@@ -12602,6 +12592,9 @@ static void GetMinInobjectSlack(Map* map, void* data) {
}
}
+int Map::InstanceSizeFromSlack(int slack) const {
+ return instance_size() - slack * kPointerSize;
+}
static void ShrinkInstanceSize(Map* map, void* data) {
int slack = *reinterpret_cast<int*>(data);
@@ -12610,7 +12603,7 @@ static void ShrinkInstanceSize(Map* map, void* data) {
int old_visitor_id = Map::GetVisitorId(map);
int new_unused = map->UnusedPropertyFields() - slack;
#endif
- map->set_instance_size(map->instance_size() - slack * kPointerSize);
+ map->set_instance_size(map->InstanceSizeFromSlack(slack));
map->set_construction_counter(Map::kNoSlackTracking);
DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
DCHECK_EQ(new_unused, map->UnusedPropertyFields());
@@ -12620,7 +12613,7 @@ static void StopSlackTracking(Map* map, void* data) {
map->set_construction_counter(Map::kNoSlackTracking);
}
-void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
+int Map::ComputeMinObjectSlack(Isolate* isolate) {
DisallowHeapAllocation no_gc;
// Has to be an initial map.
DCHECK(GetBackPointer()->IsUndefined(isolate));
@@ -12628,6 +12621,16 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
int slack = UnusedPropertyFields();
TransitionsAccessor transitions(isolate, this, &no_gc);
transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
+ return slack;
+}
+
+void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ // Has to be an initial map.
+ DCHECK(GetBackPointer()->IsUndefined(isolate));
+
+ int slack = ComputeMinObjectSlack(isolate);
+ TransitionsAccessor transitions(isolate, this, &no_gc);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
transitions.TraverseTransitionTree(&ShrinkInstanceSize, &slack);
@@ -12943,30 +12946,6 @@ bool Map::IsPrototypeChainInvalidated(Map* map) {
}
// static
-Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSReceiver> prototype,
- Isolate* isolate) {
- DCHECK(!prototype.is_null());
- if (prototype->IsJSProxy()) {
- Handle<WeakCell> cell = isolate->factory()->NewWeakCell(prototype);
- return cell;
- }
-
- Handle<PrototypeInfo> proto_info =
- GetOrCreatePrototypeInfo(Handle<JSObject>::cast(prototype), isolate);
- Object* maybe_cell = proto_info->weak_cell();
- // Return existing cell if it's already created.
- if (maybe_cell->IsWeakCell()) {
- Handle<WeakCell> cell(WeakCell::cast(maybe_cell), isolate);
- DCHECK(!cell->cleared());
- return cell;
- }
- // Otherwise create a new cell.
- Handle<WeakCell> cell = isolate->factory()->NewWeakCell(prototype);
- proto_info->set_weak_cell(*cell);
- return cell;
-}
-
-// static
void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
Handle<Object> prototype,
bool enable_prototype_setup_mode) {
@@ -13147,6 +13126,12 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_DATE_TYPE:
case JS_FUNCTION_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+#ifdef V8_INTL_SUPPORT
+ case JS_INTL_COLLATOR_TYPE:
+ case JS_INTL_LIST_FORMAT_TYPE:
+ case JS_INTL_PLURAL_RULES_TYPE:
+ case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
+#endif
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MAP_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
@@ -13200,7 +13185,6 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case PROPERTY_CELL_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
case SYMBOL_TYPE:
- case WEAK_CELL_TYPE:
case ALLOCATION_SITE_TYPE:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -13392,6 +13376,14 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
return map;
}
+int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
+ if (has_prototype_slot() && has_initial_map() &&
+ initial_map()->IsInobjectSlackTrackingInProgress()) {
+ int slack = initial_map()->ComputeMinObjectSlack(isolate);
+ return initial_map()->InstanceSizeFromSlack(slack);
+ }
+ return initial_map()->instance_size();
+}
void JSFunction::PrintName(FILE* out) {
std::unique_ptr<char[]> name = shared()->DebugName()->ToCString();
@@ -13786,8 +13778,60 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
Script::Iterator::Iterator(Isolate* isolate)
: iterator_(isolate->heap()->script_list()) {}
+Script* Script::Iterator::Next() {
+ Object* o = iterator_.Next();
+ if (o != nullptr) {
+ return Script::cast(o);
+ }
+ return nullptr;
+}
+
+Code* SharedFunctionInfo::GetCode() const {
+ // ======
+ // NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
+ // GetSharedFunctionInfoCode method in code-stub-assembler.cc.
+ // ======
-Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
+ Isolate* isolate = GetIsolate();
+ Object* data = function_data();
+ if (data->IsSmi()) {
+ // Holding a Smi means we are a builtin.
+ DCHECK(HasBuiltinId());
+ return isolate->builtins()->builtin(builtin_id());
+ } else if (data->IsBytecodeArray()) {
+ // Having a bytecode array means we are a compiled, interpreted function.
+ DCHECK(HasBytecodeArray());
+ return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+ } else if (data->IsFixedArray()) {
+ // Having a fixed array means we are an asm.js/wasm function.
+ DCHECK(HasAsmWasmData());
+ return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
+ } else if (data->IsUncompiledData()) {
+ // Having uncompiled data (with or without scope) means we need to compile.
+ DCHECK(HasUncompiledData());
+ return isolate->builtins()->builtin(Builtins::kCompileLazy);
+ } else if (data->IsFunctionTemplateInfo()) {
+ // Having a function template info means we are an API function.
+ DCHECK(IsApiFunction());
+ return isolate->builtins()->builtin(Builtins::kHandleApiCall);
+ } else if (data->IsWasmExportedFunctionData()) {
+ // Having a WasmExportedFunctionData means the code is in there.
+ DCHECK(HasWasmExportedFunctionData());
+ return wasm_exported_function_data()->wrapper_code();
+ } else if (data->IsInterpreterData()) {
+ Code* code = InterpreterTrampoline();
+ DCHECK(code->IsCode());
+ DCHECK(code->is_interpreter_trampoline_builtin());
+ return code;
+ }
+ UNREACHABLE();
+}
+
+WasmExportedFunctionData* SharedFunctionInfo::wasm_exported_function_data()
+ const {
+ DCHECK(HasWasmExportedFunctionData());
+ return WasmExportedFunctionData::cast(function_data());
+}
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
Script* script)
@@ -13824,11 +13868,11 @@ SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
sfi_iterator_(isolate, script_iterator_.Next()) {}
SharedFunctionInfo* SharedFunctionInfo::GlobalIterator::Next() {
- SharedFunctionInfo* next = noscript_sfi_iterator_.Next<SharedFunctionInfo>();
- if (next != nullptr) return next;
+ HeapObject* next = noscript_sfi_iterator_.Next();
+ if (next != nullptr) return SharedFunctionInfo::cast(next);
for (;;) {
next = sfi_iterator_.Next();
- if (next != nullptr) return next;
+ if (next != nullptr) return SharedFunctionInfo::cast(next);
Script* next_script = script_iterator_.Next();
if (next_script == nullptr) return nullptr;
sfi_iterator_.Reset(next_script);
@@ -13852,6 +13896,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// This is okay because the gc-time processing of these lists can tolerate
// duplicates.
if (script_object->IsScript()) {
+ DCHECK(!shared->script()->IsScript());
Handle<Script> script = Handle<Script>::cast(script_object);
Handle<WeakFixedArray> list =
handle(script->shared_function_infos(), isolate);
@@ -13864,25 +13909,31 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
}
#endif
list->Set(function_literal_id, HeapObjectReference::Weak(*shared));
+
+ // Remove shared function info from root array.
+ WeakArrayList* noscript_list =
+ isolate->heap()->noscript_shared_function_infos();
+ CHECK(noscript_list->RemoveOne(MaybeObjectHandle::Weak(shared)));
} else {
- Handle<Object> list = isolate->factory()->noscript_shared_function_infos();
+ DCHECK(shared->script()->IsScript());
+ Handle<WeakArrayList> list =
+ isolate->factory()->noscript_shared_function_infos();
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
- FixedArrayOfWeakCells::Iterator iterator(*list);
- SharedFunctionInfo* next;
- while ((next = iterator.Next<SharedFunctionInfo>()) != nullptr) {
+ WeakArrayList::Iterator iterator(*list);
+ HeapObject* next;
+ while ((next = iterator.Next()) != nullptr) {
DCHECK_NE(next, *shared);
}
}
#endif // DEBUG
- list = FixedArrayOfWeakCells::Add(isolate, list, shared);
+ list =
+ WeakArrayList::AddToEnd(isolate, list, MaybeObjectHandle::Weak(shared));
isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
- }
- if (shared->script()->IsScript()) {
// Remove shared function info from old script's list.
Script* old_script = Script::cast(shared->script());
@@ -13899,10 +13950,6 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
ReadOnlyRoots(isolate).undefined_value()));
}
}
- } else {
- // Remove shared function info from root array.
- Object* list = isolate->heap()->noscript_shared_function_infos();
- CHECK(FixedArrayOfWeakCells::cast(list)->Remove(shared));
}
// Finally set new script.
@@ -14164,7 +14211,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_is_declaration(lit->is_declaration());
shared_info->set_is_named_expression(lit->is_named_expression());
shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
- shared_info->set_inferred_name(*lit->inferred_name());
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_language_mode(lit->language_mode());
shared_info->set_is_wrapped(lit->is_wrapped());
@@ -14217,8 +14263,9 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
.ToHandle(&pre_parsed_scope_data)) {
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithPreParsedScope(
- lit->start_position(), lit->end_position(),
- lit->function_literal_id(), pre_parsed_scope_data);
+ lit->inferred_name(), lit->start_position(),
+ lit->end_position(), lit->function_literal_id(),
+ pre_parsed_scope_data);
shared_info->set_uncompiled_data(*data);
needs_position_info = false;
}
@@ -14228,7 +14275,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (needs_position_info) {
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
- lit->start_position(), lit->end_position(),
+ lit->inferred_name(), lit->start_position(), lit->end_position(),
lit->function_literal_id());
shared_info->set_uncompiled_data(*data);
}
@@ -14246,9 +14293,10 @@ void SharedFunctionInfo::SetExpectedNofPropertiesFromEstimate(
// so we can afford to adjust the estimate generously.
estimate += 8;
- // Limit actual estimate to fit in a 16 bit field, we will never allocate
+ // Limit actual estimate to fit in an 8 bit field, we will never allocate
// more than this in any case.
- estimate = std::min(estimate, kMaxUInt16);
+ STATIC_ASSERT(JSObject::kMaxInObjectProperties <= kMaxUInt8);
+ estimate = std::min(estimate, kMaxUInt8);
set_expected_nof_properties(estimate);
}
@@ -14325,11 +14373,11 @@ void Code::CopyFrom(Heap* heap, const CodeDesc& desc) {
}
void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
- // copy code
+ // Copy code.
CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
static_cast<size_t>(desc.instr_size));
- // copy unwinding info, if any
+ // Copy unwinding info, if any.
if (desc.unwinding_info) {
DCHECK_GT(desc.unwinding_info_size, 0);
set_unwinding_info_size(desc.unwinding_info_size);
@@ -14338,20 +14386,15 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
static_cast<size_t>(desc.unwinding_info_size));
}
- // copy reloc info
+ // Copy reloc info.
CopyBytes(relocation_start(),
desc.buffer + desc.buffer_size - desc.reloc_size,
static_cast<size_t>(desc.reloc_size));
- // unbox handles and relocate
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::kApplyMask;
- // Needed to find target_object and runtime_entry on X64
+ // Unbox handles and relocate.
Assembler* origin = desc.origin;
AllowDeferredHandleDereference embedding_raw_address;
+ const int mode_mask = RelocInfo::PostCodegenRelocationMask();
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -14359,8 +14402,8 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsCodeTargetMode(mode)) {
- // rewrite code handles to direct pointers to the first instruction in the
- // code object
+ // Rewrite code handles to direct pointers to the first instruction in the
+ // code object.
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(code->raw_instruction_start(),
@@ -14569,9 +14612,14 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
bool is_process_independent = true;
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
+ defined(V8_TARGET_ARCH_ARM)
+ // On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for isolate
+ // independent builtins in the snapshot. They are later rewritten as
+ // pc-relative jumps to the off-heap instruction stream and are thus
+ // process-independent.
+ // See also: FinalizeEmbeddedCodeTargets.
if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
- // Off-heap code targets are later rewritten as pc-relative jumps to the
- // off-heap instruction stream and are thus process-independent.
Address target_address = it.rinfo()->target_address();
if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
@@ -14579,35 +14627,13 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
CHECK(target->IsCode());
if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
}
+#endif
is_process_independent = false;
}
return is_process_independent;
}
-Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
- DCHECK(code->kind() == OPTIMIZED_FUNCTION);
- WeakCell* raw_cell = code->CachedWeakCell();
- if (raw_cell != nullptr) {
- return Handle<WeakCell>(raw_cell, code->GetIsolate());
- }
- Handle<WeakCell> cell = code->GetIsolate()->factory()->NewWeakCell(code);
- DeoptimizationData::cast(code->deoptimization_data())
- ->SetWeakCellCache(*cell);
- return cell;
-}
-
-WeakCell* Code::CachedWeakCell() {
- DCHECK(kind() == OPTIMIZED_FUNCTION);
- Object* weak_cell_cache =
- DeoptimizationData::cast(deoptimization_data())->WeakCellCache();
- if (weak_cell_cache->IsWeakCell()) {
- DCHECK(this == WeakCell::cast(weak_cell_cache)->value());
- return WeakCell::cast(weak_cell_cache);
- }
- return nullptr;
-}
-
bool Code::Inlines(SharedFunctionInfo* sfi) {
// We can only check for inlining for optimized code.
DCHECK(is_optimized_code());
@@ -14910,6 +14936,24 @@ void Code::PrintBuiltinCode(Isolate* isolate, const char* name) {
}
}
+namespace {
+
+inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os, Code* code,
+ Address begin, size_t size,
+ Address current_pc) {
+ Address end = begin + size;
+ // TODO(mstarzinger): Refactor CodeReference to avoid the
+ // unhandlified->handlified transition.
+ AllowHandleAllocation allow_handles;
+ DisallowHeapAllocation no_gc;
+ HandleScope handle_scope(isolate);
+ Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
+ reinterpret_cast<byte*>(end),
+ CodeReference(handle(code, isolate)), current_pc);
+}
+
+} // namespace
+
void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
Isolate* isolate = GetIsolate();
os << "kind = " << Kind2String(kind()) << "\n";
@@ -14928,9 +14972,16 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
os << "stack_slots = " << stack_slots() << "\n";
}
os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
- os << "address = " << static_cast<const void*>(this) << "\n";
+ os << "address = " << static_cast<const void*>(this) << "\n\n";
+
+ if (is_off_heap_trampoline()) {
+ int trampoline_size = raw_instruction_size();
+ os << "Trampoline (size = " << trampoline_size << ")\n";
+ DisassembleCodeRange(isolate, os, this, raw_instruction_start(),
+ trampoline_size, current_pc);
+ os << "\n";
+ }
- os << "Body (size = " << InstructionSize() << ")\n";
{
int size = InstructionSize();
int safepoint_offset =
@@ -14942,25 +14993,16 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
int code_size =
Min(handler_offset, Min(safepoint_offset, constant_pool_offset));
os << "Instructions (size = " << code_size << ")\n";
- Address begin = InstructionStart();
- Address end = begin + code_size;
- {
- // TODO(mstarzinger): Refactor CodeReference to avoid the
- // unhandlified->handlified transition.
- AllowHandleAllocation allow_handles;
- DisallowHeapAllocation no_gc;
- HandleScope handle_scope(isolate);
- Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
- reinterpret_cast<byte*>(end),
- CodeReference(handle(this, isolate)), current_pc);
- }
+ DisassembleCodeRange(isolate, os, this, InstructionStart(), code_size,
+ current_pc);
if (constant_pool_offset < size) {
int constant_pool_size = safepoint_offset - constant_pool_offset;
DCHECK_EQ(constant_pool_size & kPointerAlignmentMask, 0);
os << "\nConstant Pool (size = " << constant_pool_size << ")\n";
Vector<char> buf = Vector<char>::New(50);
- intptr_t* ptr = reinterpret_cast<intptr_t*>(begin + constant_pool_offset);
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(InstructionStart() +
+ constant_pool_offset);
for (int i = 0; i < constant_pool_size; i += kPointerSize, ptr++) {
SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
os << static_cast<const void*>(ptr) << " " << buf.start() << "\n";
@@ -15150,7 +15192,7 @@ void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
array->GetElementsAccessor()->SetLength(array, new_length);
}
-DependentCode* DependentCode::Get(Handle<HeapObject> object) {
+DependentCode* DependentCode::GetDependentCode(Handle<HeapObject> object) {
if (object->IsMap()) {
return Handle<Map>::cast(object)->dependent_code();
} else if (object->IsPropertyCell()) {
@@ -15161,7 +15203,8 @@ DependentCode* DependentCode::Get(Handle<HeapObject> object) {
UNREACHABLE();
}
-void DependentCode::Set(Handle<HeapObject> object, Handle<DependentCode> dep) {
+void DependentCode::SetDependentCode(Handle<HeapObject> object,
+ Handle<DependentCode> dep) {
if (object->IsMap()) {
Handle<Map>::cast(object)->set_dependent_code(*dep);
} else if (object->IsPropertyCell()) {
@@ -15173,28 +15216,30 @@ void DependentCode::Set(Handle<HeapObject> object, Handle<DependentCode> dep) {
}
}
-void DependentCode::InstallDependency(Isolate* isolate, Handle<WeakCell> cell,
+void DependentCode::InstallDependency(Isolate* isolate, MaybeObjectHandle code,
Handle<HeapObject> object,
DependencyGroup group) {
- Handle<DependentCode> old_deps(DependentCode::Get(object), isolate);
+ Handle<DependentCode> old_deps(DependentCode::GetDependentCode(object),
+ isolate);
Handle<DependentCode> new_deps =
- InsertWeakCode(isolate, old_deps, group, cell);
+ InsertWeakCode(isolate, old_deps, group, code);
// Update the list head if necessary.
- if (!new_deps.is_identical_to(old_deps)) DependentCode::Set(object, new_deps);
+ if (!new_deps.is_identical_to(old_deps))
+ DependentCode::SetDependentCode(object, new_deps);
}
Handle<DependentCode> DependentCode::InsertWeakCode(
Isolate* isolate, Handle<DependentCode> entries, DependencyGroup group,
- Handle<WeakCell> code_cell) {
+ MaybeObjectHandle code) {
if (entries->length() == 0 || entries->group() > group) {
// There is no such group.
- return DependentCode::New(isolate, group, code_cell, entries);
+ return DependentCode::New(isolate, group, code, entries);
}
if (entries->group() < group) {
// The group comes later in the list.
Handle<DependentCode> old_next(entries->next_link(), isolate);
Handle<DependentCode> new_next =
- InsertWeakCode(isolate, old_next, group, code_cell);
+ InsertWeakCode(isolate, old_next, group, code);
if (!old_next.is_identical_to(new_next)) {
entries->set_next_link(*new_next);
}
@@ -15204,24 +15249,24 @@ Handle<DependentCode> DependentCode::InsertWeakCode(
int count = entries->count();
// Check for existing entry to avoid duplicates.
for (int i = 0; i < count; i++) {
- if (entries->object_at(i) == *code_cell) return entries;
+ if (entries->object_at(i) == *code) return entries;
}
if (entries->length() < kCodesStartIndex + count + 1) {
entries = EnsureSpace(isolate, entries);
// Count could have changed, reload it.
count = entries->count();
}
- entries->set_object_at(count, *code_cell);
+ entries->set_object_at(count, *code);
entries->set_count(count + 1);
return entries;
}
Handle<DependentCode> DependentCode::New(Isolate* isolate,
DependencyGroup group,
- Handle<Object> object,
+ MaybeObjectHandle object,
Handle<DependentCode> next) {
Handle<DependentCode> result = Handle<DependentCode>::cast(
- isolate->factory()->NewFixedArray(kCodesStartIndex + 1, TENURED));
+ isolate->factory()->NewWeakFixedArray(kCodesStartIndex + 1, TENURED));
result->set_next_link(*next);
result->set_flags(GroupField::encode(group) | CountField::encode(1));
result->set_object_at(0, *object);
@@ -15234,7 +15279,7 @@ Handle<DependentCode> DependentCode::EnsureSpace(
int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
int grow_by = capacity - entries->length();
return Handle<DependentCode>::cast(
- isolate->factory()->CopyFixedArrayAndGrow(entries, grow_by, TENURED));
+ isolate->factory()->CopyWeakFixedArrayAndGrow(entries, grow_by, TENURED));
}
@@ -15242,8 +15287,8 @@ bool DependentCode::Compact() {
int old_count = count();
int new_count = 0;
for (int i = 0; i < old_count; i++) {
- Object* obj = object_at(i);
- if (!obj->IsWeakCell() || !WeakCell::cast(obj)->cleared()) {
+ MaybeObject* obj = object_at(i);
+ if (!obj->IsClearedWeakHeapObject()) {
if (i != new_count) {
copy(i, new_count);
}
@@ -15257,58 +15302,19 @@ bool DependentCode::Compact() {
return new_count < old_count;
}
-
-void DependentCode::RemoveCompilationDependencies(
- DependentCode::DependencyGroup group, Foreign* info) {
- if (this->length() == 0 || this->group() > group) {
- // There is no such group.
- return;
- }
- if (this->group() < group) {
- // The group comes later in the list.
- next_link()->RemoveCompilationDependencies(group, info);
- return;
- }
- DCHECK_EQ(group, this->group());
- DisallowHeapAllocation no_allocation;
- int old_count = count();
- // Find compilation info wrapper.
- int info_pos = -1;
- for (int i = 0; i < old_count; i++) {
- if (object_at(i) == info) {
- info_pos = i;
- break;
- }
- }
- if (info_pos == -1) return; // Not found.
- // Use the last code to fill the gap.
- if (info_pos < old_count - 1) {
- copy(old_count - 1, info_pos);
- }
- clear_at(old_count - 1);
- set_count(old_count - 1);
-
-#ifdef DEBUG
- for (int i = 0; i < old_count - 1; i++) {
- DCHECK(object_at(i) != info);
- }
-#endif
-}
-
-
-bool DependentCode::Contains(DependencyGroup group, WeakCell* code_cell) {
+bool DependentCode::Contains(DependencyGroup group, MaybeObject* code) {
if (this->length() == 0 || this->group() > group) {
// There is no such group.
return false;
}
if (this->group() < group) {
// The group comes later in the list.
- return next_link()->Contains(group, code_cell);
+ return next_link()->Contains(group, code);
}
DCHECK_EQ(group, this->group());
int count = this->count();
for (int i = 0; i < count; i++) {
- if (object_at(i) == code_cell) return true;
+ if (object_at(i) == code) return true;
}
return false;
}
@@ -15345,10 +15351,9 @@ bool DependentCode::MarkCodeForDeoptimization(
bool marked = false;
int count = this->count();
for (int i = 0; i < count; i++) {
- Object* obj = object_at(i);
- WeakCell* cell = WeakCell::cast(obj);
- if (cell->cleared()) continue;
- Code* code = Code::cast(cell->value());
+ MaybeObject* obj = object_at(i);
+ if (obj->IsClearedWeakHeapObject()) continue;
+ Code* code = Code::cast(obj->ToWeakHeapObject());
if (!code->marked_for_deoptimization()) {
code->SetMarkedForDeoptimization(DependencyGroupName(group));
marked = true;
@@ -15768,8 +15773,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
}
ElementsKind to = value->OptimalElementsKind();
- if (IsHoleyOrDictionaryElementsKind(kind) || !object->IsJSArray() ||
- index > old_length) {
+ if (IsHoleyElementsKind(kind) || !object->IsJSArray() || index > old_length) {
to = GetHoleyElementsKind(to);
kind = GetHoleyElementsKind(kind);
}
@@ -15833,7 +15837,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
Handle<JSArray> boilerplate(JSArray::cast(site->boilerplate()), isolate);
ElementsKind kind = boilerplate->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
- if (IsHoleyOrDictionaryElementsKind(kind)) {
+ if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
@@ -15861,7 +15865,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
// The AllocationSite is for a constructed Array.
ElementsKind kind = site->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
- if (IsHoleyOrDictionaryElementsKind(kind)) {
+ if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
@@ -16037,10 +16041,9 @@ int JSObject::GetFastElementsUsage() {
case SLOW_STRING_WRAPPER_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NO_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
UNREACHABLE();
}
@@ -16123,34 +16126,10 @@ bool FixedArrayBase::IsCowArray() const {
return map() == GetReadOnlyRoots().fixed_cow_array_map();
}
-bool JSObject::WasConstructedFromApiFunction() {
+bool JSObject::IsApiWrapper() {
auto instance_type = map()->instance_type();
- bool is_api_object = instance_type == JS_API_OBJECT_TYPE ||
- instance_type == JS_SPECIAL_API_OBJECT_TYPE;
- bool is_wasm_object =
- instance_type == WASM_GLOBAL_TYPE || instance_type == WASM_MEMORY_TYPE ||
- instance_type == WASM_MODULE_TYPE ||
- instance_type == WASM_INSTANCE_TYPE || instance_type == WASM_TABLE_TYPE;
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- Object* maybe_constructor = map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
- DCHECK_EQ(constructor->shared()->IsApiFunction(),
- is_api_object || is_wasm_object);
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
- DCHECK(is_api_object || is_wasm_object);
- } else {
- return false;
- }
- }
-#endif
- // TODO(titzer): Clean this up somehow. WebAssembly objects should not be
- // considered "constructed from API functions" even though they have
- // function template info, since that would make the V8 GC identify them to
- // the embedder, e.g. the Oilpan GC.
- USE(is_wasm_object);
- return is_api_object;
+ return instance_type == JS_API_OBJECT_TYPE ||
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE;
}
const char* Symbol::PrivateSymbolToName() const {
@@ -16964,128 +16943,6 @@ uint32_t HashTable<Derived, Shape>::FindInsertionEntry(uint32_t hash) {
return entry;
}
-namespace {
-
-bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
- Handle<Object>* index) {
- DCHECK(s->IsString() || s->IsSmi());
-
- Handle<Object> result;
- if (s->IsSmi()) {
- result = s;
- } else {
- result = String::ToNumber(isolate, Handle<String>::cast(s));
- if (!result->IsMinusZero()) {
- Handle<String> str = Object::ToString(isolate, result).ToHandleChecked();
- // Avoid treating strings like "2E1" and "20" as the same key.
- if (!str->SameValue(*s)) return false;
- }
- }
- *index = result;
- return true;
-}
-
-} // anonymous namespace
-
-// ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc
-// static
-Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
- Handle<JSTypedArray> o,
- Handle<Object> key,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
- // 1. Assert: IsPropertyKey(P) is true.
- DCHECK(key->IsName() || key->IsNumber());
- // 2. Assert: O is an Object that has a [[ViewedArrayBuffer]] internal slot.
- // 3. If Type(P) is String, then
- if (key->IsString() || key->IsSmi()) {
- // 3a. Let numericIndex be ! CanonicalNumericIndexString(P)
- // 3b. If numericIndex is not undefined, then
- Handle<Object> numeric_index;
- if (CanonicalNumericIndexString(isolate, key, &numeric_index)) {
- // 3b i. If IsInteger(numericIndex) is false, return false.
- // 3b ii. If numericIndex = -0, return false.
- // 3b iii. If numericIndex < 0, return false.
- // FIXME: the standard allows up to 2^53 elements.
- uint32_t index;
- if (numeric_index->IsMinusZero() || !numeric_index->ToUint32(&index)) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
- }
- // 3b iv. Let length be O.[[ArrayLength]].
- uint32_t length = o->length()->Number();
- // 3b v. If numericIndex ≥ length, return false.
- if (index >= length) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
- }
- // 3b vi. If IsAccessorDescriptor(Desc) is true, return false.
- if (PropertyDescriptor::IsAccessorDescriptor(desc)) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed, key));
- }
- // 3b vii. If Desc has a [[Configurable]] field and if
- // Desc.[[Configurable]] is true, return false.
- // 3b viii. If Desc has an [[Enumerable]] field and if Desc.[[Enumerable]]
- // is false, return false.
- // 3b ix. If Desc has a [[Writable]] field and if Desc.[[Writable]] is
- // false, return false.
- if ((desc->has_configurable() && desc->configurable()) ||
- (desc->has_enumerable() && !desc->enumerable()) ||
- (desc->has_writable() && !desc->writable())) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kRedefineDisallowed, key));
- }
- // 3b x. If Desc has a [[Value]] field, then
- // 3b x 1. Let value be Desc.[[Value]].
- // 3b x 2. Return ? IntegerIndexedElementSet(O, numericIndex, value).
- if (desc->has_value()) {
- if (!desc->has_configurable()) desc->set_configurable(false);
- if (!desc->has_enumerable()) desc->set_enumerable(true);
- if (!desc->has_writable()) desc->set_writable(true);
- Handle<Object> value = desc->value();
- RETURN_ON_EXCEPTION_VALUE(isolate,
- SetOwnElementIgnoreAttributes(
- o, index, value, desc->ToAttributes()),
- Nothing<bool>());
- }
- // 3b xi. Return true.
- return Just(true);
- }
- }
- // 4. Return ! OrdinaryDefineOwnProperty(O, P, Desc).
- return OrdinaryDefineOwnProperty(isolate, o, key, desc, should_throw);
-}
-
-ExternalArrayType JSTypedArray::type() {
- switch (elements()->map()->instance_type()) {
-#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- return kExternal##Type##Array;
-
- TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
-#undef INSTANCE_TYPE_TO_ARRAY_TYPE
-
- default:
- UNREACHABLE();
- }
-}
-
-
-size_t JSTypedArray::element_size() {
- switch (elements()->map()->instance_type()) {
-#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- return size;
-
- TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE)
-#undef INSTANCE_TYPE_TO_ELEMENT_SIZE
-
- default:
- UNREACHABLE();
- }
-}
-
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
// Regardless of whether the property is there or not invalidate
@@ -17219,10 +17076,12 @@ void MigrateExternalStringResource(Isolate* isolate, String* from, String* to) {
const typename StringClass::Resource* to_resource = cast_to->resource();
if (to_resource == nullptr) {
// |to| is a just-created internalized copy of |from|. Migrate the resource.
- cast_to->set_resource(cast_from->resource());
+ cast_to->SetResource(isolate, cast_from->resource());
// Zap |from|'s resource pointer to reflect the fact that |from| has
// relinquished ownership of its resource.
- cast_from->set_resource(nullptr);
+ isolate->heap()->UpdateExternalString(
+ from, ExternalString::cast(from)->ExternalPayloadSize(), 0);
+ cast_from->SetResource(isolate, nullptr);
} else if (to_resource != cast_from->resource()) {
// |to| already existed and has its own resource. Finalize |from|.
isolate->heap()->FinalizeExternalString(from);
@@ -17595,12 +17454,17 @@ int SearchLiteralsMapEntry(CompilationCacheTable* cache, int cache_entry,
DCHECK(native_context->IsNativeContext());
Object* obj = cache->get(cache_entry);
- if (obj->IsFixedArray()) {
- FixedArray* literals_map = FixedArray::cast(obj);
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj->IsFixedArray());
+ if (obj->IsWeakFixedArray()) {
+ WeakFixedArray* literals_map = WeakFixedArray::cast(obj);
int length = literals_map->length();
for (int i = 0; i < length; i += kLiteralEntryLength) {
- if (WeakCell::cast(literals_map->get(i + kLiteralContextOffset))
- ->value() == native_context) {
+ DCHECK(literals_map->Get(i + kLiteralContextOffset)
+ ->IsWeakOrClearedHeapObject());
+ if (literals_map->Get(i + kLiteralContextOffset) ==
+ HeapObjectReference::Weak(native_context)) {
return i;
}
}
@@ -17614,23 +17478,25 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
Isolate* isolate = native_context->GetIsolate();
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kLiteralEntryLength == 2);
- Handle<FixedArray> new_literals_map;
+ Handle<WeakFixedArray> new_literals_map;
int entry;
Object* obj = cache->get(cache_entry);
- if (!obj->IsFixedArray() || FixedArray::cast(obj)->length() == 0) {
+ // Check that there's no confusion between FixedArray and WeakFixedArray (the
+ // object used to be a FixedArray here).
+ DCHECK(!obj->IsFixedArray());
+ if (!obj->IsWeakFixedArray() || WeakFixedArray::cast(obj)->length() == 0) {
new_literals_map =
- isolate->factory()->NewFixedArray(kLiteralInitialLength, TENURED);
+ isolate->factory()->NewWeakFixedArray(kLiteralInitialLength, TENURED);
entry = 0;
} else {
- Handle<FixedArray> old_literals_map(FixedArray::cast(obj), isolate);
+ Handle<WeakFixedArray> old_literals_map(WeakFixedArray::cast(obj), isolate);
entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
if (entry >= 0) {
// Just set the code of the entry.
- Handle<WeakCell> literals_cell =
- isolate->factory()->NewWeakCell(feedback_cell);
- old_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
+ old_literals_map->Set(entry + kLiteralLiteralsOffset,
+ HeapObjectReference::Weak(*feedback_cell));
return;
}
@@ -17638,8 +17504,8 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
DCHECK_LT(entry, 0);
int length = old_literals_map->length();
for (int i = 0; i < length; i += kLiteralEntryLength) {
- if (WeakCell::cast(old_literals_map->get(i + kLiteralContextOffset))
- ->cleared()) {
+ if (old_literals_map->Get(i + kLiteralContextOffset)
+ ->IsClearedWeakHeapObject()) {
new_literals_map = old_literals_map;
entry = i;
break;
@@ -17648,26 +17514,25 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
if (entry < 0) {
// Copy old optimized code map and append one new entry.
- new_literals_map = isolate->factory()->CopyFixedArrayAndGrow(
+ new_literals_map = isolate->factory()->CopyWeakFixedArrayAndGrow(
old_literals_map, kLiteralEntryLength, TENURED);
entry = old_literals_map->length();
}
}
- Handle<WeakCell> literals_cell =
- isolate->factory()->NewWeakCell(feedback_cell);
- WeakCell* context_cell = native_context->self_weak_cell();
-
- new_literals_map->set(entry + kLiteralContextOffset, context_cell);
- new_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
+ new_literals_map->Set(entry + kLiteralContextOffset,
+ HeapObjectReference::Weak(*native_context));
+ new_literals_map->Set(entry + kLiteralLiteralsOffset,
+ HeapObjectReference::Weak(*feedback_cell));
#ifdef DEBUG
for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
- WeakCell* cell =
- WeakCell::cast(new_literals_map->get(i + kLiteralContextOffset));
- DCHECK(cell->cleared() || cell->value()->IsNativeContext());
- cell = WeakCell::cast(new_literals_map->get(i + kLiteralLiteralsOffset));
- DCHECK(cell->cleared() || (cell->value()->IsFeedbackCell()));
+ MaybeObject* object = new_literals_map->Get(i + kLiteralContextOffset);
+ DCHECK(object->IsClearedWeakHeapObject() ||
+ object->ToWeakHeapObject()->IsNativeContext());
+ object = new_literals_map->Get(i + kLiteralLiteralsOffset);
+ DCHECK(object->IsClearedWeakHeapObject() ||
+ object->ToWeakHeapObject()->IsFeedbackCell());
}
#endif
@@ -17682,12 +17547,14 @@ FeedbackCell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
FeedbackCell* result = nullptr;
int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
if (entry >= 0) {
- FixedArray* literals_map = FixedArray::cast(cache->get(cache_entry));
+ WeakFixedArray* literals_map =
+ WeakFixedArray::cast(cache->get(cache_entry));
DCHECK_LE(entry + kLiteralEntryLength, literals_map->length());
-    WeakCell* cell =
-        WeakCell::cast(literals_map->get(entry + kLiteralLiteralsOffset));
-    result = cell->cleared() ? nullptr : FeedbackCell::cast(cell->value());
+    MaybeObject* object = literals_map->Get(entry + kLiteralLiteralsOffset);
+    result = object->IsClearedWeakHeapObject()
+                 ? nullptr
+                 : FeedbackCell::cast(object->ToWeakHeapObject());
}
DCHECK(result == nullptr || result->IsFeedbackCell());
return result;
@@ -17698,6 +17565,10 @@ FeedbackCell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
Handle<String> src, Handle<Context> native_context,
LanguageMode language_mode) {
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the PartialSnapshotCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
native_context->GetIsolate());
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
@@ -17760,6 +17631,10 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<Context> native_context, LanguageMode language_mode,
Handle<SharedFunctionInfo> value) {
Isolate* isolate = native_context->GetIsolate();
+ // We use the empty function SFI as part of the key. Although the
+ // empty_function is native context dependent, the SFI is de-duped on
+ // snapshot builds by the PartialSnapshotCache, and so this does not prevent
+ // reuse of scripts in the compilation cache across native contexts.
Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
isolate);
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
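
Both LookupScript and PutScript build the same composite key from the source string, the shared empty-function SFI, and the language mode (plus a source position, kNoSourcePosition here). A rough standalone analogue of such a composite cache key, usable with an unordered-map style table (illustrative only, not V8's StringSharedKey):

#include <cstddef>
#include <functional>
#include <string>

// Source text + a shared "owner" identity + language mode, hashed together.
struct ScriptKey {
  std::string source;
  const void* shared_identity;  // stands in for the empty-function SFI
  int language_mode;

  bool operator==(const ScriptKey& other) const {
    return source == other.source &&
           shared_identity == other.shared_identity &&
           language_mode == other.language_mode;
  }
};

struct ScriptKeyHash {
  std::size_t operator()(const ScriptKey& k) const {
    std::size_t h = std::hash<std::string>()(k.source);
    h ^= std::hash<const void*>()(k.shared_identity) + 0x9e3779b9 + (h << 6);
    h ^= std::hash<int>()(k.language_mode) + 0x9e3779b9 + (h << 6);
    return h;
  }
};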
@@ -18721,185 +18596,6 @@ Handle<String> JSMessageObject::GetSourceLine() const {
return isolate->factory()->NewSubString(src, info.line_start, info.line_end);
}
-void JSArrayBuffer::Neuter() {
- CHECK(is_neuterable());
- CHECK(!was_neutered());
- CHECK(is_external());
- set_backing_store(nullptr);
- set_byte_length(Smi::kZero);
- set_was_neutered(true);
- set_is_neuterable(false);
- // Invalidate the neutering protector.
- Isolate* const isolate = GetIsolate();
- if (isolate->IsArrayBufferNeuteringIntact()) {
- isolate->InvalidateArrayBufferNeuteringProtector();
- }
-}
-
-void JSArrayBuffer::StopTrackingWasmMemory(Isolate* isolate) {
- DCHECK(is_wasm_memory());
- isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(backing_store());
- set_is_wasm_memory(false);
-}
-
-void JSArrayBuffer::FreeBackingStoreFromMainThread() {
- if (allocation_base() == nullptr) {
- return;
- }
- FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(),
- backing_store(), is_wasm_memory()});
- // Zero out the backing store and allocation base to avoid dangling
- // pointers.
- set_backing_store(nullptr);
-}
-
-// static
-void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
- if (allocation.is_wasm_memory) {
- wasm::WasmMemoryTracker* memory_tracker =
- isolate->wasm_engine()->memory_tracker();
- if (!memory_tracker->FreeMemoryIfIsWasmMemory(allocation.backing_store)) {
- CHECK(FreePages(allocation.allocation_base, allocation.length));
- }
- } else {
- isolate->array_buffer_allocator()->Free(allocation.allocation_base,
- allocation.length);
- }
-}
-
-void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
- set_bit_field(IsWasmMemory::update(bit_field(), is_wasm_memory));
-}
-
-void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- bool is_external, void* data, size_t byte_length,
- SharedFlag shared, bool is_wasm_memory) {
- DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
- v8::ArrayBuffer::kEmbedderFieldCount);
- for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
- array_buffer->SetEmbedderField(i, Smi::kZero);
- }
- array_buffer->set_bit_field(0);
- array_buffer->set_is_external(is_external);
- array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
- array_buffer->set_is_shared(shared == SharedFlag::kShared);
- array_buffer->set_is_wasm_memory(is_wasm_memory);
-
- Handle<Object> heap_byte_length =
- isolate->factory()->NewNumberFromSize(byte_length);
- CHECK(heap_byte_length->IsSmi() || heap_byte_length->IsHeapNumber());
- array_buffer->set_byte_length(*heap_byte_length);
- // Initialize backing store at last to avoid handling of |JSArrayBuffers| that
- // are currently being constructed in the |ArrayBufferTracker|. The
- // registration method below handles the case of registering a buffer that has
- // already been promoted.
- array_buffer->set_backing_store(data);
-
- if (data && !is_external) {
- isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
- }
-}
-
-namespace {
-
-inline int ConvertToMb(size_t size) {
- return static_cast<int>(size / static_cast<size_t>(MB));
-}
-
-} // namespace
-
-bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
- Isolate* isolate,
- size_t allocated_length,
- bool initialize, SharedFlag shared) {
- void* data;
- CHECK_NOT_NULL(isolate->array_buffer_allocator());
- if (allocated_length != 0) {
- if (allocated_length >= MB)
- isolate->counters()->array_buffer_big_allocations()->AddSample(
- ConvertToMb(allocated_length));
- if (shared == SharedFlag::kShared)
- isolate->counters()->shared_array_allocations()->AddSample(
- ConvertToMb(allocated_length));
- if (initialize) {
- data = isolate->array_buffer_allocator()->Allocate(allocated_length);
- } else {
- data = isolate->array_buffer_allocator()->AllocateUninitialized(
- allocated_length);
- }
- if (data == nullptr) {
- isolate->counters()->array_buffer_new_size_failures()->AddSample(
- ConvertToMb(allocated_length));
- return false;
- }
- } else {
- data = nullptr;
- }
-
- const bool is_external = false;
- JSArrayBuffer::Setup(array_buffer, isolate, is_external, data,
- allocated_length, shared);
- return true;
-}
-
-Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
- Handle<JSTypedArray> typed_array) {
- DCHECK(typed_array->is_on_heap());
-
- Isolate* isolate = typed_array->GetIsolate();
-
- DCHECK(IsFixedTypedArrayElementsKind(typed_array->GetElementsKind()));
-
- Handle<FixedTypedArrayBase> fixed_typed_array(
- FixedTypedArrayBase::cast(typed_array->elements()), isolate);
-
- Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
- isolate);
- // This code does not know how to materialize from wasm buffers.
- DCHECK(!buffer->is_wasm_memory());
-
- void* backing_store =
- isolate->array_buffer_allocator()->AllocateUninitialized(
- fixed_typed_array->DataSize());
- if (backing_store == nullptr) {
- isolate->heap()->FatalProcessOutOfMemory(
- "JSTypedArray::MaterializeArrayBuffer");
- }
- buffer->set_is_external(false);
- DCHECK(buffer->byte_length()->IsSmi() ||
- buffer->byte_length()->IsHeapNumber());
- DCHECK(NumberToInt32(buffer->byte_length()) == fixed_typed_array->DataSize());
- // Initialize backing store at last to avoid handling of |JSArrayBuffers| that
- // are currently being constructed in the |ArrayBufferTracker|. The
- // registration method below handles the case of registering a buffer that has
- // already been promoted.
- buffer->set_backing_store(backing_store);
- // RegisterNewArrayBuffer expects a valid length for adjusting counters.
- isolate->heap()->RegisterNewArrayBuffer(*buffer);
- memcpy(buffer->backing_store(),
- fixed_typed_array->DataPtr(),
- fixed_typed_array->DataSize());
- Handle<FixedTypedArrayBase> new_elements =
- isolate->factory()->NewFixedTypedArrayWithExternalPointer(
- fixed_typed_array->length(), typed_array->type(),
- static_cast<uint8_t*>(buffer->backing_store()));
-
- typed_array->set_elements(*new_elements);
- DCHECK(!typed_array->is_on_heap());
-
- return buffer;
-}
-
-Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
- if (!is_on_heap()) {
- Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
- GetIsolate());
- return array_buffer;
- }
- Handle<JSTypedArray> self(this, GetIsolate());
- return MaterializeArrayBuffer(self);
-}
-
Handle<PropertyCell> PropertyCell::InvalidateEntry(
Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry) {
// Swap with a copy.
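
The JSArrayBuffer/JSTypedArray methods removed above implemented, among other things, the "materialize" step: copy an on-heap typed array's element storage into a freshly allocated external backing store and repoint the view at it. A plain C++ sketch of that pattern (not V8 code; the allocator and types are stand-ins):

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <vector>

// Minimal model: allocate an external buffer, copy the bytes, repoint.
struct TypedArrayModel {
  std::vector<unsigned char> on_heap;  // element storage while "on heap"
  void* external = nullptr;            // external backing store, if any

  ~TypedArrayModel() { std::free(external); }

  void* Materialize() {
    if (external != nullptr) return external;  // already materialized
    const std::size_t size = on_heap.size();
    void* store = std::malloc(size == 0 ? 1 : size);
    if (store == nullptr) return nullptr;  // allocation failure: caller decides
    if (size != 0) std::memcpy(store, on_heap.data(), size);
    external = store;  // from now on, reads and writes go through the buffer
    on_heap.clear();   // the on-heap copy is no longer the source of truth
    return external;
  }
};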
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index be3e2b8dac..c848e92af7 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -75,7 +75,10 @@
// - JSDate
// - JSMessageObject
// - JSModuleNamespace
-// - JSLocale // If V8_INTL_SUPPORT enabled.
+// - JSCollator // If V8_INTL_SUPPORT enabled.
+// - JSListFormat // If V8_INTL_SUPPORT enabled.
+// - JSLocale // If V8_INTL_SUPPORT enabled.
+// - JSPluralRules // If V8_INTL_SUPPORT enabled.
// - JSRelativeTimeFormat // If V8_INTL_SUPPORT enabled.
// - WasmGlobalObject
// - WasmInstanceObject
@@ -105,7 +108,6 @@
// - ScopeInfo
// - ModuleInfo
// - ScriptContextTable
-// - FixedArrayOfWeakCells
// - FixedDoubleArray
// - Name
// - String
@@ -170,7 +172,6 @@
// - PromiseResolveThenableJobTask
// - Module
// - ModuleInfoEntry
-// - WeakCell
// - FeedbackCell
// - FeedbackVector
// - PreParsedScopeData
@@ -532,7 +533,6 @@ enum InstanceType : uint16_t {
STORE_HANDLER_TYPE,
UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE,
UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE,
- WEAK_CELL_TYPE,
WEAK_ARRAY_LIST_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
@@ -582,7 +582,10 @@ enum InstanceType : uint16_t {
JS_DATA_VIEW_TYPE,
#ifdef V8_INTL_SUPPORT
+ JS_INTL_COLLATOR_TYPE,
+ JS_INTL_LIST_FORMAT_TYPE,
JS_INTL_LOCALE_TYPE,
+ JS_INTL_PLURAL_RULES_TYPE,
JS_INTL_RELATIVE_TIME_FORMAT_TYPE,
#endif // V8_INTL_SUPPORT
@@ -692,9 +695,15 @@ class FixedArrayBase;
class PropertyArray;
class FunctionLiteral;
class FunctionTemplateInfo;
+class JSGeneratorObject;
+class JSAsyncGeneratorObject;
class JSGlobalObject;
+class JSGlobalProxy;
#ifdef V8_INTL_SUPPORT
+class JSCollator;
+class JSListFormat;
class JSLocale;
+class JSPluralRules;
class JSRelativeTimeFormat;
#endif // V8_INTL_SUPPORT
class JSPromise;
@@ -718,16 +727,12 @@ class FeedbackCell;
class FeedbackMetadata;
class FeedbackVector;
class UncompiledData;
-class WeakCell;
class TemplateInfo;
class TransitionArray;
class TemplateList;
template <typename T>
class ZoneForwardList;
-// A template-ized version of the IsXXX functions.
-template <class C> inline bool Is(Object* obj);
-
#ifdef OBJECT_PRINT
#define DECL_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
#else
@@ -783,7 +788,6 @@ template <class C> inline bool Is(Object* obj);
V(FixedArray) \
V(FixedArrayBase) \
V(FixedArrayExact) \
- V(FixedArrayOfWeakCells) \
V(FixedBigInt64Array) \
V(FixedBigUint64Array) \
V(FixedDoubleArray) \
@@ -831,6 +835,7 @@ template <class C> inline bool Is(Object* obj);
V(JSProxy) \
V(JSReceiver) \
V(JSRegExp) \
+ V(JSRegExpResult) \
V(JSRegExpStringIterator) \
V(JSSet) \
V(JSSetIterator) \
@@ -900,14 +905,16 @@ template <class C> inline bool Is(Object* obj);
V(WasmMemoryObject) \
V(WasmModuleObject) \
V(WasmTableObject) \
- V(WeakCell) \
V(WeakFixedArray) \
V(WeakArrayList)
#ifdef V8_INTL_SUPPORT
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
+ V(JSCollator) \
+ V(JSListFormat) \
V(JSLocale) \
+ V(JSPluralRules) \
V(JSRelativeTimeFormat)
#else
#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V)
@@ -933,6 +940,152 @@ template <class C> inline bool Is(Object* obj);
V(OptimizedOut, optimized_out) \
V(StaleRegister, stale_register)
+// List of object types that have a single unique instance type.
+#define INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
+ V(AllocationSite, ALLOCATION_SITE_TYPE) \
+ V(BigInt, BIGINT_TYPE) \
+ V(ObjectBoilerplateDescription, OBJECT_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(BreakPoint, TUPLE2_TYPE) \
+ V(BreakPointInfo, TUPLE2_TYPE) \
+ V(ByteArray, BYTE_ARRAY_TYPE) \
+ V(BytecodeArray, BYTECODE_ARRAY_TYPE) \
+ V(CallHandlerInfo, CALL_HANDLER_INFO_TYPE) \
+ V(Cell, CELL_TYPE) \
+ V(Code, CODE_TYPE) \
+ V(CodeDataContainer, CODE_DATA_CONTAINER_TYPE) \
+ V(CoverageInfo, FIXED_ARRAY_TYPE) \
+ V(DescriptorArray, DESCRIPTOR_ARRAY_TYPE) \
+ V(EphemeronHashTable, EPHEMERON_HASH_TABLE_TYPE) \
+ V(FeedbackCell, FEEDBACK_CELL_TYPE) \
+ V(FeedbackMetadata, FEEDBACK_METADATA_TYPE) \
+ V(FeedbackVector, FEEDBACK_VECTOR_TYPE) \
+ V(FixedArrayExact, FIXED_ARRAY_TYPE) \
+ V(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE) \
+ V(Foreign, FOREIGN_TYPE) \
+ V(FreeSpace, FREE_SPACE_TYPE) \
+ V(GlobalDictionary, GLOBAL_DICTIONARY_TYPE) \
+ V(HeapNumber, HEAP_NUMBER_TYPE) \
+ V(JSArgumentsObject, JS_ARGUMENTS_TYPE) \
+ V(JSArray, JS_ARRAY_TYPE) \
+ V(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE) \
+ V(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE) \
+ V(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE) \
+ V(JSBoundFunction, JS_BOUND_FUNCTION_TYPE) \
+ V(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JSDataView, JS_DATA_VIEW_TYPE) \
+ V(JSDate, JS_DATE_TYPE) \
+ V(JSError, JS_ERROR_TYPE) \
+ V(JSFunction, JS_FUNCTION_TYPE) \
+ V(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE) \
+ V(JSGlobalProxy, JS_GLOBAL_PROXY_TYPE) \
+ V(JSMap, JS_MAP_TYPE) \
+ V(JSMessageObject, JS_MESSAGE_OBJECT_TYPE) \
+ V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE) \
+ V(JSPromise, JS_PROMISE_TYPE) \
+ V(JSRegExp, JS_REGEXP_TYPE) \
+ V(JSRegExpResult, JS_ARRAY_TYPE) \
+ V(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE) \
+ V(JSSet, JS_SET_TYPE) \
+ V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
+ V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
+ V(JSValue, JS_VALUE_TYPE) \
+ V(JSWeakMap, JS_WEAK_MAP_TYPE) \
+ V(JSWeakSet, JS_WEAK_SET_TYPE) \
+ V(LoadHandler, LOAD_HANDLER_TYPE) \
+ V(Map, MAP_TYPE) \
+ V(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE) \
+ V(NameDictionary, NAME_DICTIONARY_TYPE) \
+ V(NativeContext, NATIVE_CONTEXT_TYPE) \
+ V(NumberDictionary, NUMBER_DICTIONARY_TYPE) \
+ V(Oddball, ODDBALL_TYPE) \
+ V(OrderedHashMap, ORDERED_HASH_MAP_TYPE) \
+ V(OrderedHashSet, ORDERED_HASH_SET_TYPE) \
+ V(PreParsedScopeData, PRE_PARSED_SCOPE_DATA_TYPE) \
+ V(PropertyArray, PROPERTY_ARRAY_TYPE) \
+ V(PropertyCell, PROPERTY_CELL_TYPE) \
+ V(PropertyDescriptorObject, FIXED_ARRAY_TYPE) \
+ V(ScopeInfo, SCOPE_INFO_TYPE) \
+ V(ScriptContextTable, SCRIPT_CONTEXT_TABLE_TYPE) \
+ V(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE) \
+ V(SimpleNumberDictionary, SIMPLE_NUMBER_DICTIONARY_TYPE) \
+ V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \
+ V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \
+ V(SourcePositionTableWithFrameCache, TUPLE2_TYPE) \
+ V(StoreHandler, STORE_HANDLER_TYPE) \
+ V(StringTable, STRING_TABLE_TYPE) \
+ V(Symbol, SYMBOL_TYPE) \
+ V(TemplateObjectDescription, TUPLE2_TYPE) \
+ V(TransitionArray, TRANSITION_ARRAY_TYPE) \
+ V(UncompiledDataWithoutPreParsedScope, \
+ UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE) \
+ V(UncompiledDataWithPreParsedScope, \
+ UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE) \
+ V(WasmGlobalObject, WASM_GLOBAL_TYPE) \
+ V(WasmInstanceObject, WASM_INSTANCE_TYPE) \
+ V(WasmMemoryObject, WASM_MEMORY_TYPE) \
+ V(WasmModuleObject, WASM_MODULE_TYPE) \
+ V(WasmTableObject, WASM_TABLE_TYPE) \
+ V(WeakArrayList, WEAK_ARRAY_LIST_TYPE)
+#ifdef V8_INTL_SUPPORT
+
+#define INSTANCE_TYPE_CHECKERS_SINGLE(V) \
+ INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V) \
+ V(JSCollator, JS_INTL_COLLATOR_TYPE) \
+ V(JSListFormat, JS_INTL_LIST_FORMAT_TYPE) \
+ V(JSLocale, JS_INTL_LOCALE_TYPE) \
+ V(JSPluralRules, JS_INTL_PLURAL_RULES_TYPE) \
+ V(JSRelativeTimeFormat, JS_INTL_RELATIVE_TIME_FORMAT_TYPE)
+
+#else
+
+#define INSTANCE_TYPE_CHECKERS_SINGLE(V) INSTANCE_TYPE_CHECKERS_SINGLE_BASE(V)
+
+#endif // V8_INTL_SUPPORT
+
+#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
+ V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \
+ V(Dictionary, FIRST_DICTIONARY_TYPE, LAST_DICTIONARY_TYPE) \
+ V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \
+ V(FixedTypedArrayBase, FIRST_FIXED_TYPED_ARRAY_TYPE, \
+ LAST_FIXED_TYPED_ARRAY_TYPE) \
+ V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \
+ V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
+ V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
+ V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \
+ V(Name, FIRST_TYPE, LAST_NAME_TYPE) \
+ V(String, FIRST_TYPE, FIRST_NONSTRING_TYPE - 1) \
+ V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE)
+
+#define INSTANCE_TYPE_CHECKERS_CUSTOM(V) \
+ V(FixedArrayBase) \
+ V(InternalizedString) \
+ V(JSObject)
+
+#define INSTANCE_TYPE_CHECKERS(V) \
+ INSTANCE_TYPE_CHECKERS_SINGLE(V) \
+ INSTANCE_TYPE_CHECKERS_RANGE(V) \
+ INSTANCE_TYPE_CHECKERS_CUSTOM(V)
+
+namespace InstanceTypeChecker {
+#define IS_TYPE_FUNCTION_DECL(Type, ...) \
+ V8_INLINE bool Is##Type(InstanceType instance_type);
+
+INSTANCE_TYPE_CHECKERS(IS_TYPE_FUNCTION_DECL)
+
+#define TYPED_ARRAY_IS_TYPE_FUNCTION_DECL(Type, ...) \
+ IS_TYPE_FUNCTION_DECL(Fixed##Type##Array)
+TYPED_ARRAYS(TYPED_ARRAY_IS_TYPE_FUNCTION_DECL)
+#undef TYPED_ARRAY_IS_TYPE_FUNCTION_DECL
+
+#define STRUCT_IS_TYPE_FUNCTION_DECL(NAME, Name, name) \
+ IS_TYPE_FUNCTION_DECL(Name)
+STRUCT_LIST(STRUCT_IS_TYPE_FUNCTION_DECL)
+#undef STRUCT_IS_TYPE_FUNCTION_DECL
+
+#undef IS_TYPE_FUNCTION_DECL
+} // namespace InstanceTypeChecker
+
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
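
The INSTANCE_TYPE_CHECKERS_* lists added above are X-macros: one master list of (Type, INSTANCE_TYPE) pairs that gets expanded with different per-entry macros, here to declare the InstanceTypeChecker::Is##Type predicates (and, elsewhere, to define them). A self-contained illustration of the technique with made-up names:

#include <cassert>

// One master list of (name, tag) pairs...
#define SHAPE_LIST(V) \
  V(Circle, 1)        \
  V(Square, 2)        \
  V(Triangle, 3)

// ...expanded once to declare the checkers...
#define DECLARE_IS(Name, tag) bool Is##Name(int instance_tag);
SHAPE_LIST(DECLARE_IS)
#undef DECLARE_IS

// ...and once more to define them.
#define DEFINE_IS(Name, tag) \
  bool Is##Name(int instance_tag) { return instance_tag == (tag); }
SHAPE_LIST(DEFINE_IS)
#undef DEFINE_IS

int main() {
  assert(IsCircle(1));
  assert(!IsSquare(3));
  return 0;
}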
@@ -993,6 +1146,15 @@ class Object {
#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
+#define MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (!(call).To(&dst)) { \
+ DCHECK(__isolate__->has_pending_exception()); \
+ return ReadOnlyRoots(__isolate__).exception(); \
+ } \
+ } while (false)
+
#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
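
The new MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION macro follows the same early-return shape as the existing MAYBE_RETURN helpers: unwrap a Maybe result into dst, or bail out with the pending exception object. A standalone model of that control flow with generic C++ types (std::optional stands in for Maybe<T>, a string sentinel for the exception; none of this is V8 API):

#include <iostream>
#include <optional>
#include <string>

// Sentinel playing the role of ReadOnlyRoots(isolate).exception().
static const std::string kException = "<exception>";

#define ASSIGN_OR_RETURN_EXCEPTION(dst, maybe)        \
  do {                                                \
    auto maybe_value_ = (maybe);                      \
    if (!maybe_value_.has_value()) return kException; \
    (dst) = *maybe_value_;                            \
  } while (false)

std::optional<int> ParseDigit(char c) {
  if (c < '0' || c > '9') return std::nullopt;  // the "pending exception" case
  return c - '0';
}

std::string RuntimeLikeFunction(char c) {
  int digit = 0;
  ASSIGN_OR_RETURN_EXCEPTION(digit, ParseDigit(c));  // early return on failure
  return "parsed " + std::to_string(digit);
}

int main() {
  std::cout << RuntimeLikeFunction('7') << "\n";  // parsed 7
  std::cout << RuntimeLikeFunction('x') << "\n";  // <exception>
}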
@@ -1128,7 +1290,7 @@ class Object {
// Get length property and apply ToLength.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetLengthFromArrayLike(
- Isolate* isolate, Handle<Object> object);
+ Isolate* isolate, Handle<JSReceiver> object);
// ES6 section 12.5.6 The typeof Operator
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
@@ -1356,18 +1518,12 @@ bool Object::IsHeapObject() const {
}
struct Brief {
- explicit Brief(const Object* const v) : value(v) {}
- const Object* value;
-};
-
-struct MaybeObjectBrief {
- explicit MaybeObjectBrief(const MaybeObject* const v) : value(v) {}
+ V8_EXPORT_PRIVATE explicit Brief(const Object* v);
+ explicit Brief(const MaybeObject* v) : value(v) {}
const MaybeObject* value;
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Brief& v);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- const MaybeObjectBrief& v);
// Smi represents integer Numbers that can be stored in 31 bits.
// Smis are immediate which means they are NOT allocated in the heap.
@@ -1515,20 +1671,6 @@ class HeapObject: public Object {
// places where it might not be safe to access it.
inline ReadOnlyRoots GetReadOnlyRoots() const;
- // The Heap the object was allocated in. Used also to access Isolate.
-#ifdef DEPRECATE_GET_ISOLATE
- [[deprecated("Pass Heap explicitly or use a NeverReadOnlySpaceObject")]]
-#endif
- inline Heap*
- GetHeap() const;
-
-// Convenience method to get current isolate.
-#ifdef DEPRECATE_GET_ISOLATE
- [[deprecated("Pass Isolate explicitly or use a NeverReadOnlySpaceObject")]]
-#endif
- inline Isolate*
- GetIsolate() const;
-
#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
@@ -1676,10 +1818,12 @@ class NeverReadOnlySpaceObject {
template <int start_offset, int end_offset, int size>
class FixedBodyDescriptor;
-
template <int start_offset>
class FlexibleBodyDescriptor;
+template <class ParentBodyDescriptor, class ChildBodyDescriptor>
+class SubclassBodyDescriptor;
+
// The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer). MutableHeapNumber is the same, but its
// number value can change over time (it is used only as property storage).
@@ -1951,6 +2095,9 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
// ES6 7.3.4 (when passed kDontThrow)
V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Name> key,
+ Handle<Object> value, ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
// ES6 9.1.6.1
@@ -2000,6 +2147,10 @@ class JSReceiver : public HeapObject, public NeverReadOnlySpaceObject {
// Returns the class name ([[Class]] property in the specification).
V8_EXPORT_PRIVATE String* class_name();
+ // Returns the constructor (the function that was used to instantiate the
+ // object).
+ static MaybeHandle<JSFunction> GetConstructor(Handle<JSReceiver> receiver);
+
// Returns the constructor name (the name (possibly, inferred name) of the
// function that was used to instantiate the object).
static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
@@ -2088,6 +2239,11 @@ class JSObject: public JSReceiver {
static MaybeHandle<Context> GetFunctionRealm(Handle<JSObject> object);
+ // 9.1.12 ObjectCreate ( proto [ , internalSlotsList ] )
+ // Notice: This is NOT 19.1.2.2 Object.create ( O, Properties )
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> ObjectCreate(
+ Isolate* isolate, Handle<Object> prototype);
+
// [elements]: The elements (properties with names that are integers).
//
// Elements can be in two general modes: fast and slow. Each mode
@@ -2110,7 +2266,7 @@ class JSObject: public JSReceiver {
static inline void SetMapAndElements(Handle<JSObject> object,
Handle<Map> map,
Handle<FixedArrayBase> elements);
- inline ElementsKind GetElementsKind();
+ inline ElementsKind GetElementsKind() const;
ElementsAccessor* GetElementsAccessor();
// Returns true if an object has elements of PACKED_SMI_ELEMENTS or
// HOLEY_SMI_ELEMENTS ElementsKind.
@@ -2363,7 +2519,12 @@ class JSObject: public JSReceiver {
inline Object* GetEmbedderField(int index);
inline void SetEmbedderField(int index, Object* value);
inline void SetEmbedderField(int index, Smi* value);
- bool WasConstructedFromApiFunction();
+
+ // Returns true when the object is potentially a wrapper that gets special
+ // garbage collection treatment.
+ // TODO(mlippautz): Make check exact and replace the pattern match in
+ // Heap::TracePossibleWrapper.
+ bool IsApiWrapper();
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
@@ -2500,7 +2661,7 @@ class JSObject: public JSReceiver {
// If a GC was caused while constructing this object, the elements pointer
// may point to a one pointer filler map. The object won't be rooted, but
// our heap verification code could stumble across it.
- bool ElementsAreSafeToExamine();
+ bool ElementsAreSafeToExamine() const;
#endif
Object* SlowReverseLookup(Object* value);
@@ -2919,8 +3080,7 @@ class AsyncGeneratorRequest : public Struct {
V(Atomics, or, AtomicsOr) \
V(Atomics, xor, AtomicsXor)
-enum BuiltinFunctionId {
- kInvalidBuiltinFunctionId = -1,
+enum class BuiltinFunctionId : uint8_t {
kArrayConstructor,
#define DECL_FUNCTION_ID(ignored1, ignore2, name) k##name,
FUNCTIONS_WITH_ID_LIST(DECL_FUNCTION_ID)
@@ -2964,97 +3124,8 @@ enum BuiltinFunctionId {
kStringIterator,
kStringIteratorNext,
kStringToLowerCaseIntl,
- kStringToUpperCaseIntl
-};
-
-class JSGeneratorObject: public JSObject {
- public:
- // [function]: The function corresponding to this generator object.
- DECL_ACCESSORS(function, JSFunction)
-
- // [context]: The context of the suspended computation.
- DECL_ACCESSORS(context, Context)
-
- // [receiver]: The receiver of the suspended computation.
- DECL_ACCESSORS(receiver, Object)
-
- // [input_or_debug_pos]
- // For executing generators: the most recent input value.
- // For suspended generators: debug information (bytecode offset).
- // There is currently no need to remember the most recent input value for a
- // suspended generator.
- DECL_ACCESSORS(input_or_debug_pos, Object)
-
- // [resume_mode]: The most recent resume mode.
- enum ResumeMode { kNext, kReturn, kThrow };
- DECL_INT_ACCESSORS(resume_mode)
-
- // [continuation]
- //
- // A positive value indicates a suspended generator. The special
- // kGeneratorExecuting and kGeneratorClosed values indicate that a generator
- // cannot be resumed.
- inline int continuation() const;
- inline void set_continuation(int continuation);
- inline bool is_closed() const;
- inline bool is_executing() const;
- inline bool is_suspended() const;
-
- // For suspended generators: the source position at which the generator
- // is suspended.
- int source_position() const;
-
- // [parameters_and_registers]: Saved interpreter register file.
- DECL_ACCESSORS(parameters_and_registers, FixedArray)
-
- DECL_CAST(JSGeneratorObject)
-
- // Dispatched behavior.
- DECL_PRINTER(JSGeneratorObject)
- DECL_VERIFIER(JSGeneratorObject)
-
- // Magic sentinel values for the continuation.
- static const int kGeneratorExecuting = -2;
- static const int kGeneratorClosed = -1;
-
- // Layout description.
- static const int kFunctionOffset = JSObject::kHeaderSize;
- static const int kContextOffset = kFunctionOffset + kPointerSize;
- static const int kReceiverOffset = kContextOffset + kPointerSize;
- static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
- static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
- static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
- static const int kParametersAndRegistersOffset =
- kContinuationOffset + kPointerSize;
- static const int kSize = kParametersAndRegistersOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
-};
-
-class JSAsyncGeneratorObject : public JSGeneratorObject {
- public:
- DECL_CAST(JSAsyncGeneratorObject)
-
- // Dispatched behavior.
- DECL_VERIFIER(JSAsyncGeneratorObject)
-
- // [queue]
- // Pointer to the head of a singly linked list of AsyncGeneratorRequest, or
- // undefined.
- DECL_ACCESSORS(queue, HeapObject)
-
- // [is_awaiting]
- // Whether or not the generator is currently awaiting.
- DECL_INT_ACCESSORS(is_awaiting)
-
- // Layout description.
- static const int kQueueOffset = JSGeneratorObject::kSize;
- static const int kIsAwaitingOffset = kQueueOffset + kPointerSize;
- static const int kSize = kIsAwaitingOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncGeneratorObject);
+ kStringToUpperCaseIntl,
+ kInvalidBuiltinFunctionId = static_cast<uint8_t>(-1),
};
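
Turning BuiltinFunctionId into `enum class ... : uint8_t` makes the enumerators scoped and fixes the storage width, so the invalid sentinel `static_cast<uint8_t>(-1)` is simply 255. A small self-contained illustration of the same idiom (names are illustrative):

#include <cstdint>
#include <iostream>

// Scoped enum with a fixed 8-bit underlying type; the invalid sentinel wraps
// to the maximum representable value, 255.
enum class FunctionId : std::uint8_t {
  kFirst,
  kSecond,
  kInvalid = static_cast<std::uint8_t>(-1),
};

int main() {
  static_assert(sizeof(FunctionId) == 1, "stored in a single byte");
  std::cout << static_cast<int>(FunctionId::kInvalid) << "\n";  // prints 255
}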
// JSBoundFunction describes a bound function exotic object.
@@ -3119,7 +3190,7 @@ class JSFunction: public JSObject {
inline Context* context();
inline bool has_context() const;
inline void set_context(Object* context);
- inline JSObject* global_proxy();
+ inline JSGlobalProxy* global_proxy();
inline Context* native_context();
static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
@@ -3183,6 +3254,11 @@ class JSFunction: public JSObject {
// Clears the optimization marker in the function's feedback vector.
inline void ClearOptimizationMarker();
+ // If slack tracking is active, it computes instance size of the initial map
+ // with minimum permissible object slack. If it is not active, it simply
+ // returns the initial map's instance size.
+ int ComputeInstanceSizeWithMinSlack(Isolate* isolate);
+
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();
@@ -3222,6 +3298,8 @@ class JSFunction: public JSObject {
inline bool has_instance_prototype();
inline Object* prototype();
inline Object* instance_prototype();
+ inline bool has_prototype_property();
+ inline bool PrototypeRequiresRuntimeLookup();
static void SetPrototype(Handle<JSFunction> function,
Handle<Object> value);
@@ -3955,169 +4033,6 @@ class PropertyCell : public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
};
-
-class WeakCell : public HeapObject {
- public:
- inline Object* value() const;
-
- // This should not be called by anyone except GC.
- inline void clear();
-
- // This should not be called by anyone except allocator.
- inline void initialize(HeapObject* value);
-
- inline bool cleared() const;
-
- DECL_CAST(WeakCell)
-
- DECL_PRINTER(WeakCell)
- DECL_VERIFIER(WeakCell)
-
- // Layout description.
- static const int kValueOffset = HeapObject::kHeaderSize;
- static const int kSize = kValueOffset + kPointerSize;
-
- typedef FixedBodyDescriptor<kValueOffset, kSize, kSize> BodyDescriptor;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakCell);
-};
-
-
-// The JSProxy describes EcmaScript Harmony proxies
-class JSProxy: public JSReceiver {
- public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
- Handle<Object>,
- Handle<Object>);
-
- // [handler]: The handler property.
- DECL_ACCESSORS(handler, Object)
- // [target]: The target property.
- DECL_ACCESSORS(target, Object)
-
- static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
-
- DECL_CAST(JSProxy)
-
- V8_INLINE bool IsRevoked() const;
- static void Revoke(Handle<JSProxy> proxy);
-
- // ES6 9.5.1
- static MaybeHandle<Object> GetPrototype(Handle<JSProxy> receiver);
-
- // ES6 9.5.2
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
- Handle<JSProxy> proxy, Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw);
- // ES6 9.5.3
- V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
-
- // ES6, #sec-isarray. NOT to be confused with %_IsArray.
- V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<JSProxy> proxy);
-
- // ES6 9.5.4 (when passed kDontThrow)
- V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
- Handle<JSProxy> proxy, ShouldThrow should_throw);
-
- // ES6 9.5.5
- V8_WARN_UNUSED_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
- Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
- PropertyDescriptor* desc);
-
- // ES6 9.5.6
- V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
- Isolate* isolate, Handle<JSProxy> object, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
-
- // ES6 9.5.7
- V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(Isolate* isolate,
- Handle<JSProxy> proxy,
- Handle<Name> name);
-
- // This function never returns false.
- // It returns either true or throws.
- V8_WARN_UNUSED_RESULT static Maybe<bool> CheckHasTrap(
- Isolate* isolate, Handle<Name> name, Handle<JSReceiver> target);
-
- // ES6 9.5.8
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetProperty(
- Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
- Handle<Object> receiver, bool* was_found);
-
- enum AccessKind { kGet, kSet };
-
- static MaybeHandle<Object> CheckGetSetTrapResult(Isolate* isolate,
- Handle<Name> name,
- Handle<JSReceiver> target,
- Handle<Object> trap_result,
- AccessKind access_kind);
-
- // ES6 9.5.9
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
- Handle<JSProxy> proxy, Handle<Name> name, Handle<Object> value,
- Handle<Object> receiver, LanguageMode language_mode);
-
- // ES6 9.5.10 (when passed LanguageMode::kSloppy)
- V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyOrElement(
- Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
-
- // ES6 9.5.12
- V8_WARN_UNUSED_RESULT static Maybe<bool> OwnPropertyKeys(
- Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSProxy> proxy,
- PropertyFilter filter, KeyAccumulator* accumulator);
-
- V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
- LookupIterator* it);
-
- // Dispatched behavior.
- DECL_PRINTER(JSProxy)
- DECL_VERIFIER(JSProxy)
-
- static const int kMaxIterationLimit = 100 * 1024;
-
- // Layout description.
- static const int kTargetOffset = JSReceiver::kHeaderSize;
- static const int kHandlerOffset = kTargetOffset + kPointerSize;
- static const int kSize = kHandlerOffset + kPointerSize;
-
- // kTargetOffset aliases with the elements of JSObject. The fact that
- // JSProxy::target is a Javascript value which cannot be confused with an
- // elements backing store is exploited by loading from this offset from an
- // unknown JSReceiver.
- STATIC_ASSERT(JSObject::kElementsOffset == JSProxy::kTargetOffset);
-
- typedef FixedBodyDescriptor<JSReceiver::kPropertiesOrHashOffset, kSize, kSize>
- BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- static Maybe<bool> SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
- Handle<Symbol> private_name,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
-};
-
-// JSProxyRevocableResult is just a JSObject with a specific initial map.
-// This initial map adds in-object properties for "proxy" and "revoke".
-// See https://tc39.github.io/ecma262/#sec-proxy.revocable
-class JSProxyRevocableResult : public JSObject {
- public:
- // Offsets of object fields.
- static const int kProxyOffset = JSObject::kHeaderSize;
- static const int kRevokeOffset = kProxyOffset + kPointerSize;
- static const int kSize = kRevokeOffset + kPointerSize;
- // Indices of in-object properties.
- static const int kProxyIndex = 0;
- static const int kRevokeIndex = 1;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxyRevocableResult);
-};
-
// The [Async-from-Sync Iterator] object
// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
// An object which wraps an ordinary Iterator and converts it to behave
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index d4f42bee11..6f3629a2c3 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -57,8 +57,7 @@ class AccessorInfo : public Struct {
inline void set_initial_property_attributes(PropertyAttributes attributes);
// Checks whether the given receiver is compatible with this accessor.
- static bool IsCompatibleReceiverMap(Isolate* isolate,
- Handle<AccessorInfo> info,
+ static bool IsCompatibleReceiverMap(Handle<AccessorInfo> info,
Handle<Map> map);
inline bool IsCompatibleReceiver(Object* receiver);
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 22f9837478..7d92ce0496 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -7,6 +7,11 @@
#include "src/objects/arguments.h"
+#include "src/contexts-inl.h"
+#include "src/isolate-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 9e23fa8b61..36c6204d1a 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_ARGUMENTS_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 4bb83a93b6..458aa7c1eb 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -33,8 +33,12 @@ namespace internal {
// Many of the functions in this class use arguments of type {BigIntBase},
// indicating that they will be used in a read-only capacity, and both
// {BigInt} and {MutableBigInt} objects can be passed in.
-class MutableBigInt : public FreshlyAllocatedBigInt {
+class MutableBigInt : public FreshlyAllocatedBigInt,
+ public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
// Bottleneck for converting MutableBigInts to BigInts.
static MaybeHandle<BigInt> MakeImmutable(MaybeHandle<MutableBigInt> maybe);
static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
@@ -152,7 +156,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static MaybeHandle<String> ToStringBasePowerOfTwo(Isolate* isolate,
Handle<BigIntBase> x,
int radix);
- static MaybeHandle<String> ToStringGeneric(Handle<BigIntBase> x, int radix);
+ static MaybeHandle<String> ToStringGeneric(Isolate* isolate,
+ Handle<BigIntBase> x, int radix);
static double ToDouble(Handle<BigIntBase> x);
enum Rounding { kRoundDown, kTie, kRoundUp };
@@ -926,7 +931,7 @@ MaybeHandle<String> BigInt::ToString(Isolate* isolate, Handle<BigInt> bigint,
if (base::bits::IsPowerOfTwo(radix)) {
return MutableBigInt::ToStringBasePowerOfTwo(isolate, bigint, radix);
}
- return MutableBigInt::ToStringGeneric(bigint, radix);
+ return MutableBigInt::ToStringGeneric(isolate, bigint, radix);
}
MaybeHandle<BigInt> BigInt::FromNumber(Isolate* isolate,
@@ -1981,12 +1986,12 @@ MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Isolate* isolate,
return result;
}
-MaybeHandle<String> MutableBigInt::ToStringGeneric(Handle<BigIntBase> x,
+MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
+ Handle<BigIntBase> x,
int radix) {
DCHECK(radix >= 2 && radix <= 36);
DCHECK(!x->is_zero());
- Heap* heap = x->GetHeap();
- Isolate* isolate = heap->isolate();
+ Heap* heap = isolate->heap();
const int length = x->length();
const bool sign = x->sign();
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 57e5f2a565..308e645c84 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -11,6 +11,7 @@
#include "src/isolate.h"
#include "src/objects/dictionary.h"
#include "src/objects/map-inl.h"
+#include "src/objects/maybe-object-inl.h"
#include "src/v8memory.h"
// Has to be the last include (doesn't have include guards):
@@ -132,17 +133,17 @@ BytecodeArray* AbstractCode::GetBytecodeArray() {
}
DependentCode* DependentCode::next_link() {
- return DependentCode::cast(get(kNextLinkIndex));
+ return DependentCode::cast(Get(kNextLinkIndex)->ToStrongHeapObject());
}
void DependentCode::set_next_link(DependentCode* next) {
- set(kNextLinkIndex, next);
+ Set(kNextLinkIndex, HeapObjectReference::Strong(next));
}
-int DependentCode::flags() { return Smi::ToInt(get(kFlagsIndex)); }
+int DependentCode::flags() { return Smi::ToInt(Get(kFlagsIndex)->ToSmi()); }
void DependentCode::set_flags(int flags) {
- set(kFlagsIndex, Smi::FromInt(flags));
+ Set(kFlagsIndex, MaybeObject::FromObject(Smi::FromInt(flags)));
}
int DependentCode::count() { return CountField::decode(flags()); }
@@ -155,16 +156,21 @@ DependentCode::DependencyGroup DependentCode::group() {
return static_cast<DependencyGroup>(GroupField::decode(flags()));
}
-void DependentCode::set_object_at(int i, Object* object) {
- set(kCodesStartIndex + i, object);
+void DependentCode::set_object_at(int i, MaybeObject* object) {
+ Set(kCodesStartIndex + i, object);
}
-Object* DependentCode::object_at(int i) { return get(kCodesStartIndex + i); }
+MaybeObject* DependentCode::object_at(int i) {
+ return Get(kCodesStartIndex + i);
+}
-void DependentCode::clear_at(int i) { set_undefined(kCodesStartIndex + i); }
+void DependentCode::clear_at(int i) {
+ Set(kCodesStartIndex + i,
+ HeapObjectReference::Strong(GetReadOnlyRoots().undefined_value()));
+}
void DependentCode::copy(int from, int to) {
- set(kCodesStartIndex + to, get(kCodesStartIndex + from));
+ Set(kCodesStartIndex + to, Get(kCodesStartIndex + from));
}
INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
@@ -314,8 +320,10 @@ Address Code::entry() const { return raw_instruction_start(); }
bool Code::contains(Address inner_pointer) {
if (is_off_heap_trampoline()) {
DCHECK(FLAG_embedded_builtins);
- return (OffHeapInstructionStart() <= inner_pointer) &&
- (inner_pointer < OffHeapInstructionEnd());
+ if (OffHeapInstructionStart() <= inner_pointer &&
+ inner_pointer < OffHeapInstructionEnd()) {
+ return true;
+ }
}
return (address() <= inner_pointer) && (inner_pointer < address() + Size());
}
@@ -555,7 +563,7 @@ Object* Code::GetObjectFromCodeEntry(Address code_entry) {
}
Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
- return GetObjectFromCodeEntry(Memory::Address_at(location_of_address));
+ return GetObjectFromCodeEntry(Memory<Address>(location_of_address));
}
bool Code::CanContainWeakObjects() {
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 99a159a977..f3c3c0b5b3 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -18,6 +18,7 @@ namespace internal {
class ByteArray;
class BytecodeArray;
class CodeDataContainer;
+class MaybeObject;
namespace interpreter {
class Register;
@@ -355,9 +356,6 @@ class Code : public HeapObject, public NeverReadOnlySpaceObject {
static inline bool IsWeakObjectInOptimizedCode(Object* object);
- static Handle<WeakCell> WeakCellFor(Handle<Code> code);
- WeakCell* CachedWeakCell();
-
// Return true if the function is inlined in the code.
bool Inlines(SharedFunctionInfo* sfi);
@@ -576,10 +574,10 @@ class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject {
static const int kMaxLoopNestingMarker = 6;
};
-// Dependent code is a singly linked list of fixed arrays. Each array contains
-// code objects in weak cells for one dependent group. The suffix of the array
-// can be filled with the undefined value if the number of codes is less than
-// the length of the array.
+// Dependent code is a singly linked list of weak fixed arrays. Each array
+// contains weak pointers to code objects for one dependent group. The suffix of
+// the array can be filled with the undefined value if the number of codes is
+// less than the length of the array.
//
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
@@ -591,11 +589,11 @@ class AbstractCode : public HeapObject, public NeverReadOnlySpaceObject {
// +------+-----------------+--------+--------+-----+--------+-----------+-----+
// |
// V
-// empty_fixed_array()
+// empty_weak_fixed_array()
//
-// The list of fixed arrays is ordered by dependency groups.
+// The list of weak fixed arrays is ordered by dependency groups.
-class DependentCode : public FixedArray {
+class DependentCode : public WeakFixedArray {
public:
DECL_CAST(DependentCode)
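
The updated comment describes DependentCode as a singly linked list of weak arrays, one per dependency group, where each array holds a next pointer, a count/group word, and then the weakly held code slots. A rough standalone model of that layout and its per-group traversal (plain C++ with illustrative names, not the V8 types):

#include <memory>
#include <vector>

struct CodeStub {};  // stands in for a weakly held Code object

struct DependentGroup {
  int group;                                   // which DependencyGroup
  std::vector<std::weak_ptr<CodeStub>> codes;  // the weak "code n" slots
  std::unique_ptr<DependentGroup> next;        // link to the next group
};

// Walk the list until the group matches, mirroring how DependentCode follows
// next_link() until it reaches the entry for |group| (or the empty array).
DependentGroup* FindGroup(DependentGroup* head, int group) {
  for (DependentGroup* g = head; g != nullptr; g = g->next.get()) {
    if (g->group == group) return g;
  }
  return nullptr;
}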
@@ -626,22 +624,20 @@ class DependentCode : public FixedArray {
};
// Register a code dependency of {cell} on {object}.
- static void InstallDependency(Isolate* isolate, Handle<WeakCell> cell,
+ static void InstallDependency(Isolate* isolate, MaybeObjectHandle code,
Handle<HeapObject> object,
DependencyGroup group);
- bool Contains(DependencyGroup group, WeakCell* code_cell);
+ bool Contains(DependencyGroup group, MaybeObject* code);
bool IsEmpty(DependencyGroup group);
- void RemoveCompilationDependencies(DependencyGroup group, Foreign* info);
-
void DeoptimizeDependentCodeGroup(Isolate* isolate, DependencyGroup group);
bool MarkCodeForDeoptimization(Isolate* isolate, DependencyGroup group);
// The following low-level accessors are exposed only for tests.
inline DependencyGroup group();
- inline Object* object_at(int i);
+ inline MaybeObject* object_at(int i);
inline int count();
inline DependentCode* next_link();
@@ -649,18 +645,19 @@ class DependentCode : public FixedArray {
static const char* DependencyGroupName(DependencyGroup group);
// Get/Set {object}'s {DependentCode}.
- static DependentCode* Get(Handle<HeapObject> object);
- static void Set(Handle<HeapObject> object, Handle<DependentCode> dep);
+ static DependentCode* GetDependentCode(Handle<HeapObject> object);
+ static void SetDependentCode(Handle<HeapObject> object,
+ Handle<DependentCode> dep);
static Handle<DependentCode> New(Isolate* isolate, DependencyGroup group,
- Handle<Object> object,
+ MaybeObjectHandle object,
Handle<DependentCode> next);
static Handle<DependentCode> EnsureSpace(Isolate* isolate,
Handle<DependentCode> entries);
static Handle<DependentCode> InsertWeakCode(Isolate* isolate,
Handle<DependentCode> entries,
DependencyGroup group,
- Handle<WeakCell> code_cell);
+ MaybeObjectHandle code);
// Compact by removing cleared weak cells and return true if there was
// any cleared weak cell.
@@ -678,7 +675,7 @@ class DependentCode : public FixedArray {
inline void set_next_link(DependentCode* next);
inline void set_count(int value);
- inline void set_object_at(int i, Object* object);
+ inline void set_object_at(int i, MaybeObject* object);
inline void clear_at(int i);
inline void copy(int from, int to);
@@ -837,9 +834,8 @@ class DeoptimizationData : public FixedArray {
static const int kOsrPcOffsetIndex = 4;
static const int kOptimizationIdIndex = 5;
static const int kSharedFunctionInfoIndex = 6;
- static const int kWeakCellCacheIndex = 7;
- static const int kInliningPositionsIndex = 8;
- static const int kFirstDeoptEntryIndex = 9;
+ static const int kInliningPositionsIndex = 7;
+ static const int kFirstDeoptEntryIndex = 8;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kBytecodeOffsetRawOffset = 0;
@@ -859,7 +855,6 @@ class DeoptimizationData : public FixedArray {
DECL_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DECL_ELEMENT_ACCESSORS(OptimizationId, Smi)
DECL_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
- DECL_ELEMENT_ACCESSORS(WeakCellCache, Object)
DECL_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
#undef DECL_ELEMENT_ACCESSORS
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index cf80ec7076..1637e64d6d 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -7,6 +7,11 @@
#include "src/objects/compilation-cache.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/script-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/string.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index ce9b5682c7..e754e6e68c 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_DATA_HANDLER_INL_H_
#define V8_OBJECTS_DATA_HANDLER_INL_H_
+#include "src/objects-inl.h" // Needed for write barriers
#include "src/objects/data-handler.h"
// Has to be the last include (doesn't have include guards):
@@ -13,12 +14,6 @@
namespace v8 {
namespace internal {
-bool HeapObject::IsDataHandler() const {
- return IsLoadHandler() || IsStoreHandler();
-}
-
-CAST_ACCESSOR(DataHandler)
-
ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset)
ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset)
@@ -28,10 +23,10 @@ int DataHandler::data_field_count() const {
WEAK_ACCESSORS_CHECKED(DataHandler, data1, kData1Offset,
map()->instance_size() >= kSizeWithData1)
-ACCESSORS_CHECKED(DataHandler, data2, Object, kData2Offset,
- map()->instance_size() >= kSizeWithData2)
-ACCESSORS_CHECKED(DataHandler, data3, Object, kData3Offset,
- map()->instance_size() >= kSizeWithData3)
+WEAK_ACCESSORS_CHECKED(DataHandler, data2, kData2Offset,
+ map()->instance_size() >= kSizeWithData2)
+WEAK_ACCESSORS_CHECKED(DataHandler, data3, kData3Offset,
+ map()->instance_size() >= kSizeWithData3)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/data-handler.h b/deps/v8/src/objects/data-handler.h
index 96fab2e6bc..0d5065b25c 100644
--- a/deps/v8/src/objects/data-handler.h
+++ b/deps/v8/src/objects/data-handler.h
@@ -32,8 +32,8 @@ class DataHandler : public Struct {
// [data1-3]: These are optional general-purpose fields whose content and
// presence depends on the handler kind.
DECL_ACCESSORS(data1, MaybeObject)
- DECL_ACCESSORS(data2, Object)
- DECL_ACCESSORS(data3, Object)
+ DECL_ACCESSORS(data2, MaybeObject)
+ DECL_ACCESSORS(data3, MaybeObject)
// Layout description.
#define DATA_HANDLER_FIELDS(V) \
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 548fb15705..ff3c58a82f 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -24,7 +24,7 @@ CAST_ACCESSOR(BreakPoint)
SMI_ACCESSORS(DebugInfo, flags, kFlagsOffset)
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsOffset)
-ACCESSORS(DebugInfo, function_identifier, Object, kFunctionIdentifierOffset)
+ACCESSORS(DebugInfo, script, Object, kScriptOffset)
ACCESSORS(DebugInfo, original_bytecode_array, Object,
kOriginalBytecodeArrayOffset)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 749489a1c1..3b94a4e46e 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -45,8 +45,8 @@ class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
// Bit field containing various information collected for debugging.
DECL_INT_ACCESSORS(debugger_hints)
- // Function identifier field from shared function info.
- DECL_ACCESSORS(function_identifier, Object)
+ // Script field from shared function info.
+ DECL_ACCESSORS(script, Object)
// DebugInfo can be detached from the SharedFunctionInfo iff it is empty.
bool IsEmpty() const;
@@ -168,10 +168,8 @@ class DebugInfo : public Struct, public NeverReadOnlySpaceObject {
static const int kSharedFunctionInfoOffset = Struct::kHeaderSize;
static const int kDebuggerHintsOffset =
kSharedFunctionInfoOffset + kPointerSize;
- static const int kFunctionIdentifierOffset =
- kDebuggerHintsOffset + kPointerSize;
- static const int kOriginalBytecodeArrayOffset =
- kFunctionIdentifierOffset + kPointerSize;
+ static const int kScriptOffset = kDebuggerHintsOffset + kPointerSize;
+ static const int kOriginalBytecodeArrayOffset = kScriptOffset + kPointerSize;
static const int kBreakPointsStateOffset =
kOriginalBytecodeArrayOffset + kPointerSize;
static const int kFlagsOffset = kBreakPointsStateOffset + kPointerSize;
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index c77e1000b7..c24deb68ad 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -107,6 +107,10 @@ class DescriptorArray : public WeakFixedArray {
Isolate* isolate, Handle<DescriptorArray> desc, int enumeration_index,
PropertyAttributes attributes, int slack = 0);
+ static Handle<DescriptorArray> CopyForFastObjectClone(
+ Isolate* isolate, Handle<DescriptorArray> desc, int enumeration_index,
+ int slack = 0);
+
// Sort the instance descriptors by the hash codes of their keys.
void Sort();
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 45107ce7fe..29c4593cfd 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -7,7 +7,9 @@
#include "src/objects/fixed-array.h"
+#include "src/objects-inl.h" // Needed for write barriers
#include "src/objects/bigint.h"
+#include "src/objects/maybe-object-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,7 +24,6 @@ CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(TemplateList)
-CAST_ACCESSOR(FixedArrayOfWeakCells)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakArrayList)
@@ -97,7 +98,7 @@ void FixedArray::set(int index, Object* value) {
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+ WRITE_BARRIER(this, offset, value);
}
void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
@@ -106,8 +107,7 @@ void FixedArray::set(int index, Object* value, WriteBarrierMode mode) {
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
- value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
}
void FixedArray::NoWriteBarrierSet(FixedArray* array, int index,
@@ -243,7 +243,7 @@ void WeakFixedArray::Set(int index, MaybeObject* value) {
DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value);
- WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset, value);
+ WEAK_WRITE_BARRIER(this, offset, value);
}
void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
@@ -251,8 +251,7 @@ void WeakFixedArray::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
DCHECK_LT(index, length());
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
- offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
}
MaybeObject** WeakFixedArray::data_start() {
@@ -278,52 +277,21 @@ void WeakArrayList::Set(int index, MaybeObject* value, WriteBarrierMode mode) {
DCHECK_LT(index, this->capacity());
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
- offset, value, mode);
+ CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode);
}
MaybeObject** WeakArrayList::data_start() {
return HeapObject::RawMaybeWeakField(this, kHeaderSize);
}
-Object* FixedArrayOfWeakCells::Get(int index) const {
- Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
- if (raw->IsSmi()) return raw;
- DCHECK(raw->IsWeakCell());
- return WeakCell::cast(raw)->value();
-}
-
-bool FixedArrayOfWeakCells::IsEmptySlot(int index) const {
- DCHECK(index < Length());
- return Get(index)->IsSmi();
-}
-
-void FixedArrayOfWeakCells::Clear(int index) {
- FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
-}
-
-int FixedArrayOfWeakCells::Length() const {
- return FixedArray::cast(this)->length() - kFirstIndex;
-}
-
-int FixedArrayOfWeakCells::last_used_index() const {
- return Smi::ToInt(FixedArray::cast(this)->get(kLastUsedIndexIndex));
-}
-
-void FixedArrayOfWeakCells::set_last_used_index(int index) {
- FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
-}
-
-template <class T>
-T* FixedArrayOfWeakCells::Iterator::Next() {
- if (list_ != nullptr) {
- // Assert that list did not change during iteration.
- DCHECK_EQ(last_used_index_, list_->last_used_index());
- while (index_ < list_->Length()) {
- Object* item = list_->Get(index_++);
- if (item != Empty()) return T::cast(item);
+HeapObject* WeakArrayList::Iterator::Next() {
+ if (array_ != nullptr) {
+ while (index_ < array_->length()) {
+ MaybeObject* item = array_->Get(index_++);
+ DCHECK(item->IsWeakHeapObject() || item->IsClearedWeakHeapObject());
+ if (!item->IsClearedWeakHeapObject()) return item->ToWeakHeapObject();
}
- list_ = nullptr;
+ array_ = nullptr;
}
return nullptr;
}
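
The rewritten iterator above walks a weak array and silently skips entries whose weak reference has already been cleared by the GC. The same skip-cleared iteration pattern, modeled with std::weak_ptr (illustrative, not the V8 types):

#include <cstddef>
#include <memory>
#include <vector>

struct Node {};

// Return the next still-live element at or after *index, advancing *index
// past cleared (expired) slots; return nullptr once the array is exhausted.
std::shared_ptr<Node> NextLive(const std::vector<std::weak_ptr<Node>>& array,
                               std::size_t* index) {
  while (*index < array.size()) {
    std::shared_ptr<Node> strong = array[(*index)++].lock();
    if (strong) return strong;  // skip slots the GC analogue has cleared
  }
  return nullptr;
}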
@@ -438,6 +406,11 @@ Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
}
+template <class T>
+int PodArray<T>::length() {
+ return ByteArray::length() / sizeof(T);
+}
+
void* FixedTypedArrayBase::external_pointer() const {
intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
return reinterpret_cast<void*>(ptr);
@@ -458,9 +431,9 @@ void* FixedTypedArrayBase::DataPtr() {
int FixedTypedArrayBase::ElementSize(InstanceType type) {
int element_size;
switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- element_size = size; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ element_size = sizeof(ctype); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -688,9 +661,10 @@ inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::FromHandle(
}
template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
+Handle<Object> FixedTypedArray<Traits>::get(Isolate* isolate,
+ FixedTypedArray<Traits>* array,
int index) {
- return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
+ return Traits::ToHandle(isolate, array->get_scalar(index));
}
template <class Traits>
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 362064edf7..287015ef7c 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_FIXED_ARRAY_H_
#define V8_OBJECTS_FIXED_ARRAY_H_
+#include "src/maybe-handles.h"
#include "src/objects.h"
// Has to be the last include (doesn't have include guards):
@@ -84,6 +85,15 @@ class FixedArrayBase : public HeapObject {
bool IsCowArray() const;
+// Maximal allowed size, in bytes, of a single FixedArrayBase.
+// Prevents overflowing size computations, as well as extreme memory
+// consumption.
+#ifdef V8_HOST_ARCH_32_BIT
+ static const int kMaxSize = 512 * MB;
+#else
+ static const int kMaxSize = 1024 * MB;
+#endif // V8_HOST_ARCH_32_BIT
+
// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
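Reviewer note, not part of the patch: the kMaxSize constants hoisted into FixedArrayBase above feed the kMaxLength values and the Smi static_asserts added in the hunks below. A stand-alone version of the 64-bit arithmetic, with an assumed 16-byte header purely for illustration:

#include <cstdint>

constexpr int64_t MB = 1024 * 1024;
constexpr int64_t kMaxSize = 1024 * MB;  // 64-bit value from the hunk above
constexpr int64_t kHeaderSize = 16;      // assumed for this sketch
constexpr int64_t kPointerSize = 8;
constexpr int64_t kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;

// kMaxLength is roughly 134 million, comfortably below the 2^31 - 1 Smi
// ceiling that Internals::IsValidSmi checks on 64-bit targets.
static_assert(kMaxLength < (int64_t{1} << 31), "length fits in a Smi");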
@@ -159,13 +169,11 @@ class FixedArray : public FixedArrayBase {
inline Object** RawFieldOfElementAt(int index);
DECL_CAST(FixedArray)
-
- // Maximal allowed size, in bytes, of a single FixedArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 128 * MB * kPointerSize;
// Maximally allowed length of a FixedArray.
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
+ static_assert(Internals::IsValidSmi(kMaxLength),
+ "FixedArray maxLength not a Smi");
+
// Maximally allowed length for regular (non large object space) object.
STATIC_ASSERT(kMaxRegularHeapObjectSize < kMaxSize);
static const int kMaxRegularLength =
@@ -238,12 +246,10 @@ class FixedDoubleArray : public FixedArrayBase {
DECL_CAST(FixedDoubleArray)
- // Maximal allowed size, in bytes, of a single FixedDoubleArray.
- // Prevents overflowing size computations, as well as extreme memory
- // consumption.
- static const int kMaxSize = 512 * MB;
// Maximally allowed length of a FixedArray.
static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
+ static_assert(Internals::IsValidSmi(kMaxLength),
+ "FixedDoubleArray maxLength not a Smi");
// Dispatched behavior.
DECL_PRINTER(FixedDoubleArray)
@@ -299,6 +305,8 @@ class WeakFixedArray : public HeapObject {
static const int kMaxLength =
(FixedArray::kMaxSize - kHeaderSize) / kPointerSize;
+ static_assert(Internals::IsValidSmi(kMaxLength),
+ "WeakFixedArray maxLength not a Smi");
protected:
static int OffsetOfElementAt(int index) {
@@ -362,90 +370,40 @@ class WeakArrayList : public HeapObject {
static const int kMaxCapacity =
(FixedArray::kMaxSize - kHeaderSize) / kPointerSize;
- protected:
- static Handle<WeakArrayList> EnsureSpace(Isolate* isolate,
- Handle<WeakArrayList> array,
- int length);
+ static Handle<WeakArrayList> EnsureSpace(
+ Isolate* isolate, Handle<WeakArrayList> array, int length,
+ PretenureFlag pretenure = NOT_TENURED);
- private:
- static int OffsetOfElementAt(int index) {
- return kHeaderSize + index * kPointerSize;
- }
+ // Returns the number of non-cleaned weak references in the array.
+ int CountLiveWeakReferences() const;
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakArrayList);
-};
-
-// Deprecated. Use WeakFixedArray instead.
-class FixedArrayOfWeakCells : public FixedArray {
- public:
- // If |maybe_array| is not a FixedArrayOfWeakCells, a fresh one will be
- // allocated. This function does not check if the value exists already,
- // callers must ensure this themselves if necessary.
- static Handle<FixedArrayOfWeakCells> Add(Isolate* isolate,
- Handle<Object> maybe_array,
- Handle<HeapObject> value,
- int* assigned_index = nullptr);
-
- // Returns true if an entry was found and removed.
- bool Remove(Handle<HeapObject> value);
-
- class NullCallback {
- public:
- static void Callback(Object* value, int old_index, int new_index) {}
- };
-
- template <class CompactionCallback>
- void Compact(Isolate* isolate);
-
- inline Object* Get(int index) const;
- inline void Clear(int index);
- inline int Length() const;
-
- inline bool IsEmptySlot(int index) const;
- static Object* Empty() { return Smi::kZero; }
+ // Returns whether an entry was found and removed. Will move the elements
+ // around in the array - this method can only be used in cases where the user
+ // doesn't care about the indices! Users should make sure there are no
+ // duplicates.
+ bool RemoveOne(MaybeObjectHandle value);
class Iterator {
public:
- explicit Iterator(Object* maybe_array) : list_(nullptr) {
- Reset(maybe_array);
- }
- void Reset(Object* maybe_array);
+ explicit Iterator(WeakArrayList* array) : index_(0), array_(array) {}
- template <class T>
- inline T* Next();
+ inline HeapObject* Next();
private:
int index_;
- FixedArrayOfWeakCells* list_;
+ WeakArrayList* array_;
#ifdef DEBUG
- int last_used_index_;
DisallowHeapAllocation no_gc_;
#endif // DEBUG
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
- DECL_CAST(FixedArrayOfWeakCells)
-
private:
- static const int kLastUsedIndexIndex = 0;
- static const int kFirstIndex = 1;
-
- static Handle<FixedArrayOfWeakCells> Allocate(
- Isolate* isolate, int size,
- Handle<FixedArrayOfWeakCells> initialize_from);
-
- static void Set(Isolate* isolate, Handle<FixedArrayOfWeakCells> array,
- int index, Handle<HeapObject> value);
- inline void clear(int index);
-
- inline int last_used_index() const;
- inline void set_last_used_index(int index);
+ static int OffsetOfElementAt(int index) {
+ return kHeaderSize + index * kPointerSize;
+ }
- // Disallow inherited setters.
- void set(int index, Smi* value);
- void set(int index, Object* value);
- void set(int index, Object* value, WriteBarrierMode mode);
- DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArrayOfWeakCells);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WeakArrayList);
};
// Generic array grows dynamically with O(1) amortized insertion.
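Reviewer note, not part of the patch: RemoveOne(), documented a few lines up, trades element order for cheap removal. A stand-alone analogue of that contract (std::vector based; RemoveOneUnordered is an invented name, the real method operates on weak slots):

#include <algorithm>
#include <vector>

template <typename T>
bool RemoveOneUnordered(std::vector<T>& values, const T& value) {
  auto it = std::find(values.begin(), values.end(), value);
  if (it == values.end()) return false;  // nothing to remove
  *it = values.back();                   // move another element into the hole
  values.pop_back();
  return true;                           // indices of other elements changed
}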
@@ -557,10 +515,10 @@ class ByteArray : public FixedArrayBase {
// Layout description.
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
- // Maximal memory consumption for a single ByteArray.
- static const int kMaxSize = 512 * MB;
// Maximal length of a single ByteArray.
static const int kMaxLength = kMaxSize - kHeaderSize;
+ static_assert(Internals::IsValidSmi(kMaxLength),
+ "ByteArray maxLength not a Smi");
class BodyDescriptor;
// No weak fields.
@@ -590,27 +548,13 @@ class PodArray : public ByteArray {
copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
sizeof(T));
}
- int length() { return ByteArray::length() / sizeof(T); }
+ inline int length();
DECL_CAST(PodArray<T>)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
};
-// V has parameters (Type, type, TYPE, C type, element_size)
-#define TYPED_ARRAYS(V) \
- V(Uint8, uint8, UINT8, uint8_t, 1) \
- V(Int8, int8, INT8, int8_t, 1) \
- V(Uint16, uint16, UINT16, uint16_t, 2) \
- V(Int16, int16, INT16, int16_t, 2) \
- V(Uint32, uint32, UINT32, uint32_t, 4) \
- V(Int32, int32, INT32, int32_t, 4) \
- V(Float32, float32, FLOAT32, float, 4) \
- V(Float64, float64, FLOAT64, double, 8) \
- V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1) \
- V(BigUint64, biguint64, BIGUINT64, uint64_t, 8) \
- V(BigInt64, bigint64, BIGINT64, int64_t, 8)
-
class FixedTypedArrayBase : public FixedArrayBase {
public:
// [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
@@ -676,7 +620,8 @@ class FixedTypedArray : public FixedTypedArrayBase {
static inline ElementType get_scalar_from_data_ptr(void* data_ptr, int index);
inline ElementType get_scalar(int index);
- static inline Handle<Object> get(FixedTypedArray* array, int index);
+ static inline Handle<Object> get(Isolate* isolate, FixedTypedArray* array,
+ int index);
inline void set(int index, ElementType value);
static inline ElementType from(int value);
@@ -699,18 +644,18 @@ class FixedTypedArray : public FixedTypedArrayBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArray);
};
-#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType, size) \
- STATIC_ASSERT(size <= FixedTypedArrayBase::kMaxElementSize); \
- class Type##ArrayTraits { \
- public: /* NOLINT */ \
- typedef elementType ElementType; \
- static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
- static const char* Designator() { return #type " array"; } \
- static inline Handle<Object> ToHandle(Isolate* isolate, \
- elementType scalar); \
- static inline elementType defaultValue(); \
- }; \
- \
+#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType) \
+ STATIC_ASSERT(sizeof(elementType) <= FixedTypedArrayBase::kMaxElementSize); \
+ class Type##ArrayTraits { \
+ public: /* NOLINT */ \
+ typedef elementType ElementType; \
+ static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
+ static const char* Designator() { return #type " array"; } \
+ static inline Handle<Object> ToHandle(Isolate* isolate, \
+ elementType scalar); \
+ static inline elementType defaultValue(); \
+ }; \
+ \
typedef FixedTypedArray<Type##ArrayTraits> Fixed##Type##Array;
TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
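Reviewer note, not part of the patch: the macro rework above drops the explicit size column and lets each expansion compute it with sizeof(ctype). A stand-alone X-macro in the same style (DEMO_* names are invented and cover only three types):

#include <cstdint>
#include <cstring>

#define DEMO_TYPED_ARRAYS(V) \
  V(Uint8, uint8_t)          \
  V(Int32, int32_t)          \
  V(Float64, double)

int DemoElementSize(const char* type_name) {
#define DEMO_CASE(Type, ctype) \
  if (std::strcmp(type_name, #Type) == 0) return static_cast<int>(sizeof(ctype));
  DEMO_TYPED_ARRAYS(DEMO_CASE)
#undef DEMO_CASE
  return -1;  // unknown type
}
// DemoElementSize("Int32") == 4, DemoElementSize("Float64") == 8.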
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 08ed8fe6af..0ae22f9526 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -20,7 +20,7 @@ class Handle;
#define FRAME_ARRAY_FIELD_LIST(V) \
V(WasmInstance, WasmInstanceObject) \
V(WasmFunctionIndex, Smi) \
- V(IsWasmInterpreterFrame, Smi) \
+ V(WasmCodeObject, Foreign) \
V(Receiver, Object) \
V(Function, JSFunction) \
V(Code, AbstractCode) \
@@ -74,7 +74,7 @@ class FrameArray : public FixedArray {
static const int kWasmInstanceOffset = 0;
static const int kWasmFunctionIndexOffset = 1;
- static const int kIsWasmInterpreterFrameOffset = 2;
+ static const int kWasmCodeObjectOffset = 2;
static const int kReceiverOffset = 0;
static const int kFunctionOffset = 1;
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 11aa0392c0..1f2c09316d 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -5,9 +5,13 @@
#ifndef V8_OBJECTS_HASH_TABLE_INL_H_
#define V8_OBJECTS_HASH_TABLE_INL_H_
-#include "src/heap/heap.h"
#include "src/objects/hash-table.h"
+#include "src/heap/heap.h"
+#include "src/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/roots-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index f83f3274b4..aa86865abf 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -263,7 +263,7 @@ class ObjectHashTableShape : public BaseShape<Handle<Object>> {
static inline bool IsMatch(Handle<Object> key, Object* other);
static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
static inline uint32_t HashForObject(Isolate* isolate, Object* object);
- static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key);
+ static inline Handle<Object> AsHandle(Handle<Object> key);
static const int kPrefixSize = 0;
static const int kEntryValueIndex = 1;
static const int kEntrySize = 2;
diff --git a/deps/v8/src/objects/intl-objects-inl.h b/deps/v8/src/objects/intl-objects-inl.h
index 1fa2d66f94..62b059ea3c 100644
--- a/deps/v8/src/objects/intl-objects-inl.h
+++ b/deps/v8/src/objects/intl-objects-inl.h
@@ -7,6 +7,8 @@
#include "src/objects/intl-objects.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 67f691a336..b9ceecf9a1 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -9,14 +9,18 @@
#include "src/objects/intl-objects.h"
#include "src/objects/intl-objects-inl.h"
+#include <algorithm>
#include <memory>
+#include <string>
+#include <vector>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
#include "src/intl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-collator-inl.h"
#include "src/objects/managed.h"
#include "src/objects/string.h"
#include "src/property-descriptor.h"
@@ -197,11 +201,10 @@ void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
if (U_SUCCESS(status)) {
// In CLDR (http://unicode.org/cldr/trac/ticket/9943), Etc/UTC is made
// a separate timezone ID from Etc/GMT even though they're still the same
- // timezone. We'd not have "Etc/GMT" here because we canonicalize it and
- // other GMT-variants to "UTC" in intl.js and "UTC" is turned to "Etc/UTC"
- // by ICU before getting here.
- // TODO(jshin): Figure out the cause of crbug.com/719609 and re-enable
- // DCHECK(canonical_time_zone != UNICODE_STRING_SIMPLE("Etc/GMT")) .
+ // timezone. We have Etc/UTC because 'UTC', 'Etc/Universal',
+ // 'Etc/Zulu' and others are turned to 'Etc/UTC' by ICU. Etc/GMT comes
+ // from Etc/GMT0, Etc/GMT+0, Etc/GMT-0, Etc/Greenwich.
+ // ecma402##sec-canonicalizetimezonename step 3
if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/UTC") ||
canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
JSObject::SetProperty(
@@ -493,276 +496,6 @@ void SetResolvedNumberSettings(Isolate* isolate, const icu::Locale& icu_locale,
SetResolvedNumericSettings(isolate, icu_locale, number_format, resolved);
}
-icu::Collator* CreateICUCollator(Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
- // Make collator from options.
- icu::Collator* collator = nullptr;
- UErrorCode status = U_ZERO_ERROR;
- collator = icu::Collator::createInstance(icu_locale, status);
-
- if (U_FAILURE(status)) {
- delete collator;
- return nullptr;
- }
-
- // Set flags first, and then override them with sensitivity if necessary.
- bool numeric;
- if (ExtractBooleanSetting(isolate, options, "numeric", &numeric)) {
- collator->setAttribute(UCOL_NUMERIC_COLLATION, numeric ? UCOL_ON : UCOL_OFF,
- status);
- }
-
- // Normalization is always on, by the spec. We are free to optimize
- // if the strings are already normalized (but we don't have a way to tell
- // that right now).
- collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
-
- icu::UnicodeString case_first;
- if (ExtractStringSetting(isolate, options, "caseFirst", &case_first)) {
- if (case_first == UNICODE_STRING_SIMPLE("upper")) {
- collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
- } else if (case_first == UNICODE_STRING_SIMPLE("lower")) {
- collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
- } else {
- // Default (false/off).
- collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
- }
- }
-
- icu::UnicodeString sensitivity;
- if (ExtractStringSetting(isolate, options, "sensitivity", &sensitivity)) {
- if (sensitivity == UNICODE_STRING_SIMPLE("base")) {
- collator->setStrength(icu::Collator::PRIMARY);
- } else if (sensitivity == UNICODE_STRING_SIMPLE("accent")) {
- collator->setStrength(icu::Collator::SECONDARY);
- } else if (sensitivity == UNICODE_STRING_SIMPLE("case")) {
- collator->setStrength(icu::Collator::PRIMARY);
- collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
- } else {
- // variant (default)
- collator->setStrength(icu::Collator::TERTIARY);
- }
- }
-
- bool ignore;
- if (ExtractBooleanSetting(isolate, options, "ignorePunctuation", &ignore)) {
- if (ignore) {
- collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
- }
- }
-
- return collator;
-}
-
-void SetResolvedCollatorSettings(Isolate* isolate,
- const icu::Locale& icu_locale,
- icu::Collator* collator,
- Handle<JSObject> resolved) {
- Factory* factory = isolate->factory();
- UErrorCode status = U_ZERO_ERROR;
-
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("numeric"),
- factory->ToBoolean(
- collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
- LanguageMode::kSloppy)
- .Assert();
-
- switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
- case UCOL_LOWER_FIRST:
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("lower"), LanguageMode::kSloppy)
- .Assert();
- break;
- case UCOL_UPPER_FIRST:
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("upper"), LanguageMode::kSloppy)
- .Assert();
- break;
- default:
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("false"), LanguageMode::kSloppy)
- .Assert();
- }
-
- switch (collator->getAttribute(UCOL_STRENGTH, status)) {
- case UCOL_PRIMARY: {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("primary"), LanguageMode::kSloppy)
- .Assert();
-
- // case level: true + s1 -> case, s1 -> base.
- if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("case"), LanguageMode::kSloppy)
- .Assert();
- } else {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("base"), LanguageMode::kSloppy)
- .Assert();
- }
- break;
- }
- case UCOL_SECONDARY:
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("secondary"), LanguageMode::kSloppy)
- .Assert();
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("accent"), LanguageMode::kSloppy)
- .Assert();
- break;
- case UCOL_TERTIARY:
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("tertiary"), LanguageMode::kSloppy)
- .Assert();
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
- .Assert();
- break;
- case UCOL_QUATERNARY:
- // We shouldn't get quaternary and identical from ICU, but if we do
- // put them into variant.
- JSObject::SetProperty(isolate, resolved,
- factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("quaternary"),
- LanguageMode::kSloppy)
- .Assert();
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
- .Assert();
- break;
- default:
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("identical"), LanguageMode::kSloppy)
- .Assert();
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), LanguageMode::kSloppy)
- .Assert();
- }
-
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("ignorePunctuation"),
- factory->ToBoolean(collator->getAttribute(UCOL_ALTERNATE_HANDLING,
- status) == UCOL_SHIFTED),
- LanguageMode::kSloppy)
- .Assert();
-
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- status = U_ZERO_ERROR;
- uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
- FALSE, &status);
- if (U_SUCCESS(status)) {
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result), LanguageMode::kSloppy)
- .Assert();
- } else {
- // This would never happen, since we got the locale from ICU.
- JSObject::SetProperty(
- isolate, resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"), LanguageMode::kSloppy)
- .Assert();
- }
-}
-
-bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
- Handle<JSObject> options, icu::PluralRules** pl,
- icu::DecimalFormat** nf) {
- // Make formatter from options. Numbering system is added
- // to the locale as Unicode extension (if it was specified at all).
- UErrorCode status = U_ZERO_ERROR;
-
- UPluralType type = UPLURAL_TYPE_CARDINAL;
-
- icu::UnicodeString type_string;
- if (ExtractStringSetting(isolate, options, "type", &type_string)) {
- if (type_string == UNICODE_STRING_SIMPLE("ordinal")) {
- type = UPLURAL_TYPE_ORDINAL;
- } else {
- CHECK(type_string == UNICODE_STRING_SIMPLE("cardinal"));
- }
- }
-
- icu::PluralRules* plural_rules =
- icu::PluralRules::forLocale(icu_locale, type, status);
-
- if (U_FAILURE(status)) {
- delete plural_rules;
- return false;
- }
-
- icu::DecimalFormat* number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status));
-
- if (U_FAILURE(status)) {
- delete plural_rules;
- delete number_format;
- return false;
- }
-
- *pl = plural_rules;
- *nf = number_format;
-
- SetNumericSettings(isolate, number_format, options);
-
- // Set rounding mode.
-
- return true;
-}
-
-bool SetResolvedPluralRulesSettings(Isolate* isolate,
- const icu::Locale& icu_locale,
- icu::PluralRules* plural_rules,
- icu::DecimalFormat* number_format,
- Handle<JSObject> resolved) {
- SetResolvedNumericSettings(isolate, icu_locale, number_format, resolved);
-
- Factory* factory = isolate->factory();
-
- Handle<JSObject> pluralCategories = Handle<JSObject>::cast(
- JSObject::GetProperty(
- isolate, resolved,
- factory->NewStringFromStaticChars("pluralCategories"))
- .ToHandleChecked());
-
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::StringEnumeration> categories(
- plural_rules->getKeywords(status));
- if (U_FAILURE(status)) return false;
-
- if (U_FAILURE(status)) return false;
-
- for (int32_t i = 0;; i++) {
- const icu::UnicodeString* category = categories->snext(status);
- if (U_FAILURE(status)) return false;
- if (category == nullptr) return true;
-
- std::string keyword;
- Handle<String> value = factory->NewStringFromAsciiChecked(
- category->toUTF8String(keyword).data());
-
- LookupIterator it(isolate, pluralCategories, i, LookupIterator::OWN);
- JSObject::DefineOwnPropertyIgnoreAttributes(&it, value,
- PropertyAttributes::NONE)
- .ToHandleChecked();
- }
-}
-
icu::BreakIterator* CreateICUBreakIterator(Isolate* isolate,
const icu::Locale& icu_locale,
Handle<JSObject> options) {
@@ -819,28 +552,61 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
.Assert();
}
}
+
+MaybeHandle<JSObject> CachedOrNewService(Isolate* isolate,
+ Handle<String> service,
+ Handle<Object> locales,
+ Handle<Object> options,
+ Handle<Object> internal_options) {
+ Handle<Object> result;
+ Handle<Object> undefined_value(ReadOnlyRoots(isolate).undefined_value(),
+ isolate);
+ Handle<Object> args[] = {service, locales, options, internal_options};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, isolate->cached_or_new_service(),
+ undefined_value, arraysize(args), args),
+ JSArray);
+ return Handle<JSObject>::cast(result);
+}
} // namespace
-// static
-icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
- Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
- Handle<JSObject> resolved) {
+icu::Locale Intl::CreateICULocale(Isolate* isolate,
+ Handle<String> bcp47_locale_str) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::String::Utf8Value bcp47_locale(v8_isolate,
+ v8::Utils::ToLocal(bcp47_locale_str));
+ CHECK_NOT_NULL(*bcp47_locale);
+
+ DisallowHeapAllocation no_gc;
+
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
char icu_result[ULOC_FULLNAME_CAPACITY];
int icu_length = 0;
- v8::String::Utf8Value bcp47_locale(v8_isolate, v8::Utils::ToLocal(locale));
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return nullptr;
- }
- icu_locale = icu::Locale(icu_result);
+
+ // bcp47_locale_str should be a canonicalized language tag, which
+ // means this shouldn't fail.
+ uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
+ &icu_length, &status);
+ CHECK(U_SUCCESS(status));
+ CHECK_LT(0, icu_length);
+
+ icu::Locale icu_locale(icu_result);
+ if (icu_locale.isBogus()) {
+ FATAL("Failed to create ICU locale, are ICU data files missing?");
}
+ return icu_locale;
+}
+
+// static
+icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
+ Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ DCHECK(!icu_locale.isBogus());
+
icu::SimpleDateFormat* date_format =
CreateICUDateFormat(isolate, icu_locale, options);
if (!date_format) {
@@ -859,12 +625,13 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
SetResolvedDateSettings(isolate, icu_locale, date_format, resolved);
}
+ CHECK_NOT_NULL(date_format);
return date_format;
}
-icu::SimpleDateFormat* DateFormat::UnpackDateFormat(Isolate* isolate,
- Handle<JSObject> obj) {
- return reinterpret_cast<icu::SimpleDateFormat*>(obj->GetEmbedderField(0));
+icu::SimpleDateFormat* DateFormat::UnpackDateFormat(Handle<JSObject> obj) {
+ return reinterpret_cast<icu::SimpleDateFormat*>(
+ obj->GetEmbedderField(DateFormat::kSimpleDateFormatIndex));
}
void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
@@ -872,25 +639,115 @@ void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
+MaybeHandle<JSObject> DateFormat::Unwrap(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ const char* method_name) {
+ Handle<Context> native_context =
+ Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<JSFunction> constructor = Handle<JSFunction>(
+ JSFunction::cast(native_context->intl_date_time_format_function()),
+ isolate);
+ Handle<String> method_name_str =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+
+ return Intl::UnwrapReceiver(isolate, receiver, constructor,
+ Intl::Type::kDateTimeFormat, method_name_str,
+ true);
+}
+
+// ecma402/#sec-formatdatetime
+// FormatDateTime( dateTimeFormat, x )
+MaybeHandle<String> DateFormat::FormatDateTime(
+ Isolate* isolate, Handle<JSObject> date_time_format_holder, double x) {
+ double date_value = DateCache::TimeClip(x);
+ if (std::isnan(date_value)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
+ String);
+ }
+
+ CHECK(Intl::IsObjectOfType(isolate, date_time_format_holder,
+ Intl::Type::kDateTimeFormat));
+ icu::SimpleDateFormat* date_format =
+ DateFormat::UnpackDateFormat(date_time_format_holder);
+ CHECK_NOT_NULL(date_format);
+
+ icu::UnicodeString result;
+ date_format->format(date_value, result);
+
+ return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+}
+
+// ecma402/#sec-datetime-format-functions
+// DateTime Format Functions
+MaybeHandle<String> DateFormat::DateTimeFormat(
+ Isolate* isolate, Handle<JSObject> date_time_format_holder,
+ Handle<Object> date) {
+ // 2. Assert: Type(dtf) is Object and dtf has an [[InitializedDateTimeFormat]]
+ // internal slot.
+ DCHECK(Intl::IsObjectOfType(isolate, date_time_format_holder,
+ Intl::Type::kDateTimeFormat));
+
+ // 3. If date is not provided or is undefined, then
+ double x;
+ if (date->IsUndefined()) {
+ // 3.a Let x be Call(%Date_now%, undefined).
+ x = JSDate::CurrentTimeValue(isolate);
+ } else {
+ // 4. Else,
+ // a. Let x be ? ToNumber(date).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, date, Object::ToNumber(isolate, date),
+ String);
+ CHECK(date->IsNumber());
+ x = date->Number();
+ }
+ // 5. Return FormatDateTime(dtf, x).
+ return DateFormat::FormatDateTime(isolate, date_time_format_holder, x);
+}
+
+MaybeHandle<String> DateFormat::ToLocaleDateTime(
+ Isolate* isolate, Handle<Object> date, Handle<Object> locales,
+ Handle<Object> options, const char* required, const char* defaults,
+ const char* service) {
+ Factory* factory = isolate->factory();
+ // 1. Let x be ? thisTimeValue(this value);
+ if (!date->IsJSDate()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
+ factory->NewStringFromStaticChars("Date")),
+ String);
+ }
+
+ double const x = Handle<JSDate>::cast(date)->value()->Number();
+ // 2. If x is NaN, return "Invalid Date"
+ if (std::isnan(x)) {
+ return factory->NewStringFromStaticChars("Invalid Date");
+ }
+
+ // 3. Let options be ? ToDateTimeOptions(options, required, defaults).
+ Handle<JSObject> internal_options;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, internal_options,
+ DateFormat::ToDateTimeOptions(isolate, options, required, defaults),
+ String);
+
+ // 4. Let dateFormat be ? Construct(%DateTimeFormat%, « locales, options »).
+ Handle<JSObject> date_format;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, date_format,
+ CachedOrNewService(isolate, factory->NewStringFromAsciiChecked(service),
+ locales, options, internal_options),
+ String);
+
+ // 5. Return FormatDateTime(dateFormat, x).
+ return DateFormat::FormatDateTime(isolate, date_format, x);
+}
+
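Reviewer note, not part of the patch: the flow added above has two early exits before any ICU work, a NaN this-value yields "Invalid Date" and a value rejected by TimeClip raises a RangeError. A stand-alone sketch of that ordering (the names and the 8.64e15 bound follow ECMA-262 TimeClip; this is not the V8 implementation):

#include <cmath>
#include <string>

double TimeClipSketch(double t) {
  if (!std::isfinite(t) || std::fabs(t) > 8.64e15) return NAN;
  return std::trunc(t);
}

std::string ToLocaleDateTimeSketch(double time_value) {
  if (std::isnan(time_value)) return "Invalid Date";  // step 2 above
  if (std::isnan(TimeClipSketch(time_value)))
    return "RangeError: invalid time value";          // FormatDateTime
  return "<formatted by the cached or freshly built DateTimeFormat>";
}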
icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
Handle<JSObject> resolved) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
-
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::Utf8Value bcp47_locale(v8_isolate, v8::Utils::ToLocal(locale));
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return nullptr;
- }
- icu_locale = icu::Locale(icu_result);
- }
+ icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ DCHECK(!icu_locale.isBogus());
icu::DecimalFormat* number_format =
CreateICUNumberFormat(isolate, icu_locale, options);
@@ -911,11 +768,11 @@ icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
SetResolvedNumberSettings(isolate, icu_locale, number_format, resolved);
}
+ CHECK_NOT_NULL(number_format);
return number_format;
}
-icu::DecimalFormat* NumberFormat::UnpackNumberFormat(Isolate* isolate,
- Handle<JSObject> obj) {
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(Handle<JSObject> obj) {
return reinterpret_cast<icu::DecimalFormat*>(
obj->GetEmbedderField(NumberFormat::kDecimalFormatIndex));
}
@@ -925,134 +782,11 @@ void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
-bool Collator::InitializeCollator(Isolate* isolate,
- Handle<JSObject> collator_holder,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::Utf8Value bcp47_locale(v8_isolate, v8::Utils::ToLocal(locale));
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return false;
- }
- icu_locale = icu::Locale(icu_result);
- }
-
- icu::Collator* collator = CreateICUCollator(isolate, icu_locale, options);
- if (!collator) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- collator = CreateICUCollator(isolate, no_extension_locale, options);
-
- if (!collator) {
- FATAL("Failed to create ICU collator, are ICU data files missing?");
- }
-
- // Set resolved settings (pattern, numbering system).
- SetResolvedCollatorSettings(isolate, no_extension_locale, collator,
- resolved);
- } else {
- SetResolvedCollatorSettings(isolate, icu_locale, collator, resolved);
- }
-
- Handle<Managed<icu::Collator>> managed =
- Managed<icu::Collator>::FromRawPtr(isolate, 0, collator);
- collator_holder->SetEmbedderField(0, *managed);
-
- return true;
-}
-
-icu::Collator* Collator::UnpackCollator(Isolate* isolate,
- Handle<JSObject> obj) {
- return Managed<icu::Collator>::cast(obj->GetEmbedderField(0))->raw();
-}
-
-bool PluralRules::InitializePluralRules(Isolate* isolate, Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved,
- icu::PluralRules** plural_rules,
- icu::DecimalFormat** number_format) {
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char locale_name[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::Utf8Value bcp47_locale(reinterpret_cast<v8::Isolate*>(isolate),
- v8::Utils::ToLocal(locale));
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, locale_name, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return false;
- }
- icu_locale = icu::Locale(locale_name);
- }
-
- bool success = CreateICUPluralRules(isolate, icu_locale, options,
- plural_rules, number_format);
- if (!success) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- success = CreateICUPluralRules(isolate, no_extension_locale, options,
- plural_rules, number_format);
-
- if (!success) {
- FATAL("Failed to create ICU PluralRules, are ICU data files missing?");
- }
-
- // Set resolved settings (pattern, numbering system).
- success = SetResolvedPluralRulesSettings(
- isolate, no_extension_locale, *plural_rules, *number_format, resolved);
- } else {
- success = SetResolvedPluralRulesSettings(isolate, icu_locale, *plural_rules,
- *number_format, resolved);
- }
-
- return success;
-}
-
-icu::PluralRules* PluralRules::UnpackPluralRules(Isolate* isolate,
- Handle<JSObject> obj) {
- return reinterpret_cast<icu::PluralRules*>(obj->GetEmbedderField(0));
-}
-
-icu::DecimalFormat* PluralRules::UnpackNumberFormat(Isolate* isolate,
- Handle<JSObject> obj) {
- return reinterpret_cast<icu::DecimalFormat*>(obj->GetEmbedderField(1));
-}
-
-void PluralRules::DeletePluralRules(const v8::WeakCallbackInfo<void>& data) {
- delete reinterpret_cast<icu::PluralRules*>(data.GetInternalField(0));
- delete reinterpret_cast<icu::DecimalFormat*>(data.GetInternalField(1));
- GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
-}
-
icu::BreakIterator* V8BreakIterator::InitializeBreakIterator(
Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
Handle<JSObject> resolved) {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- // Convert BCP47 into ICU locale format.
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- int icu_length = 0;
- v8::String::Utf8Value bcp47_locale(v8_isolate, v8::Utils::ToLocal(locale));
- if (bcp47_locale.length() != 0) {
- uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
- &icu_length, &status);
- if (U_FAILURE(status) || icu_length == 0) {
- return nullptr;
- }
- icu_locale = icu::Locale(icu_result);
- }
+ icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ DCHECK(!icu_locale.isBogus());
icu::BreakIterator* break_iterator =
CreateICUBreakIterator(isolate, icu_locale, options);
@@ -1074,12 +808,13 @@ icu::BreakIterator* V8BreakIterator::InitializeBreakIterator(
resolved);
}
+ CHECK_NOT_NULL(break_iterator);
return break_iterator;
}
-icu::BreakIterator* V8BreakIterator::UnpackBreakIterator(Isolate* isolate,
- Handle<JSObject> obj) {
- return reinterpret_cast<icu::BreakIterator*>(obj->GetEmbedderField(0));
+icu::BreakIterator* V8BreakIterator::UnpackBreakIterator(Handle<JSObject> obj) {
+ return reinterpret_cast<icu::BreakIterator*>(
+ obj->GetEmbedderField(V8BreakIterator::kBreakIteratorIndex));
}
void V8BreakIterator::DeleteBreakIterator(
@@ -1089,6 +824,84 @@ void V8BreakIterator::DeleteBreakIterator(
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
+void V8BreakIterator::AdoptText(Isolate* isolate,
+ Handle<JSObject> break_iterator_holder,
+ Handle<String> text) {
+ icu::BreakIterator* break_iterator =
+ V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
+ CHECK_NOT_NULL(break_iterator);
+
+ icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
+ break_iterator_holder->GetEmbedderField(
+ V8BreakIterator::kUnicodeStringIndex));
+ delete u_text;
+
+ int length = text->length();
+ text = String::Flatten(isolate, text);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = text->GetFlatContent();
+ std::unique_ptr<uc16[]> sap;
+ const UChar* text_value = GetUCharBufferFromFlat(flat, &sap, length);
+ u_text = new icu::UnicodeString(text_value, length);
+ break_iterator_holder->SetEmbedderField(V8BreakIterator::kUnicodeStringIndex,
+ reinterpret_cast<Smi*>(u_text));
+
+ break_iterator->setText(*u_text);
+}
+
+MaybeHandle<String> Intl::ToString(Isolate* isolate,
+ const icu::UnicodeString& string) {
+ return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(string.getBuffer()), string.length()));
+}
+
+MaybeHandle<String> Intl::ToString(Isolate* isolate,
+ const icu::UnicodeString& string,
+ int32_t begin, int32_t end) {
+ return Intl::ToString(isolate, string.tempSubStringBetween(begin, end));
+}
+
+namespace {
+
+Handle<JSObject> InnerAddElement(Isolate* isolate, Handle<JSArray> array,
+ int index, Handle<String> field_type_string,
+ Handle<String> value) {
+ // let element = $array[$index] = {
+ // type: $field_type_string,
+ // value: $value
+ // }
+ // return element;
+ Factory* factory = isolate->factory();
+ Handle<JSObject> element = factory->NewJSObject(isolate->object_function());
+ JSObject::AddProperty(isolate, element, factory->type_string(),
+ field_type_string, NONE);
+
+ JSObject::AddProperty(isolate, element, factory->value_string(), value, NONE);
+ JSObject::AddDataElement(array, index, element, NONE);
+ return element;
+}
+
+} // namespace
+
+void Intl::AddElement(Isolate* isolate, Handle<JSArray> array, int index,
+ Handle<String> field_type_string, Handle<String> value) {
+ // Same as $array[$index] = {type: $field_type_string, value: $value};
+ InnerAddElement(isolate, array, index, field_type_string, value);
+}
+
+void Intl::AddElement(Isolate* isolate, Handle<JSArray> array, int index,
+ Handle<String> field_type_string, Handle<String> value,
+ Handle<String> additional_property_name,
+ Handle<String> additional_property_value) {
+ // Same as $array[$index] = {
+ // type: $field_type_string, value: $value,
+ // $additional_property_name: $additional_property_value
+ // }
+ Handle<JSObject> element =
+ InnerAddElement(isolate, array, index, field_type_string, value);
+ JSObject::AddProperty(isolate, element, additional_property_name,
+ additional_property_value, NONE);
+}
// Build the shortened locale; eg, convert xx_Yyyy_ZZ to xx_ZZ.
bool Intl::RemoveLocaleScriptTag(const std::string& icu_locale,
std::string* locale_less_script) {
@@ -1107,6 +920,110 @@ bool Intl::RemoveLocaleScriptTag(const std::string& icu_locale,
return true;
}
+namespace {
+
+Maybe<bool> IsPropertyUndefined(Isolate* isolate, Handle<JSObject> options,
+ const char* property) {
+ Factory* factory = isolate->factory();
+ // i. Let prop be the property name.
+ // ii. Let value be ? Get(options, prop).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value,
+ Object::GetPropertyOrElement(
+ isolate, options, factory->NewStringFromAsciiChecked(property)),
+ Nothing<bool>());
+ return Just(value->IsUndefined(isolate));
+}
+
+} // namespace
+
+// ecma-402/#sec-todatetimeoptions
+MaybeHandle<JSObject> DateFormat::ToDateTimeOptions(
+ Isolate* isolate, Handle<Object> input_options, const char* required,
+ const char* defaults) {
+ Factory* factory = isolate->factory();
+ // 1. If options is undefined, let options be null; otherwise let options be ?
+ // ToObject(options).
+ Handle<JSObject> options;
+ if (input_options->IsUndefined(isolate)) {
+ options = factory->NewJSObjectWithNullProto();
+ } else {
+ Handle<JSReceiver> options_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
+ Object::ToObject(isolate, input_options),
+ JSObject);
+ // 2. Let options be ObjectCreate(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ JSObject::ObjectCreate(isolate, options_obj),
+ JSObject);
+ }
+
+ // 3. Let needDefaults be true.
+ bool needs_default = true;
+
+ bool required_is_any = strcmp(required, "any") == 0;
+ // 4. If required is "date" or "any", then
+ if (required_is_any || (strcmp(required, "date") == 0)) {
+ // a. For each of the property names "weekday", "year", "month", "day", do
+ for (auto& prop : {"weekday", "year", "month", "day"}) {
+ // i. Let prop be the property name.
+ // ii. Let value be ? Get(options, prop)
+ Maybe<bool> maybe_undefined = IsPropertyUndefined(isolate, options, prop);
+ MAYBE_RETURN(maybe_undefined, Handle<JSObject>());
+ // iii. If value is not undefined, let needDefaults be false.
+ if (!maybe_undefined.FromJust()) {
+ needs_default = false;
+ }
+ }
+ }
+
+ // 5. If required is "time" or "any", then
+ if (required_is_any || (strcmp(required, "time") == 0)) {
+ // a. For each of the property names "hour", "minute", "second", do
+ for (auto& prop : {"hour", "minute", "second"}) {
+ // i. Let prop be the property name.
+ // ii. Let value be ? Get(options, prop)
+ Maybe<bool> maybe_undefined = IsPropertyUndefined(isolate, options, prop);
+ MAYBE_RETURN(maybe_undefined, Handle<JSObject>());
+ // iii. If value is not undefined, let needDefaults be false.
+ if (!maybe_undefined.FromJust()) {
+ needs_default = false;
+ }
+ }
+ }
+
+ // 6. If needDefaults is true and defaults is either "date" or "all", then
+ if (needs_default) {
+ bool default_is_all = strcmp(defaults, "all") == 0;
+ if (default_is_all || (strcmp(defaults, "date") == 0)) {
+ // a. For each of the property names "year", "month", "day", do
+ // i. Perform ? CreateDataPropertyOrThrow(options, prop, "numeric").
+ for (auto& prop : {"year", "month", "day"}) {
+ MAYBE_RETURN(
+ JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromAsciiChecked(prop),
+ factory->numeric_string(), kThrowOnError),
+ Handle<JSObject>());
+ }
+ }
+ // 7. If needDefaults is true and defaults is either "time" or "all", then
+ if (default_is_all || (strcmp(defaults, "time") == 0)) {
+ // a. For each of the property names "hour", "minute", "second", do
+ // i. Perform ? CreateDataPropertyOrThrow(options, prop, "numeric").
+ for (auto& prop : {"hour", "minute", "second"}) {
+ MAYBE_RETURN(
+ JSReceiver::CreateDataProperty(
+ isolate, options, factory->NewStringFromAsciiChecked(prop),
+ factory->numeric_string(), kThrowOnError),
+ Handle<JSObject>());
+ }
+ }
+ }
+ // 8. Return options.
+ return options;
+}
+
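Reviewer note, not part of the patch: ToDateTimeOptions() above only injects defaults when the caller supplied none of the relevant components. A stand-alone analogue of the needDefaults logic (the required/defaults spec arguments are collapsed into two booleans; ApplyDateTimeDefaults is an invented name):

#include <map>
#include <string>

using Options = std::map<std::string, std::string>;

void ApplyDateTimeDefaults(Options& options, bool want_date, bool want_time) {
  bool need_defaults = true;
  for (const char* prop :
       {"weekday", "year", "month", "day", "hour", "minute", "second"}) {
    if (options.count(prop) != 0) need_defaults = false;
  }
  if (!need_defaults) return;  // caller picked components explicitly
  if (want_date)
    for (const char* prop : {"year", "month", "day"}) options[prop] = "numeric";
  if (want_time)
    for (const char* prop : {"hour", "minute", "second"}) options[prop] = "numeric";
}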
std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
const icu::Locale* icu_available_locales = nullptr;
int32_t count = 0;
@@ -1170,6 +1087,14 @@ std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
std::inserter(locales, locales.begin()));
return locales;
}
+ case IcuService::kListFormatter: {
+ // TODO(ftang): for now just use
+ // icu::Locale::getAvailableLocales(count) until we migrate to
+ // Intl::GetAvailableLocales().
+ // ICU FR at https://unicode-org.atlassian.net/browse/ICU-20015
+ icu_available_locales = icu::Locale::getAvailableLocales(count);
+ break;
+ }
}
UErrorCode error = U_ZERO_ERROR;
@@ -1198,6 +1123,65 @@ std::set<std::string> Intl::GetAvailableLocales(const IcuService& service) {
return locales;
}
+IcuService Intl::StringToIcuService(Handle<String> service) {
+ if (service->IsUtf8EqualTo(CStrVector("collator"))) {
+ return IcuService::kCollator;
+ } else if (service->IsUtf8EqualTo(CStrVector("numberformat"))) {
+ return IcuService::kNumberFormat;
+ } else if (service->IsUtf8EqualTo(CStrVector("dateformat"))) {
+ return IcuService::kDateFormat;
+ } else if (service->IsUtf8EqualTo(CStrVector("breakiterator"))) {
+ return IcuService::kBreakIterator;
+ } else if (service->IsUtf8EqualTo(CStrVector("pluralrules"))) {
+ return IcuService::kPluralRules;
+ } else if (service->IsUtf8EqualTo(CStrVector("relativetimeformat"))) {
+ return IcuService::kRelativeDateTimeFormatter;
+ } else if (service->IsUtf8EqualTo(CStrVector("listformat"))) {
+ return IcuService::kListFormatter;
+ }
+ UNREACHABLE();
+}
+
+V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> Intl::AvailableLocalesOf(
+ Isolate* isolate, Handle<String> service) {
+ Factory* factory = isolate->factory();
+ std::set<std::string> results =
+ Intl::GetAvailableLocales(StringToIcuService(service));
+ Handle<JSObject> locales = factory->NewJSObjectWithNullProto();
+
+ int32_t i = 0;
+ for (auto iter = results.begin(); iter != results.end(); ++iter) {
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ locales, factory->NewStringFromAsciiChecked(iter->c_str()),
+ factory->NewNumber(i++), NONE),
+ JSObject);
+ }
+ return locales;
+}
+
+std::string Intl::DefaultLocale(Isolate* isolate) {
+ if (isolate->default_locale().empty()) {
+ icu::Locale default_locale;
+ // Translate ICU's fallback locale to a well-known locale.
+ if (strcmp(default_locale.getName(), "en_US_POSIX") == 0) {
+ isolate->set_default_locale("en-US");
+ } else {
+ // Set the locale
+ char result[ULOC_FULLNAME_CAPACITY];
+ UErrorCode status = U_ZERO_ERROR;
+ int32_t length =
+ uloc_toLanguageTag(default_locale.getName(), result,
+ ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ isolate->set_default_locale(
+ U_SUCCESS(status) ? std::string(result, length) : "und");
+ }
+ DCHECK(!isolate->default_locale().empty());
+ }
+ return isolate->default_locale();
+}
+
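Reviewer note, not part of the patch: DefaultLocale() above resolves the ICU default once and then reuses the cached string. A stand-alone sketch of that memoisation (the cache is passed in explicitly here; DefaultLocaleSketch is an invented name, the POSIX special case mirrors the code above):

#include <string>

std::string DefaultLocaleSketch(std::string& cache,
                                const std::string& icu_name) {
  if (!cache.empty()) return cache;  // resolved on a previous call
  if (icu_name == "en_US_POSIX") {
    cache = "en-US";                 // translate ICU's fallback locale
  } else {
    cache = "und";                   // or the tag from uloc_toLanguageTag()
  }
  return cache;
}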
bool Intl::IsObjectOfType(Isolate* isolate, Handle<Object> input,
Intl::Type expected_type) {
if (!input->IsJSObject()) return false;
@@ -1260,6 +1244,9 @@ MaybeHandle<JSObject> Intl::UnwrapReceiver(Isolate* isolate,
Intl::Type type,
Handle<String> method_name,
bool check_legacy_constructor) {
+ DCHECK(type == Intl::Type::kCollator || type == Intl::Type::kNumberFormat ||
+ type == Intl::Type::kDateTimeFormat ||
+ type == Intl::Type::kBreakIterator);
Handle<Object> new_receiver = receiver;
if (check_legacy_constructor) {
ASSIGN_RETURN_ON_EXCEPTION(
@@ -1267,6 +1254,20 @@ MaybeHandle<JSObject> Intl::UnwrapReceiver(Isolate* isolate,
LegacyUnwrapReceiver(isolate, receiver, constructor, type), JSObject);
}
+ // Collator has been ported to use regular instance types. We
+ // shouldn't be using Intl::IsObjectOfType anymore.
+ if (type == Intl::Type::kCollator) {
+ if (!receiver->IsJSCollator()) {
+ // 3. a. Throw a TypeError exception.
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ method_name, receiver),
+ JSObject);
+ }
+ return Handle<JSCollator>::cast(receiver);
+ }
+
+ DCHECK_NE(type, Intl::Type::kCollator);
// 3. If Type(new_receiver) is not Object or nf does not have an
// [[Initialized...]] internal slot, then
if (!Intl::IsObjectOfType(isolate, new_receiver, type)) {
@@ -1296,10 +1297,10 @@ MaybeHandle<JSObject> NumberFormat::Unwrap(Isolate* isolate,
Intl::Type::kNumberFormat, method_name_str, true);
}
-MaybeHandle<Object> NumberFormat::FormatNumber(
+MaybeHandle<String> NumberFormat::FormatNumber(
Isolate* isolate, Handle<JSObject> number_format_holder, double value) {
icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
+ NumberFormat::UnpackNumberFormat(number_format_holder);
CHECK_NOT_NULL(number_format);
icu::UnicodeString result;
@@ -1309,60 +1310,100 @@ MaybeHandle<Object> NumberFormat::FormatNumber(
reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
}
+void Intl::DefineWEProperty(Isolate* isolate, Handle<JSObject> target,
+ Handle<Name> key, Handle<Object> value) {
+ PropertyDescriptor desc;
+ desc.set_writable(true);
+ desc.set_enumerable(true);
+ desc.set_value(value);
+ Maybe<bool> success =
+ JSReceiver::DefineOwnProperty(isolate, target, key, &desc, kDontThrow);
+ DCHECK(success.IsJust() && success.FromJust());
+ USE(success);
+}
+
namespace {
-// TODO(bstell): Make all these a constexpr on the Intl class.
+// Define general regexp macros.
+// Note "(?:" means the regexp group a non-capture group.
+#define REGEX_ALPHA "[a-z]"
+#define REGEX_DIGIT "[0-9]"
+#define REGEX_ALPHANUM "(?:" REGEX_ALPHA "|" REGEX_DIGIT ")"
+
void BuildLanguageTagRegexps(Isolate* isolate) {
- std::string alpha = "[a-zA-Z]";
- std::string digit = "[0-9]";
- std::string alphanum = "(" + alpha + "|" + digit + ")";
- std::string regular =
- "(art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|"
- "zh-min|zh-min-nan|zh-xiang)";
- std::string irregular =
- "(en-GB-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|"
- "i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|"
- "i-tsu|sgn-BE-FR|sgn-BE-NL|sgn-CH-DE)";
- std::string grandfathered = "(" + irregular + "|" + regular + ")";
- std::string private_use = "(x(-" + alphanum + "{1,8})+)";
-
- std::string singleton = "(" + digit + "|[A-WY-Za-wy-z])";
- std::string language_singleton_regexp = "^" + singleton + "$";
-
- std::string extension = "(" + singleton + "(-" + alphanum + "{2,8})+)";
-
- std::string variant = "(" + alphanum + "{5,8}|(" + digit + alphanum + "{3}))";
- std::string language_variant_regexp = "^" + variant + "$";
-
- std::string region = "(" + alpha + "{2}|" + digit + "{3})";
- std::string script = "(" + alpha + "{4})";
- std::string ext_lang = "(" + alpha + "{3}(-" + alpha + "{3}){0,2})";
- std::string language = "(" + alpha + "{2,3}(-" + ext_lang + ")?|" + alpha +
- "{4}|" + alpha + "{5,8})";
- std::string lang_tag = language + "(-" + script + ")?(-" + region + ")?(-" +
- variant + ")*(-" + extension + ")*(-" + private_use +
- ")?";
-
- std::string language_tag =
- "^(" + lang_tag + "|" + private_use + "|" + grandfathered + ")$";
- std::string language_tag_regexp = std::string(language_tag);
+// Define the language tag regexp macros.
+// For info on BCP 47 see https://tools.ietf.org/html/bcp47 .
+// Because language tags are case-insensitive per BCP 47 2.1.1 and the regexps
+// defined below will always be used after lowercasing the input, uppercase
+// ranges in BCP 47 2.1 are dropped and grandfathered tags are all lowercased.
+// clang-format off
+#define BCP47_REGULAR \
+ "(?:art-lojban|cel-gaulish|no-bok|no-nyn|zh-guoyu|zh-hakka|" \
+ "zh-min|zh-min-nan|zh-xiang)"
+#define BCP47_IRREGULAR \
+ "(?:en-gb-oed|i-ami|i-bnn|i-default|i-enochian|i-hak|" \
+ "i-klingon|i-lux|i-mingo|i-navajo|i-pwn|i-tao|i-tay|" \
+ "i-tsu|sgn-be-fr|sgn-be-nl|sgn-ch-de)"
+#define BCP47_GRANDFATHERED "(?:" BCP47_IRREGULAR "|" BCP47_REGULAR ")"
+#define BCP47_PRIVATE_USE "(?:x(?:-" REGEX_ALPHANUM "{1,8})+)"
+
+#define BCP47_SINGLETON "(?:" REGEX_DIGIT "|" "[a-wy-z])"
+
+#define BCP47_EXTENSION "(?:" BCP47_SINGLETON "(?:-" REGEX_ALPHANUM "{2,8})+)"
+#define BCP47_VARIANT \
+ "(?:" REGEX_ALPHANUM "{5,8}" "|" "(?:" REGEX_DIGIT REGEX_ALPHANUM "{3}))"
+
+#define BCP47_REGION "(?:" REGEX_ALPHA "{2}" "|" REGEX_DIGIT "{3})"
+#define BCP47_SCRIPT "(?:" REGEX_ALPHA "{4})"
+#define BCP47_EXT_LANG "(?:" REGEX_ALPHA "{3}(?:-" REGEX_ALPHA "{3}){0,2})"
+#define BCP47_LANGUAGE "(?:" REGEX_ALPHA "{2,3}(?:-" BCP47_EXT_LANG ")?" \
+ "|" REGEX_ALPHA "{4}" "|" REGEX_ALPHA "{5,8})"
+#define BCP47_LANG_TAG \
+ BCP47_LANGUAGE \
+ "(?:-" BCP47_SCRIPT ")?" \
+ "(?:-" BCP47_REGION ")?" \
+ "(?:-" BCP47_VARIANT ")*" \
+ "(?:-" BCP47_EXTENSION ")*" \
+ "(?:-" BCP47_PRIVATE_USE ")?"
+ // clang-format on
+
+ constexpr char kLanguageTagSingletonRegexp[] = "^" BCP47_SINGLETON "$";
+ constexpr char kLanguageTagVariantRegexp[] = "^" BCP47_VARIANT "$";
+ constexpr char kLanguageTagRegexp[] =
+ "^(?:" BCP47_LANG_TAG "|" BCP47_PRIVATE_USE "|" BCP47_GRANDFATHERED ")$";
UErrorCode status = U_ZERO_ERROR;
icu::RegexMatcher* language_singleton_regexp_matcher = new icu::RegexMatcher(
- icu::UnicodeString::fromUTF8(language_singleton_regexp), 0, status);
- CHECK(U_SUCCESS(status));
+ icu::UnicodeString(kLanguageTagSingletonRegexp, -1, US_INV), 0, status);
icu::RegexMatcher* language_tag_regexp_matcher = new icu::RegexMatcher(
- icu::UnicodeString::fromUTF8(language_tag_regexp), 0, status);
- CHECK(U_SUCCESS(status));
+ icu::UnicodeString(kLanguageTagRegexp, -1, US_INV), 0, status);
icu::RegexMatcher* language_variant_regexp_matcher = new icu::RegexMatcher(
- icu::UnicodeString::fromUTF8(language_variant_regexp), 0, status);
+ icu::UnicodeString(kLanguageTagVariantRegexp, -1, US_INV), 0, status);
CHECK(U_SUCCESS(status));
isolate->set_language_tag_regexp_matchers(language_singleton_regexp_matcher,
language_tag_regexp_matcher,
language_variant_regexp_matcher);
+// Undefine the language tag regexp macros.
+#undef BCP47_EXTENSION
+#undef BCP47_EXT_LANG
+#undef BCP47_GRANDFATHERED
+#undef BCP47_IRREGULAR
+#undef BCP47_LANG_TAG
+#undef BCP47_LANGUAGE
+#undef BCP47_PRIVATE_USE
+#undef BCP47_REGION
+#undef BCP47_REGULAR
+#undef BCP47_SCRIPT
+#undef BCP47_SINGLETON
+#undef BCP47_VARIANT
}
+// Undefine the general regexp macros.
+#undef REGEX_ALPHA
+#undef REGEX_DIGIT
+#undef REGEX_ALPHANUM
+
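Reviewer note, not part of the patch: the BCP47_* and REGEX_* macros above rely on adjacent string-literal concatenation to produce one regexp literal per pattern. A stand-alone, compilable illustration of the same technique (DEMO_* macros are invented and cover only a fragment of the grammar):

#include <cstdio>

#define DEMO_ALPHA "[a-z]"
#define DEMO_DIGIT "[0-9]"
#define DEMO_ALPHANUM "(?:" DEMO_ALPHA "|" DEMO_DIGIT ")"
#define DEMO_PRIVATE_USE "(?:x(?:-" DEMO_ALPHANUM "{1,8})+)"

int main() {
  // Adjacent literals are merged at compile time, so this prints
  //   (?:x(?:-(?:[a-z]|[0-9]){1,8})+)
  std::puts(DEMO_PRIVATE_USE);
  return 0;
}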
icu::RegexMatcher* GetLanguageSingletonRegexMatcher(Isolate* isolate) {
icu::RegexMatcher* language_singleton_regexp_matcher =
isolate->language_singleton_regexp_matcher();
@@ -1406,8 +1447,7 @@ MaybeHandle<JSObject> Intl::ResolveLocale(Isolate* isolate, const char* service,
Handle<JSFunction> resolve_locale_function = isolate->resolve_locale();
Handle<Object> result;
- Handle<Object> undefined_value(ReadOnlyRoots(isolate).undefined_value(),
- isolate);
+ Handle<Object> undefined_value = isolate->factory()->undefined_value();
Handle<Object> args[] = {service_str, requestedLocales, options};
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
@@ -1418,6 +1458,23 @@ MaybeHandle<JSObject> Intl::ResolveLocale(Isolate* isolate, const char* service,
return Handle<JSObject>::cast(result);
}
+MaybeHandle<JSObject> Intl::CanonicalizeLocaleListJS(Isolate* isolate,
+ Handle<Object> locales) {
+ Handle<JSFunction> canonicalize_locale_list_function =
+ isolate->canonicalize_locale_list();
+
+ Handle<Object> result;
+ Handle<Object> undefined_value = isolate->factory()->undefined_value();
+ Handle<Object> args[] = {locales};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, canonicalize_locale_list_function,
+ undefined_value, arraysize(args), args),
+ JSObject);
+
+ return Handle<JSObject>::cast(result);
+}
+
Maybe<bool> Intl::GetStringOption(Isolate* isolate, Handle<JSReceiver> options,
const char* property,
std::vector<const char*> values,
@@ -1494,9 +1551,7 @@ V8_WARN_UNUSED_RESULT Maybe<bool> Intl::GetBoolOption(
return Just(false);
}
-// TODO(bstell): enable this anonymous namespace once
-// IsStructurallyValidLanguageTag called.
-// namespace {
+namespace {
char AsciiToLower(char c) {
if (c < 'A' || c > 'Z') {
@@ -1532,16 +1587,16 @@ bool IsStructurallyValidLanguageTag(Isolate* isolate,
GetLanguageTagRegexMatcher(isolate);
// Check if it's well-formed, including grandfathered tags.
- language_tag_regexp_matcher->reset(
- icu::UnicodeString(locale.c_str(), -1, US_INV));
+ icu::UnicodeString locale_uni(locale.c_str(), -1, US_INV);
+ // Note: icu::RegexMatcher::reset does not make a copy of the input string
+ // so a temporary cannot be used; i.e., it cannot be created as a call argument.
+ language_tag_regexp_matcher->reset(locale_uni);
UErrorCode status = U_ZERO_ERROR;
bool is_valid_lang_tag = language_tag_regexp_matcher->matches(status);
if (!is_valid_lang_tag || V8_UNLIKELY(U_FAILURE(status))) {
return false;
}
- std::transform(locale.begin(), locale.end(), locale.begin(), AsciiToLower);
-
// Just return if it's a x- form. It's all private.
if (locale.find("x-") == 0) {
return true;
@@ -1574,33 +1629,31 @@ bool IsStructurallyValidLanguageTag(Isolate* isolate,
std::vector<std::string> variants;
std::vector<std::string> extensions;
- for (const auto& value : parts) {
- language_variant_regexp_matcher->reset(
- icu::UnicodeString::fromUTF8(value.c_str()));
+ for (auto it = parts.begin() + 1; it != parts.end(); it++) {
+ icu::UnicodeString part(it->data(), -1, US_INV);
+ language_variant_regexp_matcher->reset(part);
bool is_language_variant = language_variant_regexp_matcher->matches(status);
if (V8_UNLIKELY(U_FAILURE(status))) {
return false;
}
if (is_language_variant && extensions.size() == 0) {
- if (std::find(variants.begin(), variants.end(), value) ==
- variants.end()) {
- variants.push_back(value);
+ if (std::find(variants.begin(), variants.end(), *it) == variants.end()) {
+ variants.push_back(*it);
} else {
return false;
}
}
- language_singleton_regexp_matcher->reset(
- icu::UnicodeString(value.c_str(), -1, US_INV));
+ language_singleton_regexp_matcher->reset(part);
bool is_language_singleton =
language_singleton_regexp_matcher->matches(status);
if (V8_UNLIKELY(U_FAILURE(status))) {
return false;
}
if (is_language_singleton) {
- if (std::find(extensions.begin(), extensions.end(), value) ==
+ if (std::find(extensions.begin(), extensions.end(), *it) ==
extensions.end()) {
- extensions.push_back(value);
+ extensions.push_back(*it);
} else {
return false;
}
@@ -1609,7 +1662,807 @@ bool IsStructurallyValidLanguageTag(Isolate* isolate,
return true;
}
-// } // anonymous namespace
+
+bool IsLowerAscii(char c) { return c >= 'a' && c <= 'z'; }
+
+bool IsTwoLetterLanguage(const std::string& locale) {
+ // Two letters, both in range 'a'-'z'...
+ return locale.length() == 2 && IsLowerAscii(locale[0]) &&
+ IsLowerAscii(locale[1]);
+}
+
+bool IsDeprecatedLanguage(const std::string& locale) {
+ // Check if locale is one of the deprecated language tags:
+ return locale == "in" || locale == "iw" || locale == "ji" || locale == "jw";
+}
+
+// Reference:
+// https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry
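+// For example (illustrative), "i-enochian" and "cel-gaulish" carry no
+// Preferred-Value in the registry, whereas "i-klingon" maps to "tlh" and is
+// therefore not handled here.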
+bool IsGrandfatheredTagWithoutPreferredValue(const std::string& locale) {
+ if (V8_UNLIKELY(locale == "zh-min" || locale == "cel-gaulish")) return true;
+ if (locale.length() > 6 /* i-mingo is 7 chars long */ &&
+ V8_UNLIKELY(locale[0] == 'i' && locale[1] == '-')) {
+ return locale.substr(2) == "default" || locale.substr(2) == "enochian" ||
+ locale.substr(2) == "mingo";
+ }
+ return false;
+}
+
+} // anonymous namespace
+
+Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
+ Handle<Object> locale_in) {
+ Handle<String> locale_str;
+ // This does part of the validity checking spec'ed in CanonicalizeLocaleList:
+ // 7c ii. If Type(kValue) is not String or Object, throw a TypeError
+ // exception.
+ // 7c iii. Let tag be ? ToString(kValue).
+ // 7c iv. If IsStructurallyValidLanguageTag(tag) is false, throw a
+ // RangeError exception.
+
+ if (locale_in->IsString()) {
+ locale_str = Handle<String>::cast(locale_in);
+ } else if (locale_in->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, locale_str,
+ Object::ToString(isolate, locale_in),
+ Nothing<std::string>());
+ } else {
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kLanguageID),
+ Nothing<std::string>());
+ }
+ std::string locale(locale_str->ToCString().get());
+
+ // Optimize for the most common case: a 2-letter language code in the
+ // canonical form/lowercase that is not one of the deprecated codes
+ // (in, iw, ji, jw). Don't check for the ~70 three-letter deprecated language
+ // codes. Instead, let them be handled by ICU in the slow path. However,
+ // fast-track 'fil' (3-letter canonical code).
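+ // For example (illustrative), "de" and "fil" are returned directly below,
+ // while a deprecated code such as "iw" skips this fast path so that ICU can
+ // replace it with its preferred value ("he") later.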
+ if ((IsTwoLetterLanguage(locale) && !IsDeprecatedLanguage(locale)) ||
+ locale == "fil") {
+ return Just(locale);
+ }
+
+ // Per BCP 47 2.1.1, language tags are case-insensitive, so lowercase the
+ // input before any further checks.
+ std::transform(locale.begin(), locale.end(), locale.begin(), AsciiToLower);
+ if (!IsStructurallyValidLanguageTag(isolate, locale)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
+ Nothing<std::string>());
+ }
+
+ // ICU maps a few grandfathered tags to what looks like a regular language
+ // tag even though the IANA language tag registry does not have a
+ // preferred-value entry for them. Return them as-is, lowercased.
+ if (IsGrandfatheredTagWithoutPreferredValue(locale)) {
+ return Just(locale);
+ }
+
+ // ECMA 402 6.2.3
+ // TODO(jshin): uloc_{for,to}LanguageTag can fail even for a structurally valid
+ // language tag if it's too long (much longer than 100 chars). Even if we
+ // allocate a longer buffer, ICU will still fail if it's too long. Either
+ // propose to Ecma 402 to put a limit on the locale length or change ICU to
+ // handle long locale names better. See
+ // https://unicode-org.atlassian.net/browse/ICU-13417
+ UErrorCode error = U_ZERO_ERROR;
+ char icu_result[ULOC_FULLNAME_CAPACITY];
+ uloc_forLanguageTag(locale.c_str(), icu_result, ULOC_FULLNAME_CAPACITY,
+ nullptr, &error);
+ if (U_FAILURE(error) || error == U_STRING_NOT_TERMINATED_WARNING) {
+ // TODO(jshin): This should not happen because the structural validity
+ // is already checked. If that's the case, remove this.
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
+ Nothing<std::string>());
+ }
+
+ // Force strict BCP47 rules.
+ char result[ULOC_FULLNAME_CAPACITY];
+ int32_t result_len = uloc_toLanguageTag(icu_result, result,
+ ULOC_FULLNAME_CAPACITY, TRUE, &error);
+
+ if (U_FAILURE(error)) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidLanguageTag, locale_str),
+ Nothing<std::string>());
+ }
+
+ return Just(std::string(result, result_len));
+}
+
+Maybe<std::vector<std::string>> Intl::CanonicalizeLocaleList(
+ Isolate* isolate, Handle<Object> locales, bool only_return_one_result) {
+ // 1. If locales is undefined, then
+ if (locales->IsUndefined(isolate)) {
+ // 1a. Return a new empty List.
+ return Just(std::vector<std::string>());
+ }
+ // 2. Let seen be a new empty List.
+ std::vector<std::string> seen;
+ // 3. If Type(locales) is String, then
+ if (locales->IsString()) {
+ // 3a. Let O be CreateArrayFromList(« locales »).
+ // Instead of creating a one-element array and then iterating over it,
+ // we inline the body of the iteration:
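+ // For example (illustrative), a single string argument "EN-us"
+ // canonicalizes to the one-element list {"en-US"}.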
+ std::string canonicalized_tag;
+ if (!CanonicalizeLanguageTag(isolate, locales).To(&canonicalized_tag)) {
+ return Nothing<std::vector<std::string>>();
+ }
+ seen.push_back(canonicalized_tag);
+ return Just(seen);
+ }
+ // 4. Else,
+ // 4a. Let O be ? ToObject(locales).
+ Handle<JSReceiver> o;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, o,
+ Object::ToObject(isolate, locales),
+ Nothing<std::vector<std::string>>());
+ // 5. Let len be ? ToLength(? Get(O, "length")).
+ Handle<Object> length_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, length_obj,
+ Object::GetLengthFromArrayLike(isolate, o),
+ Nothing<std::vector<std::string>>());
+ // TODO(jkummerow): Spec violation: strictly speaking, we have to iterate
+ // up to 2^53-1 if {length_obj} says so. Since cases above 2^32 probably
+ // don't happen in practice (and would be very slow if they do), we'll keep
+ // the code simple for now by using a saturating to-uint32 conversion.
+ double raw_length = length_obj->Number();
+ uint32_t len =
+ raw_length >= kMaxUInt32 ? kMaxUInt32 : static_cast<uint32_t>(raw_length);
+ // 6. Let k be 0.
+ // 7. Repeat, while k < len
+ for (uint32_t k = 0; k < len; k++) {
+ // 7a. Let Pk be ToString(k).
+ // 7b. Let kPresent be ? HasProperty(O, Pk).
+ LookupIterator it(isolate, o, k);
+ // 7c. If kPresent is true, then
+ if (!it.IsFound()) continue;
+ // 7c i. Let kValue be ? Get(O, Pk).
+ Handle<Object> k_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, k_value, Object::GetProperty(&it),
+ Nothing<std::vector<std::string>>());
+ // 7c ii. If Type(kValue) is not String or Object, throw a TypeError
+ // exception.
+ // 7c iii. Let tag be ? ToString(kValue).
+ // 7c iv. If IsStructurallyValidLanguageTag(tag) is false, throw a
+ // RangeError exception.
+ // 7c v. Let canonicalizedTag be CanonicalizeLanguageTag(tag).
+ std::string canonicalized_tag;
+ if (!CanonicalizeLanguageTag(isolate, k_value).To(&canonicalized_tag)) {
+ return Nothing<std::vector<std::string>>();
+ }
+ // 7c vi. If canonicalizedTag is not an element of seen, append
+ // canonicalizedTag as the last element of seen.
+ if (std::find(seen.begin(), seen.end(), canonicalized_tag) == seen.end()) {
+ seen.push_back(canonicalized_tag);
+ }
+ // 7d. Increase k by 1. (See loop header.)
+ // Optimization: some callers only need one result.
+ if (only_return_one_result) return Just(seen);
+ }
+ // 8. Return seen.
+ return Just(seen);
+}
+
+// ecma-402/#sec-currencydigits
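+// For example (illustrative), "JPY" yields 0 fraction digits and "USD"
+// yields 2; unrecognized codes fall back to 2.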
+Handle<Smi> Intl::CurrencyDigits(Isolate* isolate, Handle<String> currency) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::String::Value currency_string(v8_isolate, v8::Utils::ToLocal(currency));
+ CHECK_NOT_NULL(*currency_string);
+
+ DisallowHeapAllocation no_gc;
+ UErrorCode status = U_ZERO_ERROR;
+ uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
+ reinterpret_cast<const UChar*>(*currency_string), &status);
+ // For unknown or missing currency codes, default to the most common value, 2.
+ if (U_FAILURE(status)) fraction_digits = 2;
+ return Handle<Smi>(Smi::FromInt(fraction_digits), isolate);
+}
+
+MaybeHandle<JSObject> Intl::CreateNumberFormat(Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_number_format_function(), isolate);
+
+ Handle<JSObject> local_object;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, local_object,
+ JSObject::New(constructor, constructor), JSObject);
+
+ // Set number formatter as embedder field of the resulting JS object.
+ icu::DecimalFormat* number_format =
+ NumberFormat::InitializeNumberFormat(isolate, locale, options, resolved);
+
+ CHECK_NOT_NULL(number_format);
+
+ local_object->SetEmbedderField(NumberFormat::kDecimalFormatIndex,
+ reinterpret_cast<Smi*>(number_format));
+
+ Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
+ GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+ NumberFormat::DeleteNumberFormat,
+ WeakCallbackType::kInternalFields);
+ return local_object;
+}
+
+/**
+ * Parses a Unicode extension into a key-value map.
+ * Leaves the output map empty if the extension string is invalid.
+ * We are not concerned with the validity of the values at this point.
+ * 'attribute' in RFC 6067 is not supported. Keys without explicit
+ * values are assigned UNDEFINED.
+ * TODO(jshin): Fix the handling of 'attribute' (defined in RFC 6067, but no
+ * attributes have been registered yet, so it is unused) and of boolean keys
+ * without an explicit value.
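+ *
+ * For example (illustrative), the extension "-u-ca-japanese-nu-latn" is
+ * parsed into the map {"ca": "japanese", "nu": "latn"}.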
+ */
+void Intl::ParseExtension(Isolate* isolate, const std::string& extension,
+ std::map<std::string, std::string>& out) {
+ if (extension.compare(0, 3, "-u-") != 0) return;
+
+ // Key is {2}alphanum, value is {3,8}alphanum.
+ // Some keys may not have explicit values (booleans).
+ std::string key;
+ std::string value;
+ // Skip the "-u-".
+ size_t start = 3;
+ size_t end;
+ do {
+ end = extension.find("-", start);
+ size_t length =
+ (end == std::string::npos) ? extension.length() - start : end - start;
+ std::string element = extension.substr(start, length);
+ // Key is {2}alphanum
+ if (length == 2) {
+ if (!key.empty()) {
+ out.insert(std::pair<std::string, std::string>(key, value));
+ value.clear();
+ }
+ key = element;
+ // value is {3,8}alphanum.
+ } else if (length >= 3 && length <= 8 && !key.empty()) {
+ value = value.empty() ? element : (value + "-" + element);
+ } else {
+ return;
+ }
+ start = end + 1;
+ } while (end != std::string::npos);
+ if (!key.empty()) out.insert(std::pair<std::string, std::string>(key, value));
+}
+
+namespace {
+
+bool IsAToZ(char ch) {
+ return IsInRange(AsciiAlphaToLower(ch), 'a', 'z');
+}
+
+} // namespace
+
+// Verifies that the input is a well-formed ISO 4217 currency code.
+// ecma402/#sec-currency-codes
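+// For example (illustrative), "USD" and "jpy" are well-formed (case is
+// ignored), while "US", "US$" and "USDX" are not.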
+bool Intl::IsWellFormedCurrencyCode(Isolate* isolate, Handle<String> currency) {
+ // 2. If the number of elements in normalized is not 3, return false.
+ if (currency->length() != 3) return false;
+
+ currency = String::Flatten(isolate, currency);
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = currency->GetFlatContent();
+
+ // 1. Let normalized be the result of mapping currency to upper case as
+ //    described in 6.1.
+ // 3. If normalized contains any character that is not in the range "A" to
+ //    "Z" (U+0041 to U+005A), return false.
+ // 4. Return true.
+ // Don't uppercase to test. It could convert invalid code into a valid one.
+ // For example \u00DFP (Eszett+P) becomes SSP.
+ return (IsAToZ(flat.Get(0)) && IsAToZ(flat.Get(1)) && IsAToZ(flat.Get(2)));
+ }
+}
+
+// ecma402 #sup-string.prototype.tolocalelowercase
+// ecma402 #sup-string.prototype.tolocaleuppercase
+MaybeHandle<String> Intl::StringLocaleConvertCase(Isolate* isolate,
+ Handle<String> s,
+ bool to_upper,
+ Handle<Object> locales) {
+ std::vector<std::string> requested_locales;
+ if (!CanonicalizeLocaleList(isolate, locales, true).To(&requested_locales)) {
+ return MaybeHandle<String>();
+ }
+ std::string requested_locale = requested_locales.size() == 0
+ ? Intl::DefaultLocale(isolate)
+ : requested_locales[0];
+ size_t dash = requested_locale.find("-");
+ if (dash != std::string::npos) {
+ requested_locale = requested_locale.substr(0, dash);
+ }
+
+ // Primary language tag can be up to 8 characters long in theory.
+ // https://tools.ietf.org/html/bcp47#section-2.2.1
+ DCHECK_LE(requested_locale.length(), 8);
+ s = String::Flatten(isolate, s);
+
+ // All the languages requiring special handling have two-letter codes.
+ // Note that we have to check for '!= 2' here because private-use language
+ // tags (x-foo) or grandfathered irregular tags (e.g. i-enochian) would have
+ // only 'x' or 'i' when they get here.
+ if (V8_UNLIKELY(requested_locale.length() != 2)) {
+ return ConvertCase(s, to_upper, isolate);
+ }
+ // TODO(jshin): Consider adding a fast path for ASCII or Latin-1. The fastpath
+ // in the root locale needs to be adjusted for az, lt and tr because even case
+ // mapping of ASCII-range characters is different in those locales.
+ // Greek (el) does not require any adjustment.
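+ // For example (illustrative), upper-casing "i" under "tr" yields U+0130
+ // (LATIN CAPITAL LETTER I WITH DOT ABOVE) rather than the root-locale "I".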
+ if (V8_UNLIKELY((requested_locale == "tr") || (requested_locale == "el") ||
+ (requested_locale == "lt") || (requested_locale == "az"))) {
+ return LocaleConvertCase(s, isolate, to_upper, requested_locale.c_str());
+ } else {
+ return ConvertCase(s, to_upper, isolate);
+ }
+}
+
+MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
+ Handle<String> string1,
+ Handle<String> string2,
+ Handle<Object> locales,
+ Handle<Object> options) {
+ Factory* factory = isolate->factory();
+ Handle<JSObject> collator;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, collator,
+ CachedOrNewService(isolate, factory->NewStringFromStaticChars("collator"),
+ locales, options, factory->undefined_value()),
+ Object);
+ CHECK(collator->IsJSCollator());
+ return Intl::CompareStrings(isolate, Handle<JSCollator>::cast(collator),
+ string1, string2);
+}
+
+// ecma402/#sec-collator-comparestrings
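+// For example (illustrative), comparing "a" with "b" under a default collator
+// yields -1 (UCOL_LESS); identical strings yield 0 (UCOL_EQUAL).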
+Handle<Object> Intl::CompareStrings(Isolate* isolate,
+ Handle<JSCollator> collator,
+ Handle<String> string1,
+ Handle<String> string2) {
+ Factory* factory = isolate->factory();
+ icu::Collator* icu_collator = collator->icu_collator()->raw();
+ CHECK_NOT_NULL(icu_collator);
+
+ string1 = String::Flatten(isolate, string1);
+ string2 = String::Flatten(isolate, string2);
+
+ UCollationResult result;
+ UErrorCode status = U_ZERO_ERROR;
+ {
+ DisallowHeapAllocation no_gc;
+ int32_t length1 = string1->length();
+ int32_t length2 = string2->length();
+ String::FlatContent flat1 = string1->GetFlatContent();
+ String::FlatContent flat2 = string2->GetFlatContent();
+ std::unique_ptr<uc16[]> sap1;
+ std::unique_ptr<uc16[]> sap2;
+ icu::UnicodeString string_val1(
+ FALSE, GetUCharBufferFromFlat(flat1, &sap1, length1), length1);
+ icu::UnicodeString string_val2(
+ FALSE, GetUCharBufferFromFlat(flat2, &sap2, length2), length2);
+ result = icu_collator->compare(string_val1, string_val2, status);
+ }
+ DCHECK(U_SUCCESS(status));
+
+ return factory->NewNumberFromInt(result);
+}
+
+// ecma402/#sup-properties-of-the-number-prototype-object
+MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
+ Handle<Object> num,
+ Handle<Object> locales,
+ Handle<Object> options) {
+ Factory* factory = isolate->factory();
+ Handle<JSObject> number_format_holder;
+ // 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, number_format_holder,
+ CachedOrNewService(isolate,
+ factory->NewStringFromStaticChars("numberformat"),
+ locales, options, factory->undefined_value()),
+ String);
+ DCHECK(
+ Intl::IsObjectOfType(isolate, number_format_holder, Intl::kNumberFormat));
+ Handle<Object> number_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number_obj,
+ Object::ToNumber(isolate, num), String);
+
+ // Spec treats -0 and +0 as 0.
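+ // (Adding +0 maps -0 to +0 under IEEE 754 addition, which implements this.)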
+ double number = number_obj->Number() + 0;
+ // Return FormatNumber(numberFormat, x).
+ return NumberFormat::FormatNumber(isolate, number_format_holder, number);
+}
+
+// ecma402/#sec-defaultnumberoption
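+// For example (illustrative), a value of 2.9 with min=1, max=21 returns 2
+// (floor), undefined returns the fallback, and 25 throws a RangeError.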
+Maybe<int> Intl::DefaultNumberOption(Isolate* isolate, Handle<Object> value,
+ int min, int max, int fallback,
+ Handle<String> property) {
+ // 2. Else, return fallback.
+ if (value->IsUndefined()) return Just(fallback);
+
+ // 1. If value is not undefined, then
+ // a. Let value be ? ToNumber(value).
+ Handle<Object> value_num;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value_num, Object::ToNumber(isolate, value), Nothing<int>());
+ DCHECK(value_num->IsNumber());
+
+ // b. If value is NaN or less than minimum or greater than maximum, throw a
+ // RangeError exception.
+ if (value_num->IsNaN() || value_num->Number() < min ||
+ value_num->Number() > max) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(MessageTemplate::kPropertyValueOutOfRange, property),
+ Nothing<int>());
+ }
+
+ // The max and min arguments are integers and the above check makes
+ // sure that we are within the integer range making this double to
+ // int conversion safe.
+ //
+ // c. Return floor(value).
+ return Just(FastD2I(floor(value_num->Number())));
+}
+
+// ecma402/#sec-getnumberoption
+Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ Handle<String> property, int min, int max,
+ int fallback) {
+ // 1. Let value be ? Get(options, property).
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, JSReceiver::GetProperty(isolate, options, property),
+ Nothing<int>());
+
+ // Return ? DefaultNumberOption(value, minimum, maximum, fallback).
+ return DefaultNumberOption(isolate, value, min, max, fallback, property);
+}
+
+Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ const char* property, int min, int max,
+ int fallback) {
+ Handle<String> property_str =
+ isolate->factory()->NewStringFromAsciiChecked(property);
+ return GetNumberOption(isolate, options, property_str, min, max, fallback);
+}
+
+Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
+ icu::DecimalFormat* number_format,
+ Handle<JSReceiver> options,
+ int mnfd_default,
+ int mxfd_default) {
+ CHECK_NOT_NULL(number_format);
+
+ // 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits", 1, 21,
+ // 1).
+ int mnid;
+ if (!GetNumberOption(isolate, options, "minimumIntegerDigits", 1, 21, 1)
+ .To(&mnid)) {
+ return Nothing<bool>();
+ }
+
+ // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
+ // mnfdDefault).
+ int mnfd;
+ if (!GetNumberOption(isolate, options, "minimumFractionDigits", 0, 20,
+ mnfd_default)
+ .To(&mnfd)) {
+ return Nothing<bool>();
+ }
+
+ // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
+ int mxfd_actual_default = std::max(mnfd, mxfd_default);
+
+ // 8. Let mxfd be ? GetNumberOption(options,
+ // "maximumFractionDigits", mnfd, 20, mxfdActualDefault).
+ int mxfd;
+ if (!GetNumberOption(isolate, options, "maximumFractionDigits", mnfd, 20,
+ mxfd_actual_default)
+ .To(&mxfd)) {
+ return Nothing<bool>();
+ }
+
+ // 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
+ Handle<Object> mnsd_obj;
+ Handle<String> mnsd_str =
+ isolate->factory()->NewStringFromStaticChars("minimumSignificantDigits");
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str),
+ Nothing<bool>());
+
+ // 10. Let mxsd be ? Get(options, "maximumSignificantDigits").
+ Handle<Object> mxsd_obj;
+ Handle<String> mxsd_str =
+ isolate->factory()->NewStringFromStaticChars("maximumSignificantDigits");
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str),
+ Nothing<bool>());
+
+ // 11. Set intlObj.[[MinimumIntegerDigits]] to mnid.
+ number_format->setMinimumIntegerDigits(mnid);
+
+ // 12. Set intlObj.[[MinimumFractionDigits]] to mnfd.
+ number_format->setMinimumFractionDigits(mnfd);
+
+ // 13. Set intlObj.[[MaximumFractionDigits]] to mxfd.
+ number_format->setMaximumFractionDigits(mxfd);
+
+ bool significant_digits_used = false;
+ // 14. If mnsd is not undefined or mxsd is not undefined, then
+ if (!mnsd_obj->IsUndefined(isolate) || !mxsd_obj->IsUndefined(isolate)) {
+ // 14. a. Let mnsd be ? DefaultNumberOption(mnsd, 1, 21, 1).
+ int mnsd;
+ if (!DefaultNumberOption(isolate, mnsd_obj, 1, 21, 1, mnsd_str).To(&mnsd)) {
+ return Nothing<bool>();
+ }
+
+ // 14. b. Let mxsd be ? DefaultNumberOption(mxsd, mnsd, 21, 21).
+ int mxsd;
+ if (!DefaultNumberOption(isolate, mxsd_obj, mnsd, 21, 21, mxsd_str)
+ .To(&mxsd)) {
+ return Nothing<bool>();
+ }
+
+ significant_digits_used = true;
+
+ // 14. c. Set intlObj.[[MinimumSignificantDigits]] to mnsd.
+ number_format->setMinimumSignificantDigits(mnsd);
+
+ // 14. d. Set intlObj.[[MaximumSignificantDigits]] to mxsd.
+ number_format->setMaximumSignificantDigits(mxsd);
+ }
+
+ number_format->setSignificantDigitsUsed(significant_digits_used);
+ number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
+ return Just(true);
+}
+
+namespace {
+
+// ECMA 402 9.2.2 BestAvailableLocale(availableLocales, locale)
+// https://tc39.github.io/ecma402/#sec-bestavailablelocale
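+// For example (illustrative), with availableLocales = {"fr"} the candidate
+// "fr-CA" is truncated at its last "-" to "fr" and returned; if no prefix
+// matches, the empty string stands in for the spec's "undefined".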
+std::string BestAvailableLocale(std::set<std::string> available_locales,
+ std::string locale) {
+ const char separator = '-';
+
+ // 1. Let candidate be locale.
+ // 2. Repeat,
+ do {
+ // 2.a. If availableLocales contains an element equal to candidate, return
+ // candidate.
+ if (available_locales.find(locale) != available_locales.end()) {
+ return locale;
+ }
+ // 2.b. Let pos be the character index of the last occurrence of "-"
+ // (U+002D) within candidate. If that character does not occur, return
+ // undefined.
+ size_t pos = locale.rfind(separator);
+ if (pos == std::string::npos) {
+ return "";
+ }
+ // 2.c. If pos ≥ 2 and the character "-" occurs at index pos-2 of candidate,
+ // decrease pos by 2.
+ if (pos >= 2 && locale[pos - 2] == separator) {
+ pos -= 2;
+ }
+ // 2.d. Let candidate be the substring of candidate from position 0,
+ // inclusive, to position pos, exclusive.
+ locale = locale.substr(0, pos);
+ } while (true);
+}
+
+#define ANY_EXTENSION_REGEXP "-[a-z0-9]{1}-.*"
+
+std::unique_ptr<icu::RegexMatcher> GetAnyExtensionRegexpMatcher() {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::RegexMatcher> matcher(new icu::RegexMatcher(
+ icu::UnicodeString(ANY_EXTENSION_REGEXP, -1, US_INV), 0, status));
+ DCHECK(U_SUCCESS(status));
+ return matcher;
+}
+
+#undef ANY_EXTENSION_REGEXP
+
+// ECMA 402 9.2.7 LookupSupportedLocales(availableLocales, requestedLocales)
+// https://tc39.github.io/ecma402/#sec-lookupsupportedlocales
+std::vector<std::string> LookupSupportedLocales(
+ std::set<std::string> available_locales,
+ std::vector<std::string> requested_locales) {
+ std::unique_ptr<icu::RegexMatcher> matcher = GetAnyExtensionRegexpMatcher();
+
+ // 1. Let subset be a new empty List.
+ std::vector<std::string> subset;
+
+ // 2. For each element locale of requestedLocales in List order, do
+ for (auto locale : requested_locales) {
+ // 2.a. Let noExtensionsLocale be the String value that is locale with all
+ // Unicode locale extension sequences removed.
+ icu::UnicodeString locale_uni(locale.c_str(), -1, US_INV);
+ // TODO(bstell): look at using uloc_forLanguageTag to convert the language
+ // tag to locale id
+ // TODO(bstell): look at using uloc_getBaseName to just get the name without
+ // all the keywords
+ matcher->reset(locale_uni);
+ UErrorCode status = U_ZERO_ERROR;
+ // TODO(bstell): need to determine if this is the correct behavior.
+ // This matches the JS implementation but might not match the spec.
+ // According to
+ // https://tc39.github.io/ecma402/#sec-unicode-locale-extension-sequences:
+ //
+ // This standard uses the term "Unicode locale extension sequence" for
+ // any substring of a language tag that is not part of a private use
+ // subtag sequence, starts with a separator "-" and the singleton "u",
+ // and includes the maximum sequence of following non-singleton subtags
+ // and their preceding "-" separators.
+ //
+ // According to the spec a locale "en-t-aaa-u-bbb-v-ccc-x-u-ddd", should
+ // remove only the "-u-bbb" part, and keep everything else, whereas this
+ // regexp matcher would leave only the "en".
+ icu::UnicodeString no_extensions_locale_uni =
+ matcher->replaceAll("", status);
+ DCHECK(U_SUCCESS(status));
+ std::string no_extensions_locale;
+ no_extensions_locale_uni.toUTF8String(no_extensions_locale);
+ // 2.b. Let availableLocale be BestAvailableLocale(availableLocales,
+ // noExtensionsLocale).
+ std::string available_locale =
+ BestAvailableLocale(available_locales, no_extensions_locale);
+ // 2.c. If availableLocale is not undefined, append locale to the end of
+ // subset.
+ if (!available_locale.empty()) {
+ subset.push_back(locale);
+ }
+ }
+
+ // 3. Return subset.
+ return subset;
+}
+
+// ECMA 402 9.2.8 BestFitSupportedLocales(availableLocales, requestedLocales)
+// https://tc39.github.io/ecma402/#sec-bestfitsupportedlocales
+std::vector<std::string> BestFitSupportedLocales(
+ std::set<std::string> available_locales,
+ std::vector<std::string> requested_locales) {
+ return LookupSupportedLocales(available_locales, requested_locales);
+}
+
+enum MatcherOption { kBestFit, kLookup };
+
+// TODO(bstell): should this be moved somewhere where it is reusable?
+// Implement steps 5, 6, 7 for ECMA 402 9.2.9 SupportedLocales
+// https://tc39.github.io/ecma402/#sec-supportedlocales
+MaybeHandle<JSObject> CreateReadOnlyArray(Isolate* isolate,
+ std::vector<std::string> elements) {
+ Factory* factory = isolate->factory();
+ if (elements.size() >= kMaxUInt32) {
+ THROW_NEW_ERROR(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayLength), JSObject);
+ }
+
+ PropertyAttributes attr =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+
+ // 5. Let subset be CreateArrayFromList(elements).
+ // 6. Let keys be subset.[[OwnPropertyKeys]]().
+ Handle<JSArray> subset = factory->NewJSArray(0);
+
+ // 7. For each element P of keys in List order, do
+ uint32_t length = static_cast<uint32_t>(elements.size());
+ for (uint32_t i = 0; i < length; i++) {
+ const std::string& part = elements[i];
+ Handle<String> value =
+ factory->NewStringFromUtf8(CStrVector(part.c_str())).ToHandleChecked();
+ JSObject::AddDataElement(subset, i, value, attr);
+ }
+
+ // 7.a. Let desc be PropertyDescriptor { [[Configurable]]: false,
+ // [[Writable]]: false }.
+ PropertyDescriptor desc;
+ desc.set_writable(false);
+ desc.set_configurable(false);
+
+ // 7.b. Perform ! DefinePropertyOrThrow(subset, P, desc).
+ JSArray::ArraySetLength(isolate, subset, &desc, kThrowOnError).ToChecked();
+ return subset;
+}
+
+// ECMA 402 9.2.9 SupportedLocales(availableLocales, requestedLocales, options)
+// https://tc39.github.io/ecma402/#sec-supportedlocales
+MaybeHandle<JSObject> SupportedLocales(
+ Isolate* isolate, std::string service,
+ std::set<std::string> available_locales,
+ std::vector<std::string> requested_locales, Handle<Object> options) {
+ std::vector<std::string> supported_locales;
+
+ // 1. If options is not undefined, then
+ // a. Let options be ? ToObject(options).
+ // b. Let matcher be ? GetOption(options, "localeMatcher", "string",
+ // « "lookup", "best fit" », "best fit").
+ // 2. Else, let matcher be "best fit".
+ MatcherOption matcher = kBestFit;
+ if (!options->IsUndefined(isolate)) {
+ Handle<JSReceiver> options_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options_obj,
+ Object::ToObject(isolate, options), JSObject);
+ std::unique_ptr<char[]> matcher_str = nullptr;
+ std::vector<const char*> matcher_values = {"lookup", "best fit"};
+ Maybe<bool> maybe_found_matcher =
+ Intl::GetStringOption(isolate, options_obj, "localeMatcher",
+ matcher_values, service.c_str(), &matcher_str);
+ MAYBE_RETURN(maybe_found_matcher, MaybeHandle<JSObject>());
+ if (maybe_found_matcher.FromJust()) {
+ DCHECK_NOT_NULL(matcher_str.get());
+ if (strcmp(matcher_str.get(), "lookup") == 0) {
+ matcher = kLookup;
+ }
+ }
+ }
+
+ // 3. If matcher is "best fit", then
+ // a. Let supportedLocales be BestFitSupportedLocales(availableLocales,
+ // requestedLocales).
+ if (matcher == kBestFit) {
+ supported_locales =
+ BestFitSupportedLocales(available_locales, requested_locales);
+ } else {
+ // 4. Else,
+ // a. Let supportedLocales be LookupSupportedLocales(availableLocales,
+ // requestedLocales).
+ DCHECK_EQ(matcher, kLookup);
+ supported_locales =
+ LookupSupportedLocales(available_locales, requested_locales);
+ }
+
+ // TODO(jkummerow): Possibly revisit why the spec has the individual entries
+ // readonly but the array is not frozen.
+ // https://github.com/tc39/ecma402/issues/258
+
+ // 5. Let subset be CreateArrayFromList(supportedLocales).
+ // 6. Let keys be subset.[[OwnPropertyKeys]]().
+ // 7. For each element P of keys in List order, do
+ // a. Let desc be PropertyDescriptor { [[Configurable]]: false,
+ // [[Writable]]: false }.
+ // b. Perform ! DefinePropertyOrThrow(subset, P, desc).
+ MaybeHandle<JSObject> subset =
+ CreateReadOnlyArray(isolate, supported_locales);
+
+ // 8. Return subset.
+ return subset;
+}
+} // namespace
+
+// ECMA 402 10.2.2 Intl.Collator.supportedLocalesOf
+// https://tc39.github.io/ecma402/#sec-intl.collator.supportedlocalesof
+// Implemented here as Intl::SupportedLocalesOf and called from JS.
+MaybeHandle<JSObject> Intl::SupportedLocalesOf(Isolate* isolate,
+ Handle<String> service,
+ Handle<Object> locales_in,
+ Handle<Object> options_in) {
+ // Let availableLocales be %Collator%.[[AvailableLocales]].
+ IcuService icu_service = Intl::StringToIcuService(service);
+ std::set<std::string> available_locales = GetAvailableLocales(icu_service);
+ std::vector<std::string> requested_locales;
+ // Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ bool got_requested_locales =
+ CanonicalizeLocaleList(isolate, locales_in, false).To(&requested_locales);
+ if (!got_requested_locales) {
+ return MaybeHandle<JSObject>();
+ }
+
+ // Return ? SupportedLocales(availableLocales, requestedLocales, options).
+ std::string service_str(service->ToCString().get());
+ return SupportedLocales(isolate, service_str, available_locales,
+ requested_locales, options_in);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index d9cfd67965..38d11772a4 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -9,12 +9,14 @@
#ifndef V8_OBJECTS_INTL_OBJECTS_H_
#define V8_OBJECTS_INTL_OBJECTS_H_
+#include <map>
#include <set>
#include <string>
#include "src/contexts.h"
#include "src/intl.h"
#include "src/objects.h"
+#include "unicode/locid.h"
#include "unicode/uversion.h"
namespace U_ICU_NAMESPACE {
@@ -23,6 +25,7 @@ class Collator;
class DecimalFormat;
class PluralRules;
class SimpleDateFormat;
+class UnicodeString;
}
namespace v8 {
@@ -40,16 +43,64 @@ class DateFormat {
Handle<JSObject> resolved);
// Unpacks date format object from corresponding JavaScript object.
- static icu::SimpleDateFormat* UnpackDateFormat(Isolate* isolate,
- Handle<JSObject> obj);
+ static icu::SimpleDateFormat* UnpackDateFormat(Handle<JSObject> obj);
// Release memory we allocated for the DateFormat once the JS object that
// holds the pointer gets garbage collected.
static void DeleteDateFormat(const v8::WeakCallbackInfo<void>& data);
+ // ecma402/#sec-formatdatetime
+ // FormatDateTime( dateTimeFormat, x )
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatDateTime(
+ Isolate* isolate, Handle<JSObject> date_time_format_holder, double x);
+
+ // ecma402/#sec-datetime-format-functions
+ // DateTime Format Functions
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> DateTimeFormat(
+ Isolate* isolate, Handle<JSObject> date_time_format_holder,
+ Handle<Object> date);
+
+ // The UnwrapDateTimeFormat abstract operation gets the underlying
+ // DateTimeFormat operation for various methods which implement ECMA-402 v1
+ // semantics for supporting initializing existing Intl objects.
+ //
+ // ecma402/#sec-unwrapdatetimeformat
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> Unwrap(
+ Isolate* isolate, Handle<JSReceiver> receiver, const char* method_name);
+
+ // ecma-402/#sec-todatetimeoptions
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ToDateTimeOptions(
+ Isolate* isolate, Handle<Object> input_options, const char* required,
+ const char* defaults);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToLocaleDateTime(
+ Isolate* isolate, Handle<Object> date, Handle<Object> locales,
+ Handle<Object> options, const char* required, const char* defaults,
+ const char* service);
+
// Layout description.
- static const int kSimpleDateFormat = JSObject::kHeaderSize;
- static const int kSize = kSimpleDateFormat + kPointerSize;
+#define DATE_FORMAT_FIELDS(V) \
+ V(kSimpleDateFormat, kPointerSize) \
+ V(kBoundFormat, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, DATE_FORMAT_FIELDS)
+#undef DATE_FORMAT_FIELDS
+
+ // ContextSlot defines the context structure for the bound
+ // DateTimeFormat.prototype.format function
+ enum ContextSlot {
+ kDateFormat = Context::MIN_CONTEXT_SLOTS,
+
+ kLength
+ };
+
+ // TODO(ryzokuken): Remove this and use regular accessors once DateFormat is a
+ // subclass of JSObject
+ //
+ // This needs to be consistent with the above Layout Description
+ static const int kSimpleDateFormatIndex = 0;
+ static const int kBoundFormatIndex = 1;
private:
DateFormat();
@@ -65,8 +116,7 @@ class NumberFormat {
Handle<JSObject> resolved);
// Unpacks number format object from corresponding JavaScript object.
- static icu::DecimalFormat* UnpackNumberFormat(Isolate* isolate,
- Handle<JSObject> obj);
+ static icu::DecimalFormat* UnpackNumberFormat(Handle<JSObject> obj);
// Release memory we allocated for the NumberFormat once the JS object that
// holds the pointer gets garbage collected.
@@ -83,7 +133,7 @@ class NumberFormat {
const char* method_name);
// ecm402/#sec-formatnumber
- static MaybeHandle<Object> FormatNumber(Isolate* isolate,
+ static MaybeHandle<String> FormatNumber(Isolate* isolate,
Handle<JSObject> number_format_holder,
double value);
@@ -118,64 +168,6 @@ class NumberFormat {
NumberFormat();
};
-class Collator {
- public:
- // Create a collator for the specificied locale and options. Stores the
- // collator in the provided collator_holder.
- static bool InitializeCollator(Isolate* isolate,
- Handle<JSObject> collator_holder,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved);
-
- // Unpacks collator object from corresponding JavaScript object.
- static icu::Collator* UnpackCollator(Isolate* isolate, Handle<JSObject> obj);
-
- // Layout description.
- static const int kCollator = JSObject::kHeaderSize;
- static const int kSize = kCollator + kPointerSize;
-
- private:
- Collator();
-};
-
-class PluralRules {
- public:
- // Create a PluralRules and DecimalFormat for the specificied locale and
- // options. Returns false on an ICU failure.
- static bool InitializePluralRules(Isolate* isolate, Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved,
- icu::PluralRules** plural_rules,
- icu::DecimalFormat** decimal_format);
-
- // Unpacks PluralRules object from corresponding JavaScript object.
- static icu::PluralRules* UnpackPluralRules(Isolate* isolate,
- Handle<JSObject> obj);
-
- // Unpacks NumberFormat object from corresponding JavaScript PluralRUles
- // object.
- static icu::DecimalFormat* UnpackNumberFormat(Isolate* isolate,
- Handle<JSObject> obj);
-
- // Release memory we allocated for the Collator once the JS object that holds
- // the pointer gets garbage collected.
- static void DeletePluralRules(const v8::WeakCallbackInfo<void>& data);
-
- // Layout description.
- static const int kPluralRules = JSObject::kHeaderSize;
- // Values are formatted with this NumberFormat and then parsed as a Number
- // to round them based on the options passed into the PluralRules objct.
- // TODO(littledan): If a future version of ICU supports the rounding
- // built-in to PluralRules, switch to that, see this bug:
- // http://bugs.icu-project.org/trac/ticket/12763
- static const int kNumberFormat = kPluralRules + kPointerSize;
- static const int kSize = kNumberFormat + kPointerSize;
-
- private:
- PluralRules();
-};
-
class V8BreakIterator {
public:
// Create a BreakIterator for the specificied locale and options. Returns the
@@ -186,17 +178,42 @@ class V8BreakIterator {
Handle<JSObject> resolved);
// Unpacks break iterator object from corresponding JavaScript object.
- static icu::BreakIterator* UnpackBreakIterator(Isolate* isolate,
- Handle<JSObject> obj);
+ static icu::BreakIterator* UnpackBreakIterator(Handle<JSObject> obj);
// Release memory we allocated for the BreakIterator once the JS object that
// holds the pointer gets garbage collected.
static void DeleteBreakIterator(const v8::WeakCallbackInfo<void>& data);
+ static void AdoptText(Isolate* isolate,
+ Handle<JSObject> break_iterator_holder,
+ Handle<String> text);
+
// Layout description.
- static const int kBreakIterator = JSObject::kHeaderSize;
- static const int kUnicodeString = kBreakIterator + kPointerSize;
- static const int kSize = kUnicodeString + kPointerSize;
+#define BREAK_ITERATOR_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kBreakIterator, kPointerSize) \
+ V(kUnicodeString, kPointerSize) \
+ V(kBoundAdoptText, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, BREAK_ITERATOR_FIELDS)
+#undef BREAK_ITERATOR_FIELDS
+
+ // ContextSlot defines the context structure for the bound
+ // v8BreakIterator.prototype.adoptText function
+ enum class ContextSlot {
+ kV8BreakIterator = Context::MIN_CONTEXT_SLOTS,
+
+ kLength
+ };
+
+ // TODO(ryzokuken): Remove this and use regular accessors once v8BreakIterator
+ // is a subclass of JSObject
+ //
+ // This needs to be consistent with the above Layout Description
+ static const int kBreakIteratorIndex = 0;
+ static const int kUnicodeStringIndex = 1;
+ static const int kBoundAdoptTextIndex = 2;
private:
V8BreakIterator();
@@ -226,12 +243,27 @@ class Intl {
static bool IsObjectOfType(Isolate* isolate, Handle<Object> object,
Intl::Type expected_type);
+ static IcuService StringToIcuService(Handle<String> service);
+
// Gets the ICU locales for a given service. If there is a locale with a
// script tag then the locales also include a locale without the script; eg,
// pa_Guru_IN (language=Panjabi, script=Gurmukhi, country-India) would include
// pa_IN.
static std::set<std::string> GetAvailableLocales(const IcuService& service);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> AvailableLocalesOf(
+ Isolate* isolate, Handle<String> service);
+
+ static MaybeHandle<JSObject> SupportedLocalesOf(Isolate* isolate,
+ Handle<String> service,
+ Handle<Object> locales_in,
+ Handle<Object> options_in);
+
+ static std::string DefaultLocale(Isolate* isolate);
+
+ static void DefineWEProperty(Isolate* isolate, Handle<JSObject> target,
+ Handle<Name> key, Handle<Object> value);
+
// If locale has a script tag then return true and the locale without the
// script else return false and an empty string
static bool RemoveLocaleScriptTag(const std::string& icu_locale,
@@ -264,6 +296,16 @@ class Intl {
Isolate* isolate, const char* service, Handle<Object> requestedLocales,
Handle<Object> options);
+ // This currently calls out to the JavaScript implementation of
+ // CanonicalizeLocaleList.
+ // Note: This is deprecated glue code, required only as long as ResolveLocale
+ // still calls a JS implementation. The C++ successor is the overloaded
+ // version below that returns a Maybe<std::vector<std::string>>.
+ //
+ // ecma402/#sec-canonicalizelocalelist
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> CanonicalizeLocaleListJS(
+ Isolate* isolate, Handle<Object> locales);
+
// ECMA402 9.2.10. GetOption( options, property, type, values, fallback)
// ecma402/#sec-getoption
//
@@ -299,6 +341,99 @@ class Intl {
V8_WARN_UNUSED_RESULT static Maybe<bool> GetBoolOption(
Isolate* isolate, Handle<JSReceiver> options, const char* property,
const char* service, bool* result);
+
+ // Canonicalize the locale.
+ // https://tc39.github.io/ecma402/#sec-canonicalizelanguagetag,
+ // including type check and structural validity check.
+ static Maybe<std::string> CanonicalizeLanguageTag(Isolate* isolate,
+ Handle<Object> locale_in);
+
+ // https://tc39.github.io/ecma402/#sec-canonicalizelocalelist
+ // {only_return_one_result} is an optimization for callers that only
+ // care about the first result.
+ static Maybe<std::vector<std::string>> CanonicalizeLocaleList(
+ Isolate* isolate, Handle<Object> locales,
+ bool only_return_one_result = false);
+
+ // ecma-402/#sec-currencydigits
+ // The currency is expected to be an all-upper-case string value.
+ static Handle<Smi> CurrencyDigits(Isolate* isolate, Handle<String> currency);
+
+ // TODO(ftang): Remove this and use ICU to do the conversion in the future
+ static void ParseExtension(Isolate* isolate, const std::string& extension,
+ std::map<std::string, std::string>& out);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> CreateNumberFormat(
+ Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
+ Handle<JSObject> resolved);
+
+ // ecma402/#sec-iswellformedcurrencycode
+ static bool IsWellFormedCurrencyCode(Isolate* isolate,
+ Handle<String> currency);
+
+ // For locale sensitive functions
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> StringLocaleConvertCase(
+ Isolate* isolate, Handle<String> s, bool is_upper,
+ Handle<Object> locales);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> StringLocaleCompare(
+ Isolate* isolate, Handle<String> s1, Handle<String> s2,
+ Handle<Object> locales, Handle<Object> options);
+
+ V8_WARN_UNUSED_RESULT static Handle<Object> CompareStrings(
+ Isolate* isolate, Handle<JSCollator> collator, Handle<String> s1,
+ Handle<String> s2);
+
+ // ecma402/#sup-properties-of-the-number-prototype-object
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> NumberToLocaleString(
+ Isolate* isolate, Handle<Object> num, Handle<Object> locales,
+ Handle<Object> options);
+
+ // ecma402/#sec-defaultnumberoption
+ V8_WARN_UNUSED_RESULT static Maybe<int> DefaultNumberOption(
+ Isolate* isolate, Handle<Object> value, int min, int max, int fallback,
+ Handle<String> property);
+
+ // ecma402/#sec-getnumberoption
+ V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
+ Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
+ int min, int max, int fallback);
+ V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
+ Isolate* isolate, Handle<JSReceiver> options, const char* property,
+ int min, int max, int fallback);
+
+ // ecma402/#sec-setnfdigitoptions
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetNumberFormatDigitOptions(
+ Isolate* isolate, icu::DecimalFormat* number_format,
+ Handle<JSReceiver> options, int mnfd_default, int mxfd_default);
+
+ icu::Locale static CreateICULocale(Isolate* isolate,
+ Handle<String> bcp47_locale_str);
+
+ // Helper function to convert a UnicodeString to a Handle<String>
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToString(
+ Isolate* isolate, const icu::UnicodeString& string);
+
+ // Helper function to convert a substring of UnicodeString to a Handle<String>
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToString(
+ Isolate* isolate, const icu::UnicodeString& string, int32_t begin,
+ int32_t end);
+
+ // A helper function to implement formatToParts which adds an element to the array as
+ // $array[$index] = { type: $field_type_string, value: $value }
+ static void AddElement(Isolate* isolate, Handle<JSArray> array, int index,
+ Handle<String> field_type_string,
+ Handle<String> value);
+
+ // A helper function to implement formatToParts which adds an element to the array as
+ // $array[$index] = {
+ // type: $field_type_string, value: $value,
+ // $additional_property_name: $additional_property_value
+ // }
+ static void AddElement(Isolate* isolate, Handle<JSArray> array, int index,
+ Handle<String> field_type_string, Handle<String> value,
+ Handle<String> additional_property_name,
+ Handle<String> additional_property_value);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
new file mode 100644
index 0000000000..43bc294d04
--- /dev/null
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -0,0 +1,210 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_INL_H_
+#define V8_OBJECTS_JS_ARRAY_BUFFER_INL_H_
+
+#include "src/objects/js-array-buffer.h"
+
+#include "src/objects-inl.h" // Needed for write barriers
+#include "src/wasm/wasm-engine.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSArrayBufferView)
+CAST_ACCESSOR(JSTypedArray)
+
+void* JSArrayBuffer::backing_store() const {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
+ return reinterpret_cast<void*>(ptr);
+}
+
+void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr);
+}
+
+ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
+
+size_t JSArrayBuffer::allocation_length() const {
+ if (backing_store() == nullptr) {
+ return 0;
+ }
+ // If this buffer is managed by the WasmMemoryTracker
+ if (is_wasm_memory()) {
+ const auto* data =
+ GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
+ backing_store());
+ DCHECK_NOT_NULL(data);
+ return data->allocation_length;
+ }
+ return byte_length()->Number();
+}
+
+void* JSArrayBuffer::allocation_base() const {
+ if (backing_store() == nullptr) {
+ return nullptr;
+ }
+ // If this buffer is managed by the WasmMemoryTracker
+ if (is_wasm_memory()) {
+ const auto* data =
+ GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
+ backing_store());
+ DCHECK_NOT_NULL(data);
+ return data->allocation_base;
+ }
+ return backing_store();
+}
+
+bool JSArrayBuffer::is_wasm_memory() const {
+ bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
+ DCHECK_EQ(is_wasm_memory,
+ GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
+ backing_store()));
+ return is_wasm_memory;
+}
+
+void JSArrayBuffer::set_bit_field(uint32_t bits) {
+ if (kInt32Size != kPointerSize) {
+#if V8_TARGET_LITTLE_ENDIAN
+ WRITE_UINT32_FIELD(this, kBitFieldSlot + kInt32Size, 0);
+#else
+ WRITE_UINT32_FIELD(this, kBitFieldSlot, 0);
+#endif
+ }
+ WRITE_UINT32_FIELD(this, kBitFieldOffset, bits);
+}
+
+uint32_t JSArrayBuffer::bit_field() const {
+ return READ_UINT32_FIELD(this, kBitFieldOffset);
+}
+
+bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
+
+void JSArrayBuffer::set_is_external(bool value) {
+ set_bit_field(IsExternal::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::is_neuterable() {
+ return IsNeuterable::decode(bit_field());
+}
+
+void JSArrayBuffer::set_is_neuterable(bool value) {
+ set_bit_field(IsNeuterable::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::was_neutered() { return WasNeutered::decode(bit_field()); }
+
+void JSArrayBuffer::set_was_neutered(bool value) {
+ set_bit_field(WasNeutered::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::is_shared() { return IsShared::decode(bit_field()); }
+
+void JSArrayBuffer::set_is_shared(bool value) {
+ set_bit_field(IsShared::update(bit_field(), value));
+}
+
+bool JSArrayBuffer::is_growable() { return IsGrowable::decode(bit_field()); }
+
+void JSArrayBuffer::set_is_growable(bool value) {
+ set_bit_field(IsGrowable::update(bit_field(), value));
+}
+
+Object* JSArrayBufferView::byte_offset() const {
+ if (WasNeutered()) return Smi::kZero;
+ return Object::cast(READ_FIELD(this, kByteOffsetOffset));
+}
+
+void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kByteOffsetOffset, value);
+ CONDITIONAL_WRITE_BARRIER(this, kByteOffsetOffset, value, mode);
+}
+
+Object* JSArrayBufferView::byte_length() const {
+ if (WasNeutered()) return Smi::kZero;
+ return Object::cast(READ_FIELD(this, kByteLengthOffset));
+}
+
+void JSArrayBufferView::set_byte_length(Object* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kByteLengthOffset, value);
+ CONDITIONAL_WRITE_BARRIER(this, kByteLengthOffset, value, mode);
+}
+
+ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
+#ifdef VERIFY_HEAP
+ACCESSORS(JSArrayBufferView, raw_byte_offset, Object, kByteOffsetOffset)
+ACCESSORS(JSArrayBufferView, raw_byte_length, Object, kByteLengthOffset)
+#endif
+
+bool JSArrayBufferView::WasNeutered() const {
+ return JSArrayBuffer::cast(buffer())->was_neutered();
+}
+
+Object* JSTypedArray::length() const {
+ if (WasNeutered()) return Smi::kZero;
+ return Object::cast(READ_FIELD(this, kLengthOffset));
+}
+
+size_t JSTypedArray::length_value() const {
+ if (WasNeutered()) return 0;
+ double val = Object::cast(READ_FIELD(this, kLengthOffset))->Number();
+ DCHECK_LE(val, kMaxSafeInteger); // 2^53-1
+ DCHECK_GE(val, -kMaxSafeInteger); // -2^53+1
+ DCHECK_LE(val, std::numeric_limits<size_t>::max());
+ DCHECK_GE(val, std::numeric_limits<size_t>::min());
+ return static_cast<size_t>(val);
+}
+
+void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kLengthOffset, value);
+ CONDITIONAL_WRITE_BARRIER(this, kLengthOffset, value, mode);
+}
+
+bool JSTypedArray::is_on_heap() const {
+ DisallowHeapAllocation no_gc;
+ // Checking that buffer()->backing_store() is not nullptr is not sufficient;
+ // it will be nullptr when byte_length is 0 as well.
+ FixedTypedArrayBase* fta(FixedTypedArrayBase::cast(elements()));
+ return fta->base_pointer() == fta;
+}
+
+// static
+MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
+ Handle<Object> receiver,
+ const char* method_name) {
+ if (V8_UNLIKELY(!receiver->IsJSTypedArray())) {
+ const MessageTemplate::Template message = MessageTemplate::kNotTypedArray;
+ THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
+ }
+
+ Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
+ if (V8_UNLIKELY(array->WasNeutered())) {
+ const MessageTemplate::Template message =
+ MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
+ }
+
+ // The spec says to return `buffer`, but doing so might disrupt current
+ // implementations, so it is more useful to return the array for now.
+ return array;
+}
+
+#ifdef VERIFY_HEAP
+ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
+#endif
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_ARRAY_BUFFER_INL_H_
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
new file mode 100644
index 0000000000..5ff7828ead
--- /dev/null
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -0,0 +1,310 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/js-array-buffer.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/property-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
+ Handle<Object>* index) {
+ DCHECK(s->IsString() || s->IsSmi());
+
+ Handle<Object> result;
+ if (s->IsSmi()) {
+ result = s;
+ } else {
+ result = String::ToNumber(isolate, Handle<String>::cast(s));
+ if (!result->IsMinusZero()) {
+ Handle<String> str = Object::ToString(isolate, result).ToHandleChecked();
+ // Avoid treating strings like "2E1" and "20" as the same key.
+ if (!str->SameValue(*s)) return false;
+ }
+ }
+ *index = result;
+ return true;
+}
+
+inline int ConvertToMb(size_t size) {
+ return static_cast<int>(size / static_cast<size_t>(MB));
+}
+
+} // anonymous namespace
+
+void JSArrayBuffer::Neuter() {
+ CHECK(is_neuterable());
+ CHECK(!was_neutered());
+ CHECK(is_external());
+ set_backing_store(nullptr);
+ set_byte_length(Smi::kZero);
+ set_was_neutered(true);
+ set_is_neuterable(false);
+ // Invalidate the neutering protector.
+ Isolate* const isolate = GetIsolate();
+ if (isolate->IsArrayBufferNeuteringIntact()) {
+ isolate->InvalidateArrayBufferNeuteringProtector();
+ }
+}
+
+void JSArrayBuffer::StopTrackingWasmMemory(Isolate* isolate) {
+ DCHECK(is_wasm_memory());
+ isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(isolate,
+ backing_store());
+ set_is_wasm_memory(false);
+}
+
+void JSArrayBuffer::FreeBackingStoreFromMainThread() {
+ if (allocation_base() == nullptr) {
+ return;
+ }
+ FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(),
+ backing_store(), is_wasm_memory()});
+ // Zero out the backing store and allocation base to avoid dangling
+ // pointers.
+ set_backing_store(nullptr);
+}
+
+// static
+void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
+ if (allocation.is_wasm_memory) {
+ wasm::WasmMemoryTracker* memory_tracker =
+ isolate->wasm_engine()->memory_tracker();
+ if (!memory_tracker->FreeMemoryIfIsWasmMemory(isolate,
+ allocation.backing_store)) {
+ CHECK(FreePages(allocation.allocation_base, allocation.length));
+ }
+ } else {
+ isolate->array_buffer_allocator()->Free(allocation.allocation_base,
+ allocation.length);
+ }
+}
+
+void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
+ set_bit_field(IsWasmMemory::update(bit_field(), is_wasm_memory));
+}
+
+void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+ bool is_external, void* data, size_t byte_length,
+ SharedFlag shared, bool is_wasm_memory) {
+ DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
+ v8::ArrayBuffer::kEmbedderFieldCount);
+ for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
+ array_buffer->SetEmbedderField(i, Smi::kZero);
+ }
+ array_buffer->set_bit_field(0);
+ array_buffer->set_is_external(is_external);
+ array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
+ array_buffer->set_is_shared(shared == SharedFlag::kShared);
+ array_buffer->set_is_wasm_memory(is_wasm_memory);
+
+ Handle<Object> heap_byte_length =
+ isolate->factory()->NewNumberFromSize(byte_length);
+ CHECK(heap_byte_length->IsSmi() || heap_byte_length->IsHeapNumber());
+ array_buffer->set_byte_length(*heap_byte_length);
+ // Initialize the backing store last to avoid handling of |JSArrayBuffers| that
+ // are currently being constructed in the |ArrayBufferTracker|. The
+ // registration method below handles the case of registering a buffer that has
+ // already been promoted.
+ array_buffer->set_backing_store(data);
+
+ if (data && !is_external) {
+ isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
+ }
+}
+
+bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
+ Isolate* isolate,
+ size_t allocated_length,
+ bool initialize, SharedFlag shared) {
+ void* data;
+ CHECK_NOT_NULL(isolate->array_buffer_allocator());
+ if (allocated_length != 0) {
+ if (allocated_length >= MB)
+ isolate->counters()->array_buffer_big_allocations()->AddSample(
+ ConvertToMb(allocated_length));
+ if (shared == SharedFlag::kShared)
+ isolate->counters()->shared_array_allocations()->AddSample(
+ ConvertToMb(allocated_length));
+ if (initialize) {
+ data = isolate->array_buffer_allocator()->Allocate(allocated_length);
+ } else {
+ data = isolate->array_buffer_allocator()->AllocateUninitialized(
+ allocated_length);
+ }
+ if (data == nullptr) {
+ isolate->counters()->array_buffer_new_size_failures()->AddSample(
+ ConvertToMb(allocated_length));
+ return false;
+ }
+ } else {
+ data = nullptr;
+ }
+
+ const bool is_external = false;
+ JSArrayBuffer::Setup(array_buffer, isolate, is_external, data,
+ allocated_length, shared);
+ return true;
+}
+
+Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array) {
+ DCHECK(typed_array->is_on_heap());
+
+ Isolate* isolate = typed_array->GetIsolate();
+
+ DCHECK(IsFixedTypedArrayElementsKind(typed_array->GetElementsKind()));
+
+ Handle<FixedTypedArrayBase> fixed_typed_array(
+ FixedTypedArrayBase::cast(typed_array->elements()), isolate);
+
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
+ isolate);
+ // This code does not know how to materialize from wasm buffers.
+ DCHECK(!buffer->is_wasm_memory());
+
+ void* backing_store =
+ isolate->array_buffer_allocator()->AllocateUninitialized(
+ fixed_typed_array->DataSize());
+ if (backing_store == nullptr) {
+ isolate->heap()->FatalProcessOutOfMemory(
+ "JSTypedArray::MaterializeArrayBuffer");
+ }
+ buffer->set_is_external(false);
+ DCHECK(buffer->byte_length()->IsSmi() ||
+ buffer->byte_length()->IsHeapNumber());
+ DCHECK(NumberToInt32(buffer->byte_length()) == fixed_typed_array->DataSize());
+ // Initialize the backing store last to avoid handling of |JSArrayBuffers| that
+ // are currently being constructed in the |ArrayBufferTracker|. The
+ // registration method below handles the case of registering a buffer that has
+ // already been promoted.
+ buffer->set_backing_store(backing_store);
+ // RegisterNewArrayBuffer expects a valid length for adjusting counters.
+ isolate->heap()->RegisterNewArrayBuffer(*buffer);
+ memcpy(buffer->backing_store(), fixed_typed_array->DataPtr(),
+ fixed_typed_array->DataSize());
+ Handle<FixedTypedArrayBase> new_elements =
+ isolate->factory()->NewFixedTypedArrayWithExternalPointer(
+ fixed_typed_array->length(), typed_array->type(),
+ static_cast<uint8_t*>(buffer->backing_store()));
+
+ typed_array->set_elements(*new_elements);
+ DCHECK(!typed_array->is_on_heap());
+
+ return buffer;
+}
+
+Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
+ if (!is_on_heap()) {
+ Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
+ GetIsolate());
+ return array_buffer;
+ }
+ Handle<JSTypedArray> self(this, GetIsolate());
+ return MaterializeArrayBuffer(self);
+}
+
+// ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc
+// static
+Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
+ Handle<JSTypedArray> o,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // 1. Assert: IsPropertyKey(P) is true.
+ DCHECK(key->IsName() || key->IsNumber());
+ // 2. Assert: O is an Object that has a [[ViewedArrayBuffer]] internal slot.
+ // 3. If Type(P) is String, then
+ if (key->IsString() || key->IsSmi()) {
+ // 3a. Let numericIndex be ! CanonicalNumericIndexString(P)
+ // 3b. If numericIndex is not undefined, then
+ Handle<Object> numeric_index;
+ if (CanonicalNumericIndexString(isolate, key, &numeric_index)) {
+ // 3b i. If IsInteger(numericIndex) is false, return false.
+ // 3b ii. If numericIndex = -0, return false.
+ // 3b iii. If numericIndex < 0, return false.
+ // FIXME: the standard allows up to 2^53 elements.
+ uint32_t index;
+ if (numeric_index->IsMinusZero() || !numeric_index->ToUint32(&index)) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
+ }
+ // 3b iv. Let length be O.[[ArrayLength]].
+ uint32_t length = o->length()->Number();
+ // 3b v. If numericIndex ≥ length, return false.
+ if (index >= length) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
+ }
+ // 3b vi. If IsAccessorDescriptor(Desc) is true, return false.
+ if (PropertyDescriptor::IsAccessorDescriptor(desc)) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+ }
+ // 3b vii. If Desc has a [[Configurable]] field and if
+ // Desc.[[Configurable]] is true, return false.
+ // 3b viii. If Desc has an [[Enumerable]] field and if Desc.[[Enumerable]]
+ // is false, return false.
+ // 3b ix. If Desc has a [[Writable]] field and if Desc.[[Writable]] is
+ // false, return false.
+ if ((desc->has_configurable() && desc->configurable()) ||
+ (desc->has_enumerable() && !desc->enumerable()) ||
+ (desc->has_writable() && !desc->writable())) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+ }
+ // 3b x. If Desc has a [[Value]] field, then
+ // 3b x 1. Let value be Desc.[[Value]].
+ // 3b x 2. Return ? IntegerIndexedElementSet(O, numericIndex, value).
+ if (desc->has_value()) {
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ if (!desc->has_enumerable()) desc->set_enumerable(true);
+ if (!desc->has_writable()) desc->set_writable(true);
+ Handle<Object> value = desc->value();
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ SetOwnElementIgnoreAttributes(
+ o, index, value, desc->ToAttributes()),
+ Nothing<bool>());
+ }
+ // 3b xi. Return true.
+ return Just(true);
+ }
+ }
+ // 4. Return ! OrdinaryDefineOwnProperty(O, P, Desc).
+ return OrdinaryDefineOwnProperty(isolate, o, key, desc, should_throw);
+}
+
+ExternalArrayType JSTypedArray::type() {
+ switch (elements()->map()->instance_type()) {
+#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return kExternal##Type##Array;
+
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
+#undef INSTANCE_TYPE_TO_ARRAY_TYPE
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+size_t JSTypedArray::element_size() {
+ switch (elements()->map()->instance_type()) {
+#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return sizeof(ctype);
+
+ TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE)
+#undef INSTANCE_TYPE_TO_ELEMENT_SIZE
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace internal
+} // namespace v8
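
The JSTypedArray::DefineOwnProperty implementation above follows the integer-indexed exotic object semantics step by step. A rough TypeScript sketch of the JS-observable behaviour it produces (standard ECMAScript only, nothing V8-internal; exact error messages are engine-specific):

  // Plain value descriptors on in-range indices go through
  // IntegerIndexedElementSet (step 3b x) and succeed.
  const ta = new Uint8Array(4);
  Object.defineProperty(ta, 0, { value: 42 });
  console.log(ta[0]); // 42

  // Accessor descriptors are rejected (step 3b vi).
  try {
    Object.defineProperty(ta, 1, { get() { return 7; } });
  } catch (e) {
    console.log(e instanceof TypeError); // true
  }

  // Out-of-range indices are rejected as well (step 3b v).
  try {
    Object.defineProperty(ta, 10, { value: 1 });
  } catch (e) {
    console.log(e instanceof TypeError); // true
  }
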
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
new file mode 100644
index 0000000000..109aacbc47
--- /dev/null
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -0,0 +1,230 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
+#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
+enum class SharedFlag { kNotShared, kShared };
+
+class JSArrayBuffer : public JSObject {
+ public:
+ // [byte_length]: length in bytes
+ DECL_ACCESSORS(byte_length, Object)
+
+ // [backing_store]: backing memory for this array
+ DECL_ACCESSORS(backing_store, void)
+
+ // For non-wasm, allocation_length and allocation_base are byte_length and
+ // backing_store, respectively.
+ inline size_t allocation_length() const;
+ inline void* allocation_base() const;
+
+ inline uint32_t bit_field() const;
+ inline void set_bit_field(uint32_t bits);
+
+ // [is_external]: true indicates that the embedder is in charge of freeing the
+ // backing_store, while is_external == false means that v8 will free the
+ // memory block once all ArrayBuffers referencing it are collected by the GC.
+ inline bool is_external();
+ inline void set_is_external(bool value);
+
+ inline bool is_neuterable();
+ inline void set_is_neuterable(bool value);
+
+ inline bool was_neutered();
+ inline void set_was_neutered(bool value);
+
+ inline bool is_shared();
+ inline void set_is_shared(bool value);
+
+ inline bool is_growable();
+ inline void set_is_growable(bool value);
+
+ DECL_CAST(JSArrayBuffer)
+
+ void Neuter();
+
+ struct Allocation {
+ Allocation(void* allocation_base, size_t length, void* backing_store,
+ bool is_wasm_memory)
+ : allocation_base(allocation_base),
+ length(length),
+ backing_store(backing_store),
+ is_wasm_memory(is_wasm_memory) {}
+
+ void* allocation_base;
+ size_t length;
+ void* backing_store;
+ bool is_wasm_memory;
+ };
+
+ // Returns whether the buffer is tracked by the WasmMemoryTracker.
+ inline bool is_wasm_memory() const;
+
+ // Sets whether the buffer is tracked by the WasmMemoryTracker.
+ void set_is_wasm_memory(bool is_wasm_memory);
+
+ // Removes the backing store from the WasmMemoryTracker and sets
+ // |is_wasm_memory| to false.
+ void StopTrackingWasmMemory(Isolate* isolate);
+
+ void FreeBackingStoreFromMainThread();
+ static void FreeBackingStore(Isolate* isolate, Allocation allocation);
+
+ V8_EXPORT_PRIVATE static void Setup(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
+ void* data, size_t allocated_length,
+ SharedFlag shared = SharedFlag::kNotShared, bool is_wasm_memory = false);
+
+ // Returns false if array buffer contents could not be allocated.
+ // In this case, |array_buffer| will not be set up.
+ static bool SetupAllocatingData(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+ size_t allocated_length, bool initialize = true,
+ SharedFlag shared = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT;
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSArrayBuffer)
+ DECL_VERIFIER(JSArrayBuffer)
+
+ static const int kByteLengthOffset = JSObject::kHeaderSize;
+ // The rest of the fields are not JSObjects, so they are not iterated over in
+ // objects-body-descriptors-inl.h.
+ static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
+ static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
+#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
+ static const int kBitFieldOffset = kBitFieldSlot;
+#else
+ static const int kBitFieldOffset = kBitFieldSlot + kInt32Size;
+#endif
+ static const int kSize = kBitFieldSlot + kPointerSize;
+
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::ArrayBuffer::kEmbedderFieldCount * kPointerSize;
+
+ // Iterates all fields in the object including internal ones except
+ // kBackingStoreOffset and kBitFieldSlot.
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ class IsExternal : public BitField<bool, 1, 1> {};
+ class IsNeuterable : public BitField<bool, 2, 1> {};
+ class WasNeutered : public BitField<bool, 3, 1> {};
+ class IsShared : public BitField<bool, 4, 1> {};
+ class IsGrowable : public BitField<bool, 5, 1> {};
+ class IsWasmMemory : public BitField<bool, 6, 1> {};
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
+};
+
+class JSArrayBufferView : public JSObject {
+ public:
+ // [buffer]: ArrayBuffer that this typed array views.
+ DECL_ACCESSORS(buffer, Object)
+
+ // [byte_offset]: offset of typed array in bytes.
+ DECL_ACCESSORS(byte_offset, Object)
+
+ // [byte_length]: length of typed array in bytes.
+ DECL_ACCESSORS(byte_length, Object)
+
+ DECL_CAST(JSArrayBufferView)
+
+ DECL_VERIFIER(JSArrayBufferView)
+
+ inline bool WasNeutered() const;
+
+ static const int kBufferOffset = JSObject::kHeaderSize;
+ static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
+ static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
+ static const int kViewSize = kByteLengthOffset + kPointerSize;
+
+ private:
+#ifdef VERIFY_HEAP
+ DECL_ACCESSORS(raw_byte_offset, Object)
+ DECL_ACCESSORS(raw_byte_length, Object)
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
+};
+
+class JSTypedArray : public JSArrayBufferView {
+ public:
+ // [length]: length of typed array in elements.
+ DECL_ACCESSORS(length, Object)
+ inline size_t length_value() const;
+
+ // ES6 9.4.5.3
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ DECL_CAST(JSTypedArray)
+
+ ExternalArrayType type();
+ V8_EXPORT_PRIVATE size_t element_size();
+
+ Handle<JSArrayBuffer> GetBuffer();
+
+ // Whether the buffer's backing store is on-heap or off-heap.
+ inline bool is_on_heap() const;
+
+ static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
+ Handle<Object> receiver,
+ const char* method_name);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSTypedArray)
+ DECL_VERIFIER(JSTypedArray)
+
+ static const int kLengthOffset = kViewSize;
+ static const int kSize = kLengthOffset + kPointerSize;
+
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
+
+ private:
+ static Handle<JSArrayBuffer> MaterializeArrayBuffer(
+ Handle<JSTypedArray> typed_array);
+#ifdef VERIFY_HEAP
+ DECL_ACCESSORS(raw_length, Object)
+#endif
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
+};
+
+class JSDataView : public JSArrayBufferView {
+ public:
+ DECL_CAST(JSDataView)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSDataView)
+ DECL_VERIFIER(JSDataView)
+
+ static const int kSize = kViewSize;
+
+ static const int kSizeWithEmbedderFields =
+ kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_ARRAY_BUFFER_H_
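
The is_wasm_memory and was_neutered bits declared above track buffers owned by the WasmMemoryTracker and buffers whose backing store has been detached. One way to observe both from plain JavaScript, sketched in TypeScript (standard WebAssembly/ArrayBuffer API, assuming a wasm-enabled host):

  const mem = new WebAssembly.Memory({ initial: 1 }); // 1 page = 64 KiB, wasm-tracked buffer
  const before = mem.buffer;
  console.log(before.byteLength);      // 65536

  mem.grow(1);                         // reallocates; the old buffer is neutered (detached)
  console.log(before.byteLength);      // 0
  console.log(mem.buffer.byteLength);  // 131072
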
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 04e484c803..7ec69285ed 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -6,7 +6,8 @@
#define V8_OBJECTS_JS_ARRAY_INL_H_
#include "src/objects/js-array.h"
-#include "src/wasm/wasm-engine.h"
+
+#include "src/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,18 +16,10 @@ namespace v8 {
namespace internal {
CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSArrayBuffer)
-CAST_ACCESSOR(JSArrayBufferView)
CAST_ACCESSOR(JSArrayIterator)
-CAST_ACCESSOR(JSTypedArray)
ACCESSORS(JSArray, length, Object, kLengthOffset)
-template <>
-inline bool Is<JSArray>(Object* obj) {
- return obj->IsJSArray();
-}
-
void JSArray::set_length(Smi* length) {
// Don't need a write barrier for a Smi.
set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
@@ -62,185 +55,6 @@ bool JSArray::HasArrayPrototype(Isolate* isolate) {
return map()->prototype() == *isolate->initial_array_prototype();
}
-void* JSArrayBuffer::backing_store() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr);
-}
-
-ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
-
-size_t JSArrayBuffer::allocation_length() const {
- if (backing_store() == nullptr) {
- return 0;
- }
- // If this buffer is managed by the WasmMemoryTracker
- if (is_wasm_memory()) {
- const auto* data =
- GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
- backing_store());
- DCHECK_NOT_NULL(data);
- return data->allocation_length;
- }
- return byte_length()->Number();
-}
-
-void* JSArrayBuffer::allocation_base() const {
- if (backing_store() == nullptr) {
- return nullptr;
- }
- // If this buffer is managed by the WasmMemoryTracker
- if (is_wasm_memory()) {
- const auto* data =
- GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
- backing_store());
- DCHECK_NOT_NULL(data);
- return data->allocation_base;
- }
- return backing_store();
-}
-
-bool JSArrayBuffer::is_wasm_memory() const {
- bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
- DCHECK_EQ(is_wasm_memory,
- GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
- backing_store()));
- return is_wasm_memory;
-}
-
-void JSArrayBuffer::set_bit_field(uint32_t bits) {
- if (kInt32Size != kPointerSize) {
-#if V8_TARGET_LITTLE_ENDIAN
- WRITE_UINT32_FIELD(this, kBitFieldSlot + kInt32Size, 0);
-#else
- WRITE_UINT32_FIELD(this, kBitFieldSlot, 0);
-#endif
- }
- WRITE_UINT32_FIELD(this, kBitFieldOffset, bits);
-}
-
-uint32_t JSArrayBuffer::bit_field() const {
- return READ_UINT32_FIELD(this, kBitFieldOffset);
-}
-
-bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
-
-void JSArrayBuffer::set_is_external(bool value) {
- set_bit_field(IsExternal::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::is_neuterable() {
- return IsNeuterable::decode(bit_field());
-}
-
-void JSArrayBuffer::set_is_neuterable(bool value) {
- set_bit_field(IsNeuterable::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::was_neutered() { return WasNeutered::decode(bit_field()); }
-
-void JSArrayBuffer::set_was_neutered(bool value) {
- set_bit_field(WasNeutered::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::is_shared() { return IsShared::decode(bit_field()); }
-
-void JSArrayBuffer::set_is_shared(bool value) {
- set_bit_field(IsShared::update(bit_field(), value));
-}
-
-bool JSArrayBuffer::is_growable() { return IsGrowable::decode(bit_field()); }
-
-void JSArrayBuffer::set_is_growable(bool value) {
- set_bit_field(IsGrowable::update(bit_field(), value));
-}
-
-Object* JSArrayBufferView::byte_offset() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kByteOffsetOffset));
-}
-
-void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kByteOffsetOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteOffsetOffset, value, mode);
-}
-
-Object* JSArrayBufferView::byte_length() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kByteLengthOffset));
-}
-
-void JSArrayBufferView::set_byte_length(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kByteLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kByteLengthOffset, value, mode);
-}
-
-ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
-#ifdef VERIFY_HEAP
-ACCESSORS(JSArrayBufferView, raw_byte_offset, Object, kByteOffsetOffset)
-ACCESSORS(JSArrayBufferView, raw_byte_length, Object, kByteLengthOffset)
-#endif
-
-bool JSArrayBufferView::WasNeutered() const {
- return JSArrayBuffer::cast(buffer())->was_neutered();
-}
-
-Object* JSTypedArray::length() const {
- if (WasNeutered()) return Smi::kZero;
- return Object::cast(READ_FIELD(this, kLengthOffset));
-}
-
-uint32_t JSTypedArray::length_value() const {
- if (WasNeutered()) return 0;
- uint32_t index = 0;
- CHECK(Object::cast(READ_FIELD(this, kLengthOffset))->ToArrayLength(&index));
- return index;
-}
-
-void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
- WRITE_FIELD(this, kLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
-}
-
-bool JSTypedArray::is_on_heap() const {
- DisallowHeapAllocation no_gc;
- // Checking that buffer()->backing_store() is not nullptr is not sufficient;
- // it will be nullptr when byte_length is 0 as well.
- FixedTypedArrayBase* fta(FixedTypedArrayBase::cast(elements()));
- return fta->base_pointer() == fta;
-}
-
-// static
-MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
- Handle<Object> receiver,
- const char* method_name) {
- if (V8_UNLIKELY(!receiver->IsJSTypedArray())) {
- const MessageTemplate::Template message = MessageTemplate::kNotTypedArray;
- THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
- }
-
- Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
- if (V8_UNLIKELY(array->WasNeutered())) {
- const MessageTemplate::Template message =
- MessageTemplate::kDetachedOperation;
- Handle<String> operation =
- isolate->factory()->NewStringFromAsciiChecked(method_name);
- THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
- }
-
- // spec describes to return `buffer`, but it may disrupt current
- // implementations, and it's much useful to return array for now.
- return array;
-}
-
-#ifdef VERIFY_HEAP
-ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
-#endif
-
ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 28cfe71f5d..b212848ce7 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -117,6 +117,28 @@ class JSArrayIterator : public JSObject {
DECL_ACCESSORS(iterated_object, Object)
// [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
+ // The next_index is always a positive integer, and it points to
+ // the next index that is to be returned by this iterator. Its
+ // possible range is fixed depending on the [[iterated_object]]:
+ //
+ // 1. For JSArrays the next_index is always in Unsigned32
+ // range, and when the iterator reaches the end it's set
+ // to kMaxUInt32 to indicate that this iterator should
+ // never produce values anymore even if the "length"
+ // property of the JSArray changes at some later point.
+ // 2. For JSTypedArrays the next_index is always in
+ // UnsignedSmall range, and when the iterator terminates
+ // it's set to Smi::kMaxValue.
+ // 3. For all other JSReceivers it's always between 0 and
+ // kMaxSafeInteger, and the latter value is used to mark
+ // termination.
+ //
+ // It's important that for 1. and 2. the value fits into the
+ // Unsigned32 range (UnsignedSmall is a subset of Unsigned32),
+ // since we use this knowledge in the fast-path for the array
+ // iterator next calls in TurboFan (in the JSCallReducer) to
+ // keep the index in Word32 representation. This invariant is
+ // checked in JSArrayIterator::JSArrayIteratorVerify().
DECL_ACCESSORS(next_index, Object)
// [kind]: the [[ArrayIterationKind]] inobject property.
@@ -132,215 +154,6 @@ class JSArrayIterator : public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
};
-// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
-enum class SharedFlag { kNotShared, kShared };
-
-class JSArrayBuffer : public JSObject {
- public:
- // [byte_length]: length in bytes
- DECL_ACCESSORS(byte_length, Object)
-
- // [backing_store]: backing memory for this array
- DECL_ACCESSORS(backing_store, void)
-
- // For non-wasm, allocation_length and allocation_base are byte_length and
- // backing_store, respectively.
- inline size_t allocation_length() const;
- inline void* allocation_base() const;
-
- inline uint32_t bit_field() const;
- inline void set_bit_field(uint32_t bits);
-
- // [is_external]: true indicates that the embedder is in charge of freeing the
- // backing_store, while is_external == false means that v8 will free the
- // memory block once all ArrayBuffers referencing it are collected by the GC.
- inline bool is_external();
- inline void set_is_external(bool value);
-
- inline bool is_neuterable();
- inline void set_is_neuterable(bool value);
-
- inline bool was_neutered();
- inline void set_was_neutered(bool value);
-
- inline bool is_shared();
- inline void set_is_shared(bool value);
-
- inline bool is_growable();
- inline void set_is_growable(bool value);
-
- DECL_CAST(JSArrayBuffer)
-
- void Neuter();
-
- struct Allocation {
- Allocation(void* allocation_base, size_t length, void* backing_store,
- bool is_wasm_memory)
- : allocation_base(allocation_base),
- length(length),
- backing_store(backing_store),
- is_wasm_memory(is_wasm_memory) {}
-
- void* allocation_base;
- size_t length;
- void* backing_store;
- bool is_wasm_memory;
- };
-
- // Returns whether the buffer is tracked by the WasmMemoryTracker.
- inline bool is_wasm_memory() const;
-
- // Sets whether the buffer is tracked by the WasmMemoryTracker.
- void set_is_wasm_memory(bool is_wasm_memory);
-
- // Removes the backing store from the WasmMemoryTracker and sets
- // |is_wasm_memory| to false.
- void StopTrackingWasmMemory(Isolate* isolate);
-
- void FreeBackingStoreFromMainThread();
- static void FreeBackingStore(Isolate* isolate, Allocation allocation);
-
- V8_EXPORT_PRIVATE static void Setup(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
- void* data, size_t allocated_length,
- SharedFlag shared = SharedFlag::kNotShared, bool is_wasm_memory = false);
-
- // Returns false if array buffer contents could not be allocated.
- // In this case, |array_buffer| will not be set up.
- static bool SetupAllocatingData(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- size_t allocated_length, bool initialize = true,
- SharedFlag shared = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT;
-
- // Dispatched behavior.
- DECL_PRINTER(JSArrayBuffer)
- DECL_VERIFIER(JSArrayBuffer)
-
- static const int kByteLengthOffset = JSObject::kHeaderSize;
- // The rest of the fields are not JSObjects, so they are not iterated over in
- // objects-body-descriptors-inl.h.
- static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
- static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
-#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
- static const int kBitFieldOffset = kBitFieldSlot;
-#else
- static const int kBitFieldOffset = kBitFieldSlot + kInt32Size;
-#endif
- static const int kSize = kBitFieldSlot + kPointerSize;
-
- static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBuffer::kEmbedderFieldCount * kPointerSize;
-
- // Iterates all fields in the object including internal ones except
- // kBackingStoreOffset and kBitFieldSlot.
- class BodyDescriptor;
- // No weak fields.
- typedef BodyDescriptor BodyDescriptorWeak;
-
- class IsExternal : public BitField<bool, 1, 1> {};
- class IsNeuterable : public BitField<bool, 2, 1> {};
- class WasNeutered : public BitField<bool, 3, 1> {};
- class IsShared : public BitField<bool, 4, 1> {};
- class IsGrowable : public BitField<bool, 5, 1> {};
- class IsWasmMemory : public BitField<bool, 6, 1> {};
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
-};
-
-class JSArrayBufferView : public JSObject {
- public:
- // [buffer]: ArrayBuffer that this typed array views.
- DECL_ACCESSORS(buffer, Object)
-
- // [byte_offset]: offset of typed array in bytes.
- DECL_ACCESSORS(byte_offset, Object)
-
- // [byte_length]: length of typed array in bytes.
- DECL_ACCESSORS(byte_length, Object)
-
- DECL_CAST(JSArrayBufferView)
-
- DECL_VERIFIER(JSArrayBufferView)
-
- inline bool WasNeutered() const;
-
- static const int kBufferOffset = JSObject::kHeaderSize;
- static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
- static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
- static const int kViewSize = kByteLengthOffset + kPointerSize;
-
- private:
-#ifdef VERIFY_HEAP
- DECL_ACCESSORS(raw_byte_offset, Object)
- DECL_ACCESSORS(raw_byte_length, Object)
-#endif
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBufferView);
-};
-
-class JSTypedArray : public JSArrayBufferView {
- public:
- // [length]: length of typed array in elements.
- DECL_ACCESSORS(length, Object)
- inline uint32_t length_value() const;
-
- // ES6 9.4.5.3
- V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
- Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
- PropertyDescriptor* desc, ShouldThrow should_throw);
-
- DECL_CAST(JSTypedArray)
-
- ExternalArrayType type();
- V8_EXPORT_PRIVATE size_t element_size();
-
- Handle<JSArrayBuffer> GetBuffer();
-
- // Whether the buffer's backing store is on-heap or off-heap.
- inline bool is_on_heap() const;
-
- static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
- Handle<Object> receiver,
- const char* method_name);
-
- // Dispatched behavior.
- DECL_PRINTER(JSTypedArray)
- DECL_VERIFIER(JSTypedArray)
-
- static const int kLengthOffset = kViewSize;
- static const int kSize = kLengthOffset + kPointerSize;
-
- static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
-
- private:
- static Handle<JSArrayBuffer> MaterializeArrayBuffer(
- Handle<JSTypedArray> typed_array);
-#ifdef VERIFY_HEAP
- DECL_ACCESSORS(raw_length, Object)
-#endif
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
-};
-
-class JSDataView : public JSArrayBufferView {
- public:
- DECL_CAST(JSDataView)
-
- // Dispatched behavior.
- DECL_PRINTER(JSDataView)
- DECL_VERIFIER(JSDataView)
-
- static const int kSize = kViewSize;
-
- static const int kSizeWithEmbedderFields =
- kSize + v8::ArrayBufferView::kEmbedderFieldCount * kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataView);
-};
-
} // namespace internal
} // namespace v8
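
The next_index comment added to JSArrayIterator above describes how the index is pinned at a sentinel once iteration terminates. In observable terms (plain ECMAScript, sketched in TypeScript), an exhausted array iterator never yields again, even if the array grows later:

  const arr = [1, 2];
  const it = arr[Symbol.iterator]();
  console.log(it.next()); // { value: 1, done: false }
  console.log(it.next()); // { value: 2, done: false }
  console.log(it.next()); // { value: undefined, done: true }

  arr.push(3);            // growing the array afterwards...
  console.log(it.next()); // ...still { value: undefined, done: true }
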
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
new file mode 100644
index 0000000000..279a8bfd49
--- /dev/null
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_COLLATOR_INL_H_
+#define V8_OBJECTS_JS_COLLATOR_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-collator.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kICUCollatorOffset)
+ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset);
+SMI_ACCESSORS(JSCollator, flags, kFlagsOffset)
+
+inline void JSCollator::set_usage(Usage usage) {
+ DCHECK_LT(usage, Usage::COUNT);
+ int hints = flags();
+ hints = UsageBits::update(hints, usage);
+ set_flags(hints);
+}
+
+inline JSCollator::Usage JSCollator::usage() const {
+ return UsageBits::decode(flags());
+}
+
+CAST_ACCESSOR(JSCollator);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLATOR_INL_H_
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
new file mode 100644
index 0000000000..c6cbecfb01
--- /dev/null
+++ b/deps/v8/src/objects/js-collator.cc
@@ -0,0 +1,541 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-collator.h"
+
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/js-collator-inl.h"
+#include "unicode/coll.h"
+#include "unicode/locid.h"
+#include "unicode/strenum.h"
+#include "unicode/ucol.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// TODO(gsathya): Consider internalizing the value strings.
+void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
+ Handle<String> key, const char* value) {
+ CHECK_NOT_NULL(value);
+ Handle<String> value_str =
+ isolate->factory()->NewStringFromAsciiChecked(value);
+
+ // This is a brand new JSObject that shouldn't already have the same
+ // key so this shouldn't fail.
+ CHECK(JSReceiver::CreateDataProperty(isolate, options, key, value_str,
+ kDontThrow)
+ .FromJust());
+}
+
+void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
+ Handle<String> key, bool value) {
+ Handle<Object> value_obj = isolate->factory()->ToBoolean(value);
+
+ // This is a brand new JSObject that shouldn't already have the same
+ // key so this shouldn't fail.
+ CHECK(JSReceiver::CreateDataProperty(isolate, options, key, value_obj,
+ kDontThrow)
+ .FromJust());
+}
+
+} // anonymous namespace
+
+// static
+Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
+ Handle<JSCollator> collator) {
+ Handle<JSObject> options =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ JSCollator::Usage usage = collator->usage();
+ CreateDataPropertyForOptions(isolate, options,
+ isolate->factory()->usage_string(),
+ JSCollator::UsageToString(usage));
+
+ icu::Collator* icu_collator = collator->icu_collator()->raw();
+ CHECK_NOT_NULL(icu_collator);
+
+ UErrorCode status = U_ZERO_ERROR;
+ bool numeric =
+ icu_collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON;
+ CHECK(U_SUCCESS(status));
+ CreateDataPropertyForOptions(isolate, options,
+ isolate->factory()->numeric_string(), numeric);
+
+ const char* case_first = nullptr;
+ status = U_ZERO_ERROR;
+ switch (icu_collator->getAttribute(UCOL_CASE_FIRST, status)) {
+ case UCOL_LOWER_FIRST:
+ case_first = "lower";
+ break;
+ case UCOL_UPPER_FIRST:
+ case_first = "upper";
+ break;
+ default:
+ case_first = "false";
+ }
+ CHECK(U_SUCCESS(status));
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->caseFirst_string(), case_first);
+
+ const char* sensitivity = nullptr;
+ status = U_ZERO_ERROR;
+ switch (icu_collator->getAttribute(UCOL_STRENGTH, status)) {
+ case UCOL_PRIMARY: {
+ CHECK(U_SUCCESS(status));
+ status = U_ZERO_ERROR;
+ // case level: true + s1 -> case, s1 -> base.
+ if (UCOL_ON == icu_collator->getAttribute(UCOL_CASE_LEVEL, status)) {
+ sensitivity = "case";
+ } else {
+ sensitivity = "base";
+ }
+ CHECK(U_SUCCESS(status));
+ break;
+ }
+ case UCOL_SECONDARY:
+ sensitivity = "accent";
+ break;
+ case UCOL_TERTIARY:
+ sensitivity = "variant";
+ break;
+ case UCOL_QUATERNARY:
+ // We shouldn't get quaternary and identical from ICU, but if we do,
+ // put them into variant.
+ sensitivity = "variant";
+ break;
+ default:
+ sensitivity = "variant";
+ }
+ CHECK(U_SUCCESS(status));
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->sensitivity_string(), sensitivity);
+
+ status = U_ZERO_ERROR;
+ bool ignore_punctuation = icu_collator->getAttribute(UCOL_ALTERNATE_HANDLING,
+ status) == UCOL_SHIFTED;
+ CHECK(U_SUCCESS(status));
+ CreateDataPropertyForOptions(isolate, options,
+ isolate->factory()->ignorePunctuation_string(),
+ ignore_punctuation);
+
+ status = U_ZERO_ERROR;
+ const char* collation;
+ std::unique_ptr<icu::StringEnumeration> collation_values(
+ icu_collator->getKeywordValues("co", status));
+ // Collation wasn't provided as a keyword to icu, use default.
+ if (status == U_ILLEGAL_ARGUMENT_ERROR) {
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->collation_string(), "default");
+ } else {
+ CHECK(U_SUCCESS(status));
+ CHECK_NOT_NULL(collation_values.get());
+
+ int32_t length;
+ status = U_ZERO_ERROR;
+ collation = collation_values->next(&length, status);
+ CHECK(U_SUCCESS(status));
+
+ // There has to be at least one value.
+ CHECK_NOT_NULL(collation);
+ CreateDataPropertyForOptions(
+ isolate, options, isolate->factory()->collation_string(), collation);
+
+ status = U_ZERO_ERROR;
+ collation_values->reset(status);
+ CHECK(U_SUCCESS(status));
+ }
+
+ status = U_ZERO_ERROR;
+ icu::Locale icu_locale = icu_collator->getLocale(ULOC_VALID_LOCALE, status);
+ CHECK(U_SUCCESS(status));
+
+ char result[ULOC_FULLNAME_CAPACITY];
+ status = U_ZERO_ERROR;
+ uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
+ FALSE, &status);
+ CHECK(U_SUCCESS(status));
+
+ CreateDataPropertyForOptions(isolate, options,
+ isolate->factory()->locale_string(), result);
+
+ return options;
+}
+
+namespace {
+
+std::map<std::string, std::string> LookupUnicodeExtensions(
+ const icu::Locale& icu_locale, const std::set<std::string>& relevant_keys) {
+ std::map<std::string, std::string> extensions;
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> keywords(
+ icu_locale.createKeywords(status));
+ if (U_FAILURE(status)) return extensions;
+
+ if (!keywords) return extensions;
+ char value[ULOC_FULLNAME_CAPACITY];
+
+ int32_t length;
+ status = U_ZERO_ERROR;
+ for (const char* keyword = keywords->next(&length, status);
+ keyword != nullptr; keyword = keywords->next(&length, status)) {
+ // Ignore failures in ICU and skip to the next keyword.
+ //
+ // This is fine.™
+ if (U_FAILURE(status)) {
+ status = U_ZERO_ERROR;
+ continue;
+ }
+
+ icu_locale.getKeywordValue(keyword, value, ULOC_FULLNAME_CAPACITY, status);
+
+ // Ignore failures in ICU and skip to the next keyword.
+ //
+ // This is fine.™
+ if (U_FAILURE(status)) {
+ status = U_ZERO_ERROR;
+ continue;
+ }
+
+ const char* bcp47_key = uloc_toUnicodeLocaleKey(keyword);
+
+ // Ignore keywords that we don't recognize - spec allows that.
+ if (bcp47_key && (relevant_keys.find(bcp47_key) != relevant_keys.end())) {
+ const char* bcp47_value = uloc_toUnicodeLocaleType(bcp47_key, value);
+ extensions.insert(
+ std::pair<std::string, std::string>(bcp47_key, bcp47_value));
+ }
+ }
+
+ return extensions;
+}
+
+void SetCaseFirstOption(icu::Collator* icu_collator, const char* value) {
+ CHECK_NOT_NULL(icu_collator);
+ CHECK_NOT_NULL(value);
+ UErrorCode status = U_ZERO_ERROR;
+ if (strcmp(value, "upper") == 0) {
+ icu_collator->setAttribute(UCOL_CASE_FIRST, UCOL_UPPER_FIRST, status);
+ } else if (strcmp(value, "lower") == 0) {
+ icu_collator->setAttribute(UCOL_CASE_FIRST, UCOL_LOWER_FIRST, status);
+ } else {
+ icu_collator->setAttribute(UCOL_CASE_FIRST, UCOL_OFF, status);
+ }
+ CHECK(U_SUCCESS(status));
+}
+
+} // anonymous namespace
+
+// static
+MaybeHandle<JSCollator> JSCollator::InitializeCollator(
+ Isolate* isolate, Handle<JSCollator> collator, Handle<Object> locales,
+ Handle<Object> options_obj) {
+ // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ Handle<JSObject> requested_locales;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, requested_locales,
+ Intl::CanonicalizeLocaleListJS(isolate, locales),
+ JSCollator);
+
+ // 2. If options is undefined, then
+ if (options_obj->IsUndefined(isolate)) {
+ // 2. a. Let options be ObjectCreate(null).
+ options_obj = isolate->factory()->NewJSObjectWithNullProto();
+ } else {
+ // 3. Else
+ // 3. a. Let options be ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options_obj,
+ Object::ToObject(isolate, options_obj, "Intl.Collator"), JSCollator);
+ }
+
+ // At this point, options_obj can either be a JSObject or a JSProxy only.
+ Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
+
+ // 4. Let usage be ? GetOption(options, "usage", "string", « "sort",
+ // "search" », "sort").
+ std::vector<const char*> values = {"sort", "search"};
+ std::unique_ptr<char[]> usage_str = nullptr;
+ JSCollator::Usage usage = JSCollator::Usage::SORT;
+ Maybe<bool> found_usage = Intl::GetStringOption(
+ isolate, options, "usage", values, "Intl.Collator", &usage_str);
+ MAYBE_RETURN(found_usage, MaybeHandle<JSCollator>());
+
+ if (found_usage.FromJust()) {
+ DCHECK_NOT_NULL(usage_str.get());
+ if (strcmp(usage_str.get(), "search") == 0) {
+ usage = JSCollator::Usage::SEARCH;
+ }
+ }
+
+ // 5. Set collator.[[Usage]] to usage.
+ collator->set_usage(usage);
+
+ // 6. If usage is "sort", then
+ // a. Let localeData be %Collator%.[[SortLocaleData]].
+ // 7. Else,
+ // a. Let localeData be %Collator%.[[SearchLocaleData]].
+ //
+ // The above two spec operations aren't required, the Intl spec is
+ // crazy. See https://github.com/tc39/ecma402/issues/256
+
+ // TODO(gsathya): This is currently done as part of the
+ // Intl::ResolveLocale call below. Fix this once resolveLocale is
+ // changed to not do the lookup.
+ //
+ // 9. Let matcher be ? GetOption(options, "localeMatcher", "string",
+ // « "lookup", "best fit" », "best fit").
+ // 10. Set opt.[[localeMatcher]] to matcher.
+
+ // 11. Let numeric be ? GetOption(options, "numeric", "boolean",
+ // undefined, undefined).
+ // 12. If numeric is not undefined, then
+ // a. Let numeric be ! ToString(numeric).
+ //
+ // Note: We omit the ToString(numeric) operation as it's not
+ // observable. Intl::GetBoolOption returns a Boolean and
+ // ToString(Boolean) is not side-effecting.
+ //
+ // 13. Set opt.[[kn]] to numeric.
+ bool numeric;
+ Maybe<bool> found_numeric = Intl::GetBoolOption(isolate, options, "numeric",
+ "Intl.Collator", &numeric);
+ MAYBE_RETURN(found_numeric, MaybeHandle<JSCollator>());
+
+ // 14. Let caseFirst be ? GetOption(options, "caseFirst", "string",
+ // « "upper", "lower", "false" », undefined).
+ // 15. Set opt.[[kf]] to caseFirst.
+ values = {"upper", "lower", "false"};
+ std::unique_ptr<char[]> case_first_str = nullptr;
+ Maybe<bool> found_case_first = Intl::GetStringOption(
+ isolate, options, "caseFirst", values, "Intl.Collator", &case_first_str);
+ MAYBE_RETURN(found_case_first, MaybeHandle<JSCollator>());
+
+ // The relevant unicode extensions accepted by Collator as specified here:
+ // https://tc39.github.io/ecma402/#sec-intl-collator-internal-slots
+ //
+ // 16. Let relevantExtensionKeys be %Collator%.[[RelevantExtensionKeys]].
+ std::set<std::string> relevant_extension_keys{"co", "kn", "kf"};
+
+ // We don't pass the relevant_extension_keys to ResolveLocale here
+ // as per the spec.
+ //
+ // In ResolveLocale, the spec makes sure we only pick and use the
+ // relevant extension keys and ignore any other keys. Also, in
+ // ResolveLocale, the spec makes sure that if a given key has both a
+ // value in the options object and a unicode extension value, then
+ // we pick the value provided in the options object.
+ // For example: in the case of `new Intl.Collator('en-u-kn-true', {
+ // numeric: false })` the value `false` is used for the `numeric`
+ // key.
+ //
+ // Instead of performing all this validation in ResolveLocale, we
+ // just perform it inline below. In the future when we port
+ // ResolveLocale to C++, we can make all these validations generic
+ // and move them to ResolveLocale.
+ //
+ // 17. Let r be ResolveLocale(%Collator%.[[AvailableLocales]],
+ // requestedLocales, opt, %Collator%.[[RelevantExtensionKeys]],
+ // localeData).
+ // 18. Set collator.[[Locale]] to r.[[locale]].
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, r,
+ Intl::ResolveLocale(isolate, "collator", requested_locales, options),
+ JSCollator);
+
+ Handle<String> locale_with_extension_str =
+ isolate->factory()->NewStringFromStaticChars("localeWithExtension");
+ Handle<Object> locale_with_extension_obj =
+ JSObject::GetDataProperty(r, locale_with_extension_str);
+
+ // The locale_with_extension has to be a string. Either a user
+ // provided canonicalized string or the default locale.
+ CHECK(locale_with_extension_obj->IsString());
+ Handle<String> locale_with_extension =
+ Handle<String>::cast(locale_with_extension_obj);
+
+ icu::Locale icu_locale =
+ Intl::CreateICULocale(isolate, locale_with_extension);
+ DCHECK(!icu_locale.isBogus());
+
+ std::map<std::string, std::string> extensions =
+ LookupUnicodeExtensions(icu_locale, relevant_extension_keys);
+
+ // 19. Let collation be r.[[co]].
+ //
+ // r.[[co]] is already set as part of the icu::Locale creation as
+ // icu parses unicode extensions and sets the keywords.
+ //
+ // We need to sanitize the keywords based on certain ECMAScript rules.
+ //
+ // As per https://tc39.github.io/ecma402/#sec-intl-collator-internal-slots:
+ // The values "standard" and "search" must not be used as elements
+ // in any [[SortLocaleData]][locale].co and
+ // [[SearchLocaleData]][locale].co list.
+ auto co_extension_it = extensions.find("co");
+ if (co_extension_it != extensions.end()) {
+ const std::string& value = co_extension_it->second;
+ if ((value == "search") || (value == "standard")) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu_locale.setKeywordValue("co", NULL, status);
+ CHECK(U_SUCCESS(status));
+ }
+ }
+
+ // 20. If collation is null, let collation be "default".
+ // 21. Set collator.[[Collation]] to collation.
+ //
+ // We don't store the collation value as per the above two steps
+ // here. The collation value can be looked up from icu::Collator on
+ // demand, as part of Intl.Collator.prototype.resolvedOptions.
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::Collator> icu_collator(
+ icu::Collator::createInstance(icu_locale, status));
+ if (U_FAILURE(status) || icu_collator.get() == nullptr) {
+ status = U_ZERO_ERROR;
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ icu_collator.reset(
+ icu::Collator::createInstance(no_extension_locale, status));
+
+ if (U_FAILURE(status) || icu_collator.get() == nullptr) {
+ FATAL("Failed to create ICU collator, are ICU data files missing?");
+ }
+ }
+ DCHECK(U_SUCCESS(status));
+ CHECK_NOT_NULL(icu_collator.get());
+
+ // 22. If relevantExtensionKeys contains "kn", then
+ // a. Set collator.[[Numeric]] to ! SameValue(r.[[kn]], "true").
+ //
+ // If the numeric value is passed in through the options object,
+ // then we use it. Otherwise, we check if the numeric value is
+ // passed in through the unicode extensions.
+ status = U_ZERO_ERROR;
+ if (found_numeric.FromJust()) {
+ icu_collator->setAttribute(UCOL_NUMERIC_COLLATION,
+ numeric ? UCOL_ON : UCOL_OFF, status);
+ CHECK(U_SUCCESS(status));
+ } else {
+ auto kn_extension_it = extensions.find("kn");
+ if (kn_extension_it != extensions.end()) {
+ const std::string& value = kn_extension_it->second;
+
+ numeric = (value == "true");
+
+ icu_collator->setAttribute(UCOL_NUMERIC_COLLATION,
+ numeric ? UCOL_ON : UCOL_OFF, status);
+ CHECK(U_SUCCESS(status));
+ }
+ }
+
+ // 23. If relevantExtensionKeys contains "kf", then
+ // a. Set collator.[[CaseFirst]] to r.[[kf]].
+ //
+ // If the caseFirst value is passed in through the options object,
+ // then we use it. Otherwise, we check if the caseFirst value is
+ // passed in through the unicode extensions.
+ if (found_case_first.FromJust()) {
+ const char* case_first_cstr = case_first_str.get();
+ SetCaseFirstOption(icu_collator.get(), case_first_cstr);
+ } else {
+ auto kf_extension_it = extensions.find("kf");
+ if (kf_extension_it != extensions.end()) {
+ const std::string& value = kf_extension_it->second;
+ SetCaseFirstOption(icu_collator.get(), value.c_str());
+ }
+ }
+
+ // Normalization is always on, by the spec. We are free to optimize
+ // if the strings are already normalized (but we don't have a way to tell
+ // that right now).
+ status = U_ZERO_ERROR;
+ icu_collator->setAttribute(UCOL_NORMALIZATION_MODE, UCOL_ON, status);
+ CHECK(U_SUCCESS(status));
+
+ // 24. Let sensitivity be ? GetOption(options, "sensitivity",
+ // "string", « "base", "accent", "case", "variant" », undefined).
+ values = {"base", "accent", "case", "variant"};
+ std::unique_ptr<char[]> sensitivity_str = nullptr;
+ Maybe<bool> found_sensitivity =
+ Intl::GetStringOption(isolate, options, "sensitivity", values,
+ "Intl.Collator", &sensitivity_str);
+ MAYBE_RETURN(found_sensitivity, MaybeHandle<JSCollator>());
+
+ // 25. If sensitivity is undefined, then
+ if (!found_sensitivity.FromJust()) {
+ // 25. a. If usage is "sort", then
+ if (usage == Usage::SORT) {
+ // 25. a. i. Let sensitivity be "variant".
+ // 26. Set collator.[[Sensitivity]] to sensitivity.
+ icu_collator->setStrength(icu::Collator::TERTIARY);
+ }
+ } else {
+ DCHECK(found_sensitivity.FromJust());
+ const char* sensitivity_cstr = sensitivity_str.get();
+ DCHECK_NOT_NULL(sensitivity_cstr);
+
+ // 26. Set collator.[[Sensitivity]] to sensitivity.
+ if (strcmp(sensitivity_cstr, "base") == 0) {
+ icu_collator->setStrength(icu::Collator::PRIMARY);
+ } else if (strcmp(sensitivity_cstr, "accent") == 0) {
+ icu_collator->setStrength(icu::Collator::SECONDARY);
+ } else if (strcmp(sensitivity_cstr, "case") == 0) {
+ icu_collator->setStrength(icu::Collator::PRIMARY);
+ status = U_ZERO_ERROR;
+ icu_collator->setAttribute(UCOL_CASE_LEVEL, UCOL_ON, status);
+ CHECK(U_SUCCESS(status));
+ } else {
+ DCHECK_EQ(0, strcmp(sensitivity_cstr, "variant"));
+ icu_collator->setStrength(icu::Collator::TERTIARY);
+ }
+ }
+
+ // 27. Let ignorePunctuation be ? GetOption(options,
+ // "ignorePunctuation", "boolean", undefined, false).
+ bool ignore_punctuation;
+ Maybe<bool> found_ignore_punctuation =
+ Intl::GetBoolOption(isolate, options, "ignorePunctuation",
+ "Intl.Collator", &ignore_punctuation);
+ MAYBE_RETURN(found_ignore_punctuation, MaybeHandle<JSCollator>());
+
+ // 28. Set collator.[[IgnorePunctuation]] to ignorePunctuation.
+ if (found_ignore_punctuation.FromJust() && ignore_punctuation) {
+ status = U_ZERO_ERROR;
+ icu_collator->setAttribute(UCOL_ALTERNATE_HANDLING, UCOL_SHIFTED, status);
+ CHECK(U_SUCCESS(status));
+ }
+
+ Handle<Managed<icu::Collator>> managed_collator =
+ Managed<icu::Collator>::FromUniquePtr(isolate, 0,
+ std::move(icu_collator));
+ collator->set_icu_collator(*managed_collator);
+
+ // 29. Return collator.
+ return collator;
+}
+
+// static
+const char* JSCollator::UsageToString(Usage usage) {
+ switch (usage) {
+ case Usage::SORT:
+ return "sort";
+ case Usage::SEARCH:
+ return "search";
+ case Usage::COUNT:
+ UNREACHABLE();
+ }
+}
+
+} // namespace internal
+} // namespace v8
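
The option-versus-extension handling in InitializeCollator above (the options bag wins over the -u- extension, e.g. for kn/numeric) and the sensitivity mapping are observable through the standard Intl.Collator API; a short TypeScript sketch:

  const a = new Intl.Collator('en-u-kn-true', { numeric: false });
  console.log(a.resolvedOptions().numeric);  // false - the options bag wins over -u-kn-true

  const b = new Intl.Collator('en', { numeric: true });
  console.log(b.compare('10', '9') > 0);     // true - "10" sorts after "9" under numeric collation

  const c = new Intl.Collator('en', { sensitivity: 'base' });
  console.log(c.compare('a', 'A'));          // 0 - case differences are ignored at "base" sensitivity
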
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
new file mode 100644
index 0000000000..b2751a446e
--- /dev/null
+++ b/deps/v8/src/objects/js-collator.h
@@ -0,0 +1,92 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_COLLATOR_H_
+#define V8_OBJECTS_JS_COLLATOR_H_
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/managed.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSCollator : public JSObject {
+ public:
+ // ecma402/#sec-initializecollator
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> InitializeCollator(
+ Isolate* isolate, Handle<JSCollator> collator, Handle<Object> locales,
+ Handle<Object> options);
+
+ // ecma402/#sec-intl.collator.prototype.resolvedoptions
+ static Handle<JSObject> ResolvedOptions(Isolate* isolate,
+ Handle<JSCollator> collator);
+
+ DECL_CAST(JSCollator)
+ DECL_PRINTER(JSCollator)
+ DECL_VERIFIER(JSCollator)
+
+ // [[Usage]] is one of the values "sort" or "search", identifying
+ // the collator usage.
+ enum class Usage {
+ SORT,
+ SEARCH,
+
+ COUNT
+ };
+ inline void set_usage(Usage usage);
+ inline Usage usage() const;
+ static const char* UsageToString(Usage usage);
+
+// Layout description.
+#define JS_COLLATOR_FIELDS(V) \
+ V(kICUCollatorOffset, kPointerSize) \
+ V(kFlagsOffset, kPointerSize) \
+ V(kBoundCompareOffset, kPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_COLLATOR_FIELDS)
+#undef JS_COLLATOR_FIELDS
+
+ // ContextSlot defines the context structure for the bound
+ // Collator.prototype.compare function.
+ enum ContextSlot {
+ // The collator instance that the function holding this context is bound to.
+ kCollator = Context::MIN_CONTEXT_SLOTS,
+ kLength
+ };
+
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) V(UsageBits, Usage, 1, _)
+
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(Usage::SORT <= UsageBits::kMax);
+ STATIC_ASSERT(Usage::SEARCH <= UsageBits::kMax);
+
+ DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
+ DECL_ACCESSORS(bound_compare, Object);
+ DECL_INT_ACCESSORS(flags)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_COLLATOR_H_
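
UsageBits above is a V8 BitField: the usage value is packed into the Smi-valued flags word and read back with decode/update. A minimal TypeScript sketch of that encode/decode pattern (the shift and width here are illustrative assumptions, not V8's actual layout):

  const kUsageShift = 0;                       // assumed position of the 1-bit usage field
  const kUsageMask = 0b1 << kUsageShift;

  const decodeUsage = (flags: number) => (flags & kUsageMask) >>> kUsageShift;
  const updateUsage = (flags: number, usage: number) =>
    (flags & ~kUsageMask) | ((usage << kUsageShift) & kUsageMask);

  let flags = 0;
  flags = updateUsage(flags, 1);               // Usage::SEARCH
  console.log(decodeUsage(flags));             // 1
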
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index 12bba94eaa..c50e803429 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -7,6 +7,8 @@
#include "src/objects/js-collection.h"
+#include "src/objects-inl.h" // Needed for write barriers
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 5b9fcf1c29..47bb7a9c2a 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_COLLECTION_H_
#include "src/objects.h"
+#include "src/objects/ordered-hash-table.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
new file mode 100644
index 0000000000..7eb372cb03
--- /dev/null
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -0,0 +1,52 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_GENERATOR_INL_H_
+#define V8_OBJECTS_JS_GENERATOR_INL_H_
+
+#include "src/objects/js-generator.h"
+
+#include "src/objects-inl.h" // Needed for write barriers
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(JSGeneratorObject)
+CAST_ACCESSOR(JSAsyncGeneratorObject)
+
+ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
+ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
+ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
+ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
+SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
+SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
+ACCESSORS(JSGeneratorObject, parameters_and_registers, FixedArray,
+ kParametersAndRegistersOffset)
+
+bool JSGeneratorObject::is_suspended() const {
+ DCHECK_LT(kGeneratorExecuting, 0);
+ DCHECK_LT(kGeneratorClosed, 0);
+ return continuation() >= 0;
+}
+
+bool JSGeneratorObject::is_closed() const {
+ return continuation() == kGeneratorClosed;
+}
+
+bool JSGeneratorObject::is_executing() const {
+ return continuation() == kGeneratorExecuting;
+}
+
+ACCESSORS(JSAsyncGeneratorObject, queue, HeapObject, kQueueOffset)
+SMI_ACCESSORS(JSAsyncGeneratorObject, is_awaiting, kIsAwaitingOffset)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_GENERATOR_INL_H_
diff --git a/deps/v8/src/objects/js-generator.h b/deps/v8/src/objects/js-generator.h
new file mode 100644
index 0000000000..4d63d524ea
--- /dev/null
+++ b/deps/v8/src/objects/js-generator.h
@@ -0,0 +1,111 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_GENERATOR_H_
+#define V8_OBJECTS_JS_GENERATOR_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSGeneratorObject : public JSObject {
+ public:
+ // [function]: The function corresponding to this generator object.
+ DECL_ACCESSORS(function, JSFunction)
+
+ // [context]: The context of the suspended computation.
+ DECL_ACCESSORS(context, Context)
+
+ // [receiver]: The receiver of the suspended computation.
+ DECL_ACCESSORS(receiver, Object)
+
+ // [input_or_debug_pos]
+ // For executing generators: the most recent input value.
+ // For suspended generators: debug information (bytecode offset).
+ // There is currently no need to remember the most recent input value for a
+ // suspended generator.
+ DECL_ACCESSORS(input_or_debug_pos, Object)
+
+ // [resume_mode]: The most recent resume mode.
+ enum ResumeMode { kNext, kReturn, kThrow };
+ DECL_INT_ACCESSORS(resume_mode)
+
+ // [continuation]
+ //
+ // A positive value indicates a suspended generator. The special
+ // kGeneratorExecuting and kGeneratorClosed values indicate that a generator
+ // cannot be resumed.
+ inline int continuation() const;
+ inline void set_continuation(int continuation);
+ inline bool is_closed() const;
+ inline bool is_executing() const;
+ inline bool is_suspended() const;
+
+ // For suspended generators: the source position at which the generator
+ // is suspended.
+ int source_position() const;
+
+ // [parameters_and_registers]: Saved interpreter register file.
+ DECL_ACCESSORS(parameters_and_registers, FixedArray)
+
+ DECL_CAST(JSGeneratorObject)
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSGeneratorObject)
+ DECL_VERIFIER(JSGeneratorObject)
+
+ // Magic sentinel values for the continuation.
+ static const int kGeneratorExecuting = -2;
+ static const int kGeneratorClosed = -1;
+
+ // Layout description.
+ static const int kFunctionOffset = JSObject::kHeaderSize;
+ static const int kContextOffset = kFunctionOffset + kPointerSize;
+ static const int kReceiverOffset = kContextOffset + kPointerSize;
+ static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
+ static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
+ static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
+ static const int kParametersAndRegistersOffset =
+ kContinuationOffset + kPointerSize;
+ static const int kSize = kParametersAndRegistersOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
+};
+
+class JSAsyncGeneratorObject : public JSGeneratorObject {
+ public:
+ DECL_CAST(JSAsyncGeneratorObject)
+
+ // Dispatched behavior.
+ DECL_VERIFIER(JSAsyncGeneratorObject)
+
+ // [queue]
+ // Pointer to the head of a singly linked list of AsyncGeneratorRequest, or
+ // undefined.
+ DECL_ACCESSORS(queue, HeapObject)
+
+ // [is_awaiting]
+ // Whether or not the generator is currently awaiting.
+ DECL_INT_ACCESSORS(is_awaiting)
+
+ // Layout description.
+ static const int kQueueOffset = JSGeneratorObject::kSize;
+ static const int kIsAwaitingOffset = kQueueOffset + kPointerSize;
+ static const int kSize = kIsAwaitingOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncGeneratorObject);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_GENERATOR_H_
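For readers of the patch: the continuation slot declared above is a three-state encoding. Any non-negative value marks a suspended generator (the value records where to resume), while the two negative sentinels mark states that cannot be resumed. A minimal standalone C++ sketch of the same convention (hypothetical Generator struct, not V8 code):

// Standalone sketch of the continuation encoding described in
// js-generator.h: >= 0 means suspended, -1 closed, -2 executing.
#include <cassert>

namespace sketch {
constexpr int kGeneratorExecuting = -2;
constexpr int kGeneratorClosed = -1;

struct Generator {
  int continuation = 0;  // e.g. a suspend point / bytecode offset
  bool is_suspended() const { return continuation >= 0; }
  bool is_closed() const { return continuation == kGeneratorClosed; }
  bool is_executing() const { return continuation == kGeneratorExecuting; }
};
}  // namespace sketch

int main() {
  sketch::Generator g;
  assert(g.is_suspended());
  g.continuation = sketch::kGeneratorExecuting;
  assert(g.is_executing() && !g.is_suspended());
  g.continuation = sketch::kGeneratorClosed;
  assert(g.is_closed() && !g.is_suspended());
  return 0;
}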
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
new file mode 100644
index 0000000000..554b3488b6
--- /dev/null
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -0,0 +1,55 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_LIST_FORMAT_INL_H_
+#define V8_OBJECTS_JS_LIST_FORMAT_INL_H_
+
+#include "src/objects-inl.h"
+#include "src/objects/js-list-format.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Base list format accessors.
+ACCESSORS(JSListFormat, locale, String, kLocaleOffset)
+ACCESSORS(JSListFormat, formatter, Foreign, kFormatterOffset)
+SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset)
+
+inline void JSListFormat::set_style(Style style) {
+ DCHECK_GT(Style::COUNT, style);
+ int hints = flags();
+ hints = StyleBits::update(hints, style);
+ set_flags(hints);
+}
+
+inline JSListFormat::Style JSListFormat::style() const {
+ return StyleBits::decode(flags());
+}
+
+inline void JSListFormat::set_type(Type type) {
+ DCHECK_GT(Type::COUNT, type);
+ int hints = flags();
+ hints = TypeBits::update(hints, type);
+ set_flags(hints);
+}
+
+inline JSListFormat::Type JSListFormat::type() const {
+ return TypeBits::decode(flags());
+}
+
+CAST_ACCESSOR(JSListFormat);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_LIST_FORMAT_INL_H_
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
new file mode 100644
index 0000000000..66dbe0bfd9
--- /dev/null
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -0,0 +1,401 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-list-format.h"
+
+#include <memory>
+#include <vector>
+
+#include "src/elements.h"
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-list-format-inl.h"
+#include "src/objects/managed.h"
+#include "unicode/listformatter.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+const char* kStandard = "standard";
+const char* kOr = "or";
+const char* kUnit = "unit";
+const char* kStandardShort = "standard-short";
+const char* kUnitShort = "unit-short";
+const char* kUnitNarrow = "unit-narrow";
+
+const char* GetIcuStyleString(JSListFormat::Style style,
+ JSListFormat::Type type) {
+ switch (type) {
+ case JSListFormat::Type::CONJUNCTION:
+ switch (style) {
+ case JSListFormat::Style::LONG:
+ return kStandard;
+ case JSListFormat::Style::SHORT:
+ return kStandardShort;
+ case JSListFormat::Style::NARROW:
+ // Currently, ListFormat::createInstance on "standard-narrow" will
+ // fail so we use "standard-short" here.
+ // See https://unicode.org/cldr/trac/ticket/11254
+        // TODO(ftang): change to return kStandardNarrow after the above
+        // issue is fixed in CLDR/ICU.
+ // CLDR bug: https://unicode.org/cldr/trac/ticket/11254
+ // ICU bug: https://unicode-org.atlassian.net/browse/ICU-20014
+ return kStandardShort;
+ case JSListFormat::Style::COUNT:
+ UNREACHABLE();
+ }
+ case JSListFormat::Type::DISJUNCTION:
+ switch (style) {
+ // Currently, ListFormat::createInstance on "or-short" and "or-narrow"
+ // will fail so we use "or" here.
+ // See https://unicode.org/cldr/trac/ticket/11254
+      // TODO(ftang): change to return kOr, kOrShort or kOrNarrow depending
+      // on style after the above issue is fixed in CLDR/ICU.
+ // CLDR bug: https://unicode.org/cldr/trac/ticket/11254
+ // ICU bug: https://unicode-org.atlassian.net/browse/ICU-20014
+ case JSListFormat::Style::LONG:
+ case JSListFormat::Style::SHORT:
+ case JSListFormat::Style::NARROW:
+ return kOr;
+ case JSListFormat::Style::COUNT:
+ UNREACHABLE();
+ }
+ case JSListFormat::Type::UNIT:
+ switch (style) {
+ case JSListFormat::Style::LONG:
+ return kUnit;
+ case JSListFormat::Style::SHORT:
+ return kUnitShort;
+ case JSListFormat::Style::NARROW:
+ return kUnitNarrow;
+ case JSListFormat::Style::COUNT:
+ UNREACHABLE();
+ }
+ case JSListFormat::Type::COUNT:
+ UNREACHABLE();
+ }
+}
+
+} // namespace
+
+JSListFormat::Style get_style(const char* str) {
+ switch (str[0]) {
+ case 'n':
+ if (strcmp(&str[1], "arrow") == 0) return JSListFormat::Style::NARROW;
+ break;
+ case 'l':
+ if (strcmp(&str[1], "ong") == 0) return JSListFormat::Style::LONG;
+ break;
+ case 's':
+ if (strcmp(&str[1], "hort") == 0) return JSListFormat::Style::SHORT;
+ break;
+ }
+ UNREACHABLE();
+}
+
+JSListFormat::Type get_type(const char* str) {
+ switch (str[0]) {
+ case 'c':
+ if (strcmp(&str[1], "onjunction") == 0)
+ return JSListFormat::Type::CONJUNCTION;
+ break;
+ case 'd':
+ if (strcmp(&str[1], "isjunction") == 0)
+ return JSListFormat::Type::DISJUNCTION;
+ break;
+ case 'u':
+ if (strcmp(&str[1], "nit") == 0) return JSListFormat::Type::UNIT;
+ break;
+ }
+ UNREACHABLE();
+}
+
+MaybeHandle<JSListFormat> JSListFormat::InitializeListFormat(
+ Isolate* isolate, Handle<JSListFormat> list_format_holder,
+ Handle<Object> input_locales, Handle<Object> input_options) {
+ Factory* factory = isolate->factory();
+ list_format_holder->set_flags(0);
+
+ Handle<JSReceiver> options;
+ // 2. If options is undefined, then
+ if (input_options->IsUndefined(isolate)) {
+ // a. Let options be ObjectCreate(null).
+ options = isolate->factory()->NewJSObjectWithNullProto();
+ // 3. Else
+ } else {
+ // a. Let options be ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, options,
+ Object::ToObject(isolate, input_options),
+ JSListFormat);
+ }
+
+ // 5. Let t be GetOption(options, "type", "string", «"conjunction",
+ // "disjunction", "unit"», "conjunction").
+ std::unique_ptr<char[]> type_str = nullptr;
+ std::vector<const char*> type_values = {"conjunction", "disjunction", "unit"};
+ Maybe<bool> maybe_found_type = Intl::GetStringOption(
+ isolate, options, "type", type_values, "Intl.ListFormat", &type_str);
+ Type type_enum = Type::CONJUNCTION;
+ MAYBE_RETURN(maybe_found_type, MaybeHandle<JSListFormat>());
+ if (maybe_found_type.FromJust()) {
+ DCHECK_NOT_NULL(type_str.get());
+ type_enum = get_type(type_str.get());
+ }
+ // 6. Set listFormat.[[Type]] to t.
+ list_format_holder->set_type(type_enum);
+
+ // 7. Let s be ? GetOption(options, "style", "string",
+ // «"long", "short", "narrow"», "long").
+ std::unique_ptr<char[]> style_str = nullptr;
+ std::vector<const char*> style_values = {"long", "short", "narrow"};
+ Maybe<bool> maybe_found_style = Intl::GetStringOption(
+ isolate, options, "style", style_values, "Intl.ListFormat", &style_str);
+ Style style_enum = Style::LONG;
+ MAYBE_RETURN(maybe_found_style, MaybeHandle<JSListFormat>());
+ if (maybe_found_style.FromJust()) {
+ DCHECK_NOT_NULL(style_str.get());
+ style_enum = get_style(style_str.get());
+ }
+ // 15. Set listFormat.[[Style]] to s.
+ list_format_holder->set_style(style_enum);
+
+ // 10. Let r be ResolveLocale(%ListFormat%.[[AvailableLocales]],
+ // requestedLocales, opt, undefined, localeData).
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, r,
+ Intl::ResolveLocale(isolate, "listformat", input_locales, options),
+ JSListFormat);
+
+ Handle<Object> locale_obj =
+ JSObject::GetDataProperty(r, factory->locale_string());
+ Handle<String> locale;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, locale, Object::ToString(isolate, locale_obj), JSListFormat);
+
+ // 18. Set listFormat.[[Locale]] to the value of r.[[Locale]].
+ list_format_holder->set_locale(*locale);
+
+ std::unique_ptr<char[]> locale_name = locale->ToCString();
+ icu::Locale icu_locale(locale_name.get());
+ UErrorCode status = U_ZERO_ERROR;
+ icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
+ icu_locale, GetIcuStyleString(style_enum, type_enum), status);
+ if (U_FAILURE(status)) {
+ delete formatter;
+ FATAL("Failed to create ICU list formatter, are ICU data files missing?");
+ }
+ CHECK_NOT_NULL(formatter);
+
+ Handle<Managed<icu::ListFormatter>> managed_formatter =
+ Managed<icu::ListFormatter>::FromRawPtr(isolate, 0, formatter);
+
+ list_format_holder->set_formatter(*managed_formatter);
+ return list_format_holder;
+}
+
+Handle<JSObject> JSListFormat::ResolvedOptions(
+ Isolate* isolate, Handle<JSListFormat> format_holder) {
+ Factory* factory = isolate->factory();
+ Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
+ Handle<String> locale(format_holder->locale(), isolate);
+ JSObject::AddProperty(isolate, result, factory->locale_string(), locale,
+ NONE);
+ JSObject::AddProperty(isolate, result, factory->style_string(),
+ format_holder->StyleAsString(), NONE);
+ JSObject::AddProperty(isolate, result, factory->type_string(),
+ format_holder->TypeAsString(), NONE);
+ return result;
+}
+
+icu::ListFormatter* JSListFormat::UnpackFormatter(Isolate* isolate,
+ Handle<JSListFormat> holder) {
+ return Managed<icu::ListFormatter>::cast(holder->formatter())->raw();
+}
+
+Handle<String> JSListFormat::StyleAsString() const {
+ switch (style()) {
+ case Style::LONG:
+ return GetReadOnlyRoots().long_string_handle();
+ case Style::SHORT:
+ return GetReadOnlyRoots().short_string_handle();
+ case Style::NARROW:
+ return GetReadOnlyRoots().narrow_string_handle();
+ case Style::COUNT:
+ UNREACHABLE();
+ }
+}
+
+Handle<String> JSListFormat::TypeAsString() const {
+ switch (type()) {
+ case Type::CONJUNCTION:
+ return GetReadOnlyRoots().conjunction_string_handle();
+ case Type::DISJUNCTION:
+ return GetReadOnlyRoots().disjunction_string_handle();
+ case Type::UNIT:
+ return GetReadOnlyRoots().unit_string_handle();
+ case Type::COUNT:
+ UNREACHABLE();
+ }
+}
+
+namespace {
+
+// TODO(ftang): remove the following hack once icu::ListFormatter supports
+// FieldPosition.
+// This is a temporary workaround until icu::ListFormatter supports
+// FieldPosition. It is inefficient and won't work correctly in the edge case
+// where an input item is itself a fragment of the list pattern.
+// For example, the following call under English will mark the "an"
+// incorrectly, since the formatted result is "a, b, and an".
+// listFormat.formatToParts(["a", "b", "an"])
+// https://ssl.icu-project.org/trac/ticket/13754
+MaybeHandle<JSArray> GenerateListFormatParts(
+ Isolate* isolate, const icu::UnicodeString& formatted,
+ const icu::UnicodeString items[], int length) {
+ Factory* factory = isolate->factory();
+ int estimate_size = length * 2 + 1;
+ Handle<JSArray> array = factory->NewJSArray(estimate_size);
+ int index = 0;
+ int last_pos = 0;
+ for (int i = 0; i < length; i++) {
+ int found = formatted.indexOf(items[i], last_pos);
+ DCHECK_GE(found, 0);
+ if (found > last_pos) {
+ Handle<String> substring;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, last_pos, found), JSArray);
+ Intl::AddElement(isolate, array, index++, factory->literal_string(),
+ substring);
+ }
+ last_pos = found + items[i].length();
+ Handle<String> substring;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring, Intl::ToString(isolate, formatted, found, last_pos),
+ JSArray);
+ Intl::AddElement(isolate, array, index++, factory->element_string(),
+ substring);
+ }
+ if (last_pos < formatted.length()) {
+ Handle<String> substring;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, substring,
+ Intl::ToString(isolate, formatted, last_pos, formatted.length()),
+ JSArray);
+ Intl::AddElement(isolate, array, index++, factory->literal_string(),
+ substring);
+ }
+ return array;
+}
+
+// Extract String from JSArray into array of UnicodeString
+Maybe<bool> ToUnicodeStringArray(Isolate* isolate, Handle<JSArray> array,
+ icu::UnicodeString items[], uint32_t length) {
+ Factory* factory = isolate->factory();
+  // In general, ElementsAccessor::Get isn't guaranteed to give us the
+  // elements in order. But since this array was created by a builtin we
+  // control, that should not be a problem here; the DCHECKs below make sure
+  // of it.
+ DCHECK(array->HasFastPackedElements());
+ auto* accessor = array->GetElementsAccessor();
+ DCHECK(length == accessor->NumberOfElements(*array));
+ // ecma402 #sec-createpartsfromlist
+ // 2. If list contains any element value such that Type(value) is not String,
+ // throw a TypeError exception.
+ //
+ // Per spec it looks like we're supposed to throw a TypeError exception if the
+ // item isn't already a string, rather than coercing to a string. Moreover,
+ // the way the spec's written it looks like we're supposed to run through the
+ // whole list to check that they're all strings before going further.
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<Object> item = accessor->Get(array, i);
+ DCHECK(!item.is_null());
+ if (!item->IsString()) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewTypeError(MessageTemplate::kArrayItemNotType,
+ factory->NewStringFromStaticChars("list"),
+ factory->NewNumber(i),
+ factory->NewStringFromStaticChars("String")),
+ Nothing<bool>());
+ }
+ }
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<String> string = Handle<String>::cast(accessor->Get(array, i));
+ DisallowHeapAllocation no_gc;
+ string = String::Flatten(isolate, string);
+ std::unique_ptr<uc16[]> sap;
+ items[i] =
+ icu::UnicodeString(GetUCharBufferFromFlat(string->GetFlatContent(),
+ &sap, string->length()),
+ string->length());
+ }
+ return Just(true);
+}
+
+} // namespace
+
+Maybe<bool> FormatListCommon(Isolate* isolate,
+ Handle<JSListFormat> format_holder,
+ Handle<JSArray> list,
+ icu::UnicodeString& formatted, uint32_t* length,
+ std::unique_ptr<icu::UnicodeString[]>& array) {
+ DCHECK(!list->IsUndefined());
+
+ icu::ListFormatter* formatter =
+ JSListFormat::UnpackFormatter(isolate, format_holder);
+ CHECK_NOT_NULL(formatter);
+
+ *length = list->GetElementsAccessor()->NumberOfElements(*list);
+ array.reset(new icu::UnicodeString[*length]);
+
+ // ecma402 #sec-createpartsfromlist
+ // 2. If list contains any element value such that Type(value) is not String,
+ // throw a TypeError exception.
+ MAYBE_RETURN(ToUnicodeStringArray(isolate, list, array.get(), *length),
+ Nothing<bool>());
+
+ UErrorCode status = U_ZERO_ERROR;
+ formatter->format(array.get(), *length, formatted, status);
+ DCHECK(U_SUCCESS(status));
+ return Just(true);
+}
+
+// ecma402 #sec-formatlist
+MaybeHandle<String> JSListFormat::FormatList(Isolate* isolate,
+ Handle<JSListFormat> format_holder,
+ Handle<JSArray> list) {
+ icu::UnicodeString formatted;
+ uint32_t length;
+ std::unique_ptr<icu::UnicodeString[]> array;
+ MAYBE_RETURN(
+ FormatListCommon(isolate, format_holder, list, formatted, &length, array),
+ Handle<String>());
+ return Intl::ToString(isolate, formatted);
+}
+
+// ecma402 #sec-formatlisttoparts
+MaybeHandle<JSArray> JSListFormat::FormatListToParts(
+ Isolate* isolate, Handle<JSListFormat> format_holder,
+ Handle<JSArray> list) {
+ icu::UnicodeString formatted;
+ uint32_t length;
+ std::unique_ptr<icu::UnicodeString[]> array;
+ MAYBE_RETURN(
+ FormatListCommon(isolate, format_holder, list, formatted, &length, array),
+ Handle<JSArray>());
+ return GenerateListFormatParts(isolate, formatted, array.get(), length);
+}
+
+} // namespace internal
+} // namespace v8
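The FormatListCommon() path above boils down to two ICU calls: create an icu::ListFormatter for the locale with one of the style strings produced by GetIcuStyleString(), then hand it an array of UnicodeStrings. A minimal standalone sketch of that flow, assuming ICU headers and libraries are available and that the style-string overload of createInstance() used in this file is accessible in the local ICU build:

// Standalone ICU sketch; it exercises only the two ICU calls relied on by
// FormatListCommon(): ListFormatter::createInstance() with a style string,
// and ListFormatter::format().
#include <iostream>
#include <memory>
#include <string>

#include "unicode/listformatter.h"
#include "unicode/unistr.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  // "standard" corresponds to Style::LONG + Type::CONJUNCTION in the table
  // built by GetIcuStyleString().
  std::unique_ptr<icu::ListFormatter> formatter(
      icu::ListFormatter::createInstance(icu::Locale("en"), "standard",
                                         status));
  if (U_FAILURE(status) || formatter == nullptr) {
    std::cerr << "failed to create ListFormatter\n";
    return 1;
  }

  const icu::UnicodeString items[] = {
      icu::UnicodeString::fromUTF8("Motorcycle"),
      icu::UnicodeString::fromUTF8("Bus"),
      icu::UnicodeString::fromUTF8("Car")};
  icu::UnicodeString formatted;
  formatter->format(items, 3, formatted, status);
  if (U_FAILURE(status)) return 1;

  std::string out;
  formatted.toUTF8String(out);
  std::cout << out << "\n";  // e.g. "Motorcycle, Bus, and Car"
  return 0;
}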
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
new file mode 100644
index 0000000000..22f8d20005
--- /dev/null
+++ b/deps/v8/src/objects/js-list-format.h
@@ -0,0 +1,121 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_LIST_FORMAT_H_
+#define V8_OBJECTS_JS_LIST_FORMAT_H_
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "unicode/uversion.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace U_ICU_NAMESPACE {
+class ListFormatter;
+}
+
+namespace v8 {
+namespace internal {
+
+class JSListFormat : public JSObject {
+ public:
+  // Initializes list format object with properties derived from input
+  // locales and options.
+ static MaybeHandle<JSListFormat> InitializeListFormat(
+ Isolate* isolate, Handle<JSListFormat> list_format_holder,
+ Handle<Object> locales, Handle<Object> options);
+
+ static Handle<JSObject> ResolvedOptions(Isolate* isolate,
+ Handle<JSListFormat> format_holder);
+
+ // Unpacks formatter object from corresponding JavaScript object.
+ static icu::ListFormatter* UnpackFormatter(
+ Isolate* isolate, Handle<JSListFormat> list_format_holder);
+
+ // ecma402 #sec-formatlist
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatList(
+ Isolate* isolate, Handle<JSListFormat> format_holder,
+ Handle<JSArray> list);
+
+  // ecma402 #sec-formatlisttoparts
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> FormatListToParts(
+ Isolate* isolate, Handle<JSListFormat> format_holder,
+ Handle<JSArray> list);
+
+ Handle<String> StyleAsString() const;
+ Handle<String> TypeAsString() const;
+
+ DECL_CAST(JSListFormat)
+
+ // ListFormat accessors.
+ DECL_ACCESSORS(locale, String)
+ DECL_ACCESSORS(formatter, Foreign)
+
+  // Style: identifying the list format style used.
+ //
+ // ecma402/#sec-properties-of-intl-listformat-instances
+ enum class Style {
+ LONG, // Everything spelled out.
+ SHORT, // Abbreviations used when possible.
+ NARROW, // Use the shortest possible form.
+ COUNT
+ };
+ inline void set_style(Style style);
+ inline Style style() const;
+
+  // Type: identifying the type of list formatting used.
+ //
+ // ecma402/#sec-properties-of-intl-listformat-instances
+ enum class Type {
+ CONJUNCTION, // for "and"-based lists (e.g., "A, B and C")
+ DISJUNCTION, // for "or"-based lists (e.g., "A, B or C"),
+ UNIT, // for lists of values with units (e.g., "5 pounds, 12 ounces").
+ COUNT
+ };
+ inline void set_type(Type type);
+ inline Type type() const;
+
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(StyleBits, Style, 2, _) \
+ V(TypeBits, Type, 2, _)
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(Style::LONG <= StyleBits::kMax);
+ STATIC_ASSERT(Style::SHORT <= StyleBits::kMax);
+ STATIC_ASSERT(Style::NARROW <= StyleBits::kMax);
+ STATIC_ASSERT(Type::CONJUNCTION <= TypeBits::kMax);
+ STATIC_ASSERT(Type::DISJUNCTION <= TypeBits::kMax);
+ STATIC_ASSERT(Type::UNIT <= TypeBits::kMax);
+
+  // [flags] Bit field containing various flags about the list format.
+ DECL_INT_ACCESSORS(flags)
+
+ DECL_PRINTER(JSListFormat)
+ DECL_VERIFIER(JSListFormat)
+
+ // Layout description.
+ static const int kJSListFormatOffset = JSObject::kHeaderSize;
+ static const int kLocaleOffset = kJSListFormatOffset + kPointerSize;
+ static const int kFormatterOffset = kLocaleOffset + kPointerSize;
+ static const int kFlagsOffset = kFormatterOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSListFormat);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_LIST_FORMAT_H_
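StyleBits and TypeBits above pack both enums into the single Smi-sized flags word. A plain C++ sketch of the shift-and-mask arithmetic the generated accessors perform (hypothetical BitFieldSketch helper, not V8's actual BitField template):

// Style occupies bits 0..1 and Type occupies bits 2..3 of one flags word.
#include <cassert>

enum class Style { LONG, SHORT, NARROW };
enum class Type { CONJUNCTION, DISJUNCTION, UNIT };

template <typename E, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr int kMask = ((1 << kSize) - 1) << kShift;
  static int update(int flags, E value) {
    return (flags & ~kMask) | (static_cast<int>(value) << kShift);
  }
  static E decode(int flags) {
    return static_cast<E>((flags & kMask) >> kShift);
  }
};

using StyleBits = BitFieldSketch<Style, 0, 2>;
using TypeBits = BitFieldSketch<Type, 2, 2>;

int main() {
  int flags = 0;
  flags = StyleBits::update(flags, Style::NARROW);
  flags = TypeBits::update(flags, Type::UNIT);
  assert(StyleBits::decode(flags) == Style::NARROW);
  assert(TypeBits::decode(flags) == Type::UNIT);
  // Updating one field leaves the other untouched.
  flags = StyleBits::update(flags, Style::SHORT);
  assert(TypeBits::decode(flags) == Type::UNIT);
  return 0;
}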
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index 65f537ea52..a70bef998e 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -9,6 +9,7 @@
#ifndef V8_OBJECTS_JS_LOCALE_INL_H_
#define V8_OBJECTS_JS_LOCALE_INL_H_
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-locale.h"
@@ -38,4 +39,6 @@ CAST_ACCESSOR(JSLocale);
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_OBJECTS_JS_LOCALE_INL_H_
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 5e78c0bc3f..8968aa58c9 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -21,6 +21,7 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
#include "unicode/locid.h"
+#include "unicode/uloc.h"
#include "unicode/unistr.h"
#include "unicode/uvernum.h"
#include "unicode/uversion.h"
@@ -139,17 +140,17 @@ bool PopulateLocaleWithUnicodeTags(Isolate* isolate, const char* icu_locale,
if (bcp47_value) {
Handle<String> bcp47_handle =
factory->NewStringFromAsciiChecked(bcp47_value);
- if (strncmp(bcp47_key, "kn", 2) == 0) {
+ if (strcmp(bcp47_key, "kn") == 0) {
locale_holder->set_numeric(*bcp47_handle);
- } else if (strncmp(bcp47_key, "ca", 2) == 0) {
+ } else if (strcmp(bcp47_key, "ca") == 0) {
locale_holder->set_calendar(*bcp47_handle);
- } else if (strncmp(bcp47_key, "kf", 2) == 0) {
+ } else if (strcmp(bcp47_key, "kf") == 0) {
locale_holder->set_case_first(*bcp47_handle);
- } else if (strncmp(bcp47_key, "co", 2) == 0) {
+ } else if (strcmp(bcp47_key, "co") == 0) {
locale_holder->set_collation(*bcp47_handle);
- } else if (strncmp(bcp47_key, "hc", 2) == 0) {
+ } else if (strcmp(bcp47_key, "hc") == 0) {
locale_holder->set_hour_cycle(*bcp47_handle);
- } else if (strncmp(bcp47_key, "nu", 2) == 0) {
+ } else if (strcmp(bcp47_key, "nu") == 0) {
locale_holder->set_numbering_system(*bcp47_handle);
}
}
@@ -174,8 +175,14 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
char icu_result[ULOC_FULLNAME_CAPACITY];
char icu_canonical[ULOC_FULLNAME_CAPACITY];
+ if (locale->length() == 0) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty),
+ JSLocale);
+ }
+
v8::String::Utf8Value bcp47_locale(v8_isolate, v8::Utils::ToLocal(locale));
- if (bcp47_locale.length() == 0) return MaybeHandle<JSLocale>();
+ CHECK_LT(0, bcp47_locale.length());
+ CHECK_NOT_NULL(*bcp47_locale);
int icu_length = uloc_forLanguageTag(
*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY, nullptr, &status);
@@ -301,5 +308,33 @@ MaybeHandle<JSLocale> JSLocale::InitializeLocale(Isolate* isolate,
return locale_holder;
}
+namespace {
+
+Handle<String> MorphLocale(Isolate* isolate, String* input,
+ int32_t (*morph_func)(const char*, char*, int32_t,
+ UErrorCode*)) {
+ Factory* factory = isolate->factory();
+ char localeBuffer[ULOC_FULLNAME_CAPACITY];
+ UErrorCode status = U_ZERO_ERROR;
+ DCHECK_NOT_NULL(morph_func);
+ int32_t length = (*morph_func)(input->ToCString().get(), localeBuffer,
+ ULOC_FULLNAME_CAPACITY, &status);
+ DCHECK(U_SUCCESS(status));
+ DCHECK_GT(length, 0);
+ std::string locale(localeBuffer, length);
+ std::replace(locale.begin(), locale.end(), '_', '-');
+ return factory->NewStringFromAsciiChecked(locale.c_str());
+}
+
+} // namespace
+
+Handle<String> JSLocale::Maximize(Isolate* isolate, String* locale) {
+ return MorphLocale(isolate, locale, uloc_addLikelySubtags);
+}
+
+Handle<String> JSLocale::Minimize(Isolate* isolate, String* locale) {
+ return MorphLocale(isolate, locale, uloc_minimizeSubtags);
+}
+
} // namespace internal
} // namespace v8
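MorphLocale() above funnels both Locale.prototype.maximize() and minimize() through one ICU C call and then rewrites '_' back to '-' to recover a BCP 47 tag. A standalone sketch of the maximize case, assuming ICU is available to build against; minimize would call uloc_minimizeSubtags() with the identical signature:

// Standalone sketch of the uloc_addLikelySubtags() call wrapped by
// MorphLocale() above.
#include <algorithm>
#include <iostream>
#include <string>

#include "unicode/uloc.h"

int main() {
  char buffer[ULOC_FULLNAME_CAPACITY];
  UErrorCode status = U_ZERO_ERROR;
  int32_t length =
      uloc_addLikelySubtags("en", buffer, ULOC_FULLNAME_CAPACITY, &status);
  if (U_FAILURE(status) || length <= 0) return 1;

  // ICU returns the canonical form with '_' separators ("en_Latn_US");
  // the code above converts it back to BCP 47 form ("en-Latn-US").
  std::string maximized(buffer, length);
  std::replace(maximized.begin(), maximized.end(), '_', '-');
  std::cout << maximized << "\n";
  return 0;
}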
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 74d64bf486..d111885d52 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -9,7 +9,6 @@
#ifndef V8_OBJECTS_JS_LOCALE_H_
#define V8_OBJECTS_JS_LOCALE_H_
-#include "src/api.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
@@ -30,6 +29,8 @@ class JSLocale : public JSObject {
Handle<JSLocale> locale_holder,
Handle<String> locale,
Handle<JSReceiver> options);
+ static Handle<String> Maximize(Isolate* isolate, String* locale);
+ static Handle<String> Minimize(Isolate* isolate, String* locale);
DECL_CAST(JSLocale)
@@ -76,4 +77,6 @@ class JSLocale : public JSObject {
} // namespace internal
} // namespace v8
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_OBJECTS_JS_LOCALE_H_
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
new file mode 100644
index 0000000000..6cfeb827ca
--- /dev/null
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_PLURAL_RULES_INL_H_
+#define V8_OBJECTS_JS_PLURAL_RULES_INL_H_
+
+#include "src/api-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/js-plural-rules.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+ACCESSORS(JSPluralRules, locale, String, kLocaleOffset)
+ACCESSORS(JSPluralRules, type, String, kTypeOffset)
+ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
+ kICUPluralRulesOffset)
+ACCESSORS(JSPluralRules, icu_decimal_format, Managed<icu::DecimalFormat>,
+ kICUDecimalFormatOffset)
+
+CAST_ACCESSOR(JSPluralRules);
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PLURAL_RULES_INL_H_
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
new file mode 100644
index 0000000000..07cc62a41e
--- /dev/null
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -0,0 +1,326 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/js-plural-rules.h"
+
+#include "src/isolate-inl.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/js-plural-rules-inl.h"
+#include "unicode/decimfmt.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/plurrule.h"
+#include "unicode/strenum.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
+ const char* type_string,
+ std::unique_ptr<icu::PluralRules>* pl,
+ std::unique_ptr<icu::DecimalFormat>* nf) {
+ // Make formatter from options. Numbering system is added
+ // to the locale as Unicode extension (if it was specified at all).
+ UErrorCode status = U_ZERO_ERROR;
+
+ UPluralType type = UPLURAL_TYPE_CARDINAL;
+ if (strcmp(type_string, "ordinal") == 0) {
+ type = UPLURAL_TYPE_ORDINAL;
+ } else {
+ CHECK_EQ(0, strcmp(type_string, "cardinal"));
+ }
+
+ std::unique_ptr<icu::PluralRules> plural_rules(
+ icu::PluralRules::forLocale(icu_locale, type, status));
+ if (U_FAILURE(status)) {
+ return false;
+ }
+ CHECK_NOT_NULL(plural_rules.get());
+
+ std::unique_ptr<icu::DecimalFormat> number_format(
+ static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status)));
+ if (U_FAILURE(status)) {
+ return false;
+ }
+ CHECK_NOT_NULL(number_format.get());
+
+ *pl = std::move(plural_rules);
+ *nf = std::move(number_format);
+
+ return true;
+}
+
+void InitializeICUPluralRules(
+ Isolate* isolate, Handle<String> locale, const char* type,
+ std::unique_ptr<icu::PluralRules>* plural_rules,
+ std::unique_ptr<icu::DecimalFormat>* number_format) {
+ icu::Locale icu_locale = Intl::CreateICULocale(isolate, locale);
+ DCHECK(!icu_locale.isBogus());
+
+ bool success = CreateICUPluralRules(isolate, icu_locale, type, plural_rules,
+ number_format);
+ if (!success) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
+ success = CreateICUPluralRules(isolate, no_extension_locale, type,
+ plural_rules, number_format);
+
+ if (!success) {
+ FATAL("Failed to create ICU PluralRules, are ICU data files missing?");
+ }
+ }
+
+ CHECK_NOT_NULL((*plural_rules).get());
+ CHECK_NOT_NULL((*number_format).get());
+}
+
+} // namespace
+
+// static
+MaybeHandle<JSPluralRules> JSPluralRules::InitializePluralRules(
+ Isolate* isolate, Handle<JSPluralRules> plural_rules,
+ Handle<Object> locales, Handle<Object> options_obj) {
+ // 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
+ // TODO(jkummerow): Port ResolveLocale, then use the C++ version of
+ // CanonicalizeLocaleList here.
+ Handle<JSObject> requested_locales;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, requested_locales,
+ Intl::CanonicalizeLocaleListJS(isolate, locales),
+ JSPluralRules);
+
+ // 2. If options is undefined, then
+ if (options_obj->IsUndefined(isolate)) {
+ // 2. a. Let options be ObjectCreate(null).
+ options_obj = isolate->factory()->NewJSObjectWithNullProto();
+ } else {
+ // 3. Else
+ // 3. a. Let options be ? ToObject(options).
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, options_obj,
+ Object::ToObject(isolate, options_obj, "Intl.PluralRules"),
+ JSPluralRules);
+ }
+
+ // At this point, options_obj can either be a JSObject or a JSProxy only.
+ Handle<JSReceiver> options = Handle<JSReceiver>::cast(options_obj);
+
+ // TODO(gsathya): This is currently done as part of the
+ // Intl::ResolveLocale call below. Fix this once resolveLocale is
+ // changed to not do the lookup.
+ //
+ // 5. Let matcher be ? GetOption(options, "localeMatcher", "string",
+ // « "lookup", "best fit" », "best fit").
+ // 6. Set opt.[[localeMatcher]] to matcher.
+
+ // 7. Let t be ? GetOption(options, "type", "string", « "cardinal",
+ // "ordinal" », "cardinal").
+ std::vector<const char*> values = {"cardinal", "ordinal"};
+ std::unique_ptr<char[]> type_str = nullptr;
+ const char* type_cstr = "cardinal";
+ Maybe<bool> found = Intl::GetStringOption(isolate, options, "type", values,
+ "Intl.PluralRules", &type_str);
+ MAYBE_RETURN(found, MaybeHandle<JSPluralRules>());
+ if (found.FromJust()) {
+ type_cstr = type_str.get();
+ }
+
+ // 8. Set pluralRules.[[Type]] to t.
+ Handle<String> type =
+ isolate->factory()->NewStringFromAsciiChecked(type_cstr);
+ plural_rules->set_type(*type);
+
+ // Note: The spec says we should do ResolveLocale after performing
+ // SetNumberFormatDigitOptions but we need the locale to create all
+ // the ICU data structures.
+ //
+ // This isn't observable so we aren't violating the spec.
+
+ // 11. Let r be ResolveLocale(%PluralRules%.[[AvailableLocales]],
+ // requestedLocales, opt, %PluralRules%.[[RelevantExtensionKeys]],
+ // localeData).
+ Handle<JSObject> r;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, r,
+ Intl::ResolveLocale(isolate, "pluralrules", requested_locales, options),
+ JSPluralRules);
+
+ Handle<String> locale_str = isolate->factory()->locale_string();
+ Handle<Object> locale_obj = JSObject::GetDataProperty(r, locale_str);
+
+  // The locale has to be a string: either a user-provided canonicalized
+  // string or the default locale.
+ CHECK(locale_obj->IsString());
+ Handle<String> locale = Handle<String>::cast(locale_obj);
+
+ // 12. Set pluralRules.[[Locale]] to the value of r.[[locale]].
+ plural_rules->set_locale(*locale);
+
+ std::unique_ptr<icu::PluralRules> icu_plural_rules;
+ std::unique_ptr<icu::DecimalFormat> icu_decimal_format;
+ InitializeICUPluralRules(isolate, locale, type_cstr, &icu_plural_rules,
+ &icu_decimal_format);
+ CHECK_NOT_NULL(icu_plural_rules.get());
+ CHECK_NOT_NULL(icu_decimal_format.get());
+
+ // 9. Perform ? SetNumberFormatDigitOptions(pluralRules, options, 0, 3).
+ Maybe<bool> done = Intl::SetNumberFormatDigitOptions(
+ isolate, icu_decimal_format.get(), options, 0, 3);
+ MAYBE_RETURN(done, MaybeHandle<JSPluralRules>());
+
+ Handle<Managed<icu::PluralRules>> managed_plural_rules =
+ Managed<icu::PluralRules>::FromUniquePtr(isolate, 0,
+ std::move(icu_plural_rules));
+ plural_rules->set_icu_plural_rules(*managed_plural_rules);
+
+ Handle<Managed<icu::DecimalFormat>> managed_decimal_format =
+ Managed<icu::DecimalFormat>::FromUniquePtr(isolate, 0,
+ std::move(icu_decimal_format));
+ plural_rules->set_icu_decimal_format(*managed_decimal_format);
+
+ // 13. Return pluralRules.
+ return plural_rules;
+}
+
+MaybeHandle<String> JSPluralRules::ResolvePlural(
+ Isolate* isolate, Handle<JSPluralRules> plural_rules,
+ Handle<Object> number) {
+ icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules()->raw();
+ CHECK_NOT_NULL(icu_plural_rules);
+
+ icu::DecimalFormat* icu_decimal_format =
+ plural_rules->icu_decimal_format()->raw();
+ CHECK_NOT_NULL(icu_decimal_format);
+
+ // Currently, PluralRules doesn't implement all the options for rounding that
+ // the Intl spec provides; format and parse the number to round to the
+ // appropriate amount, then apply PluralRules.
+ //
+ // TODO(littledan): If a future ICU version supports an extended API to avoid
+ // this step, then switch to that API. Bug thread:
+ // http://bugs.icu-project.org/trac/ticket/12763
+ icu::UnicodeString rounded_string;
+ icu_decimal_format->format(number->Number(), rounded_string);
+
+ icu::Formattable formattable;
+ UErrorCode status = U_ZERO_ERROR;
+ icu_decimal_format->parse(rounded_string, formattable, status);
+ CHECK(U_SUCCESS(status));
+
+ double rounded = formattable.getDouble(status);
+ CHECK(U_SUCCESS(status));
+
+ icu::UnicodeString result = icu_plural_rules->select(rounded);
+ return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+}
+
+namespace {
+
+void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
+ Handle<Object> value, const char* key) {
+ Handle<String> key_str = isolate->factory()->NewStringFromAsciiChecked(key);
+
+ // This is a brand new JSObject that shouldn't already have the same
+ // key so this shouldn't fail.
+ CHECK(JSReceiver::CreateDataProperty(isolate, options, key_str, value,
+ kDontThrow)
+ .FromJust());
+}
+
+void CreateDataPropertyForOptions(Isolate* isolate, Handle<JSObject> options,
+ int value, const char* key) {
+ Handle<Smi> value_smi(Smi::FromInt(value), isolate);
+ CreateDataPropertyForOptions(isolate, options, value_smi, key);
+}
+
+} // namespace
+
+Handle<JSObject> JSPluralRules::ResolvedOptions(
+ Isolate* isolate, Handle<JSPluralRules> plural_rules) {
+ Handle<JSObject> options =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ Handle<String> locale_value(plural_rules->locale(), isolate);
+ CreateDataPropertyForOptions(isolate, options, locale_value, "locale");
+
+ Handle<String> type_value(plural_rules->type(), isolate);
+ CreateDataPropertyForOptions(isolate, options, type_value, "type");
+
+ icu::DecimalFormat* icu_decimal_format =
+ plural_rules->icu_decimal_format()->raw();
+ CHECK_NOT_NULL(icu_decimal_format);
+
+ // This is a safe upcast as icu::DecimalFormat inherits from
+ // icu::NumberFormat.
+ icu::NumberFormat* icu_number_format =
+ static_cast<icu::NumberFormat*>(icu_decimal_format);
+
+ int min_int_digits = icu_number_format->getMinimumIntegerDigits();
+ CreateDataPropertyForOptions(isolate, options, min_int_digits,
+ "minimumIntegerDigits");
+
+ int min_fraction_digits = icu_number_format->getMinimumFractionDigits();
+ CreateDataPropertyForOptions(isolate, options, min_fraction_digits,
+ "minimumFractionDigits");
+
+ int max_fraction_digits = icu_number_format->getMaximumFractionDigits();
+ CreateDataPropertyForOptions(isolate, options, max_fraction_digits,
+ "maximumFractionDigits");
+
+ if (icu_decimal_format->areSignificantDigitsUsed()) {
+ int min_significant_digits =
+ icu_decimal_format->getMinimumSignificantDigits();
+ CreateDataPropertyForOptions(isolate, options, min_significant_digits,
+ "minimumSignificantDigits");
+
+ int max_significant_digits =
+ icu_decimal_format->getMaximumSignificantDigits();
+ CreateDataPropertyForOptions(isolate, options, max_significant_digits,
+ "maximumSignificantDigits");
+ }
+
+ // 6. Let pluralCategories be a List of Strings representing the
+ // possible results of PluralRuleSelect for the selected locale pr.
+ icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules()->raw();
+ CHECK_NOT_NULL(icu_plural_rules);
+
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> categories(
+ icu_plural_rules->getKeywords(status));
+ CHECK(U_SUCCESS(status));
+ int32_t count = categories->count(status);
+ CHECK(U_SUCCESS(status));
+
+ Handle<FixedArray> plural_categories =
+ isolate->factory()->NewFixedArray(count);
+ for (int32_t i = 0; i < count; i++) {
+ const icu::UnicodeString* category = categories->snext(status);
+ CHECK(U_SUCCESS(status));
+ if (category == nullptr) break;
+
+ std::string keyword;
+ Handle<String> value = isolate->factory()->NewStringFromAsciiChecked(
+ category->toUTF8String(keyword).data());
+ plural_categories->set(i, *value);
+ }
+
+ // 7. Perform ! CreateDataProperty(options, "pluralCategories",
+ // CreateArrayFromList(pluralCategories)).
+ Handle<JSArray> plural_categories_value =
+ isolate->factory()->NewJSArrayWithElements(plural_categories);
+ CreateDataPropertyForOptions(isolate, options, plural_categories_value,
+ "pluralCategories");
+
+ return options;
+}
+
+} // namespace internal
+} // namespace v8
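Because icu::PluralRules has no digit-options rounding of its own, ResolvePlural() above rounds the number by formatting and re-parsing it through the DecimalFormat before asking PluralRules::select() for the category. A standalone sketch of that round-trip, assuming ICU is available; the explicit setMaximumFractionDigits() call stands in for the SetNumberFormatDigitOptions step:

// Standalone ICU sketch of the format/parse/select round-trip used by
// ResolvePlural() above.
#include <iostream>
#include <memory>
#include <string>

#include "unicode/decimfmt.h"
#include "unicode/fmtable.h"
#include "unicode/numfmt.h"
#include "unicode/plurrule.h"

int main() {
  UErrorCode status = U_ZERO_ERROR;
  icu::Locale locale("en");

  std::unique_ptr<icu::PluralRules> rules(
      icu::PluralRules::forLocale(locale, UPLURAL_TYPE_CARDINAL, status));
  std::unique_ptr<icu::DecimalFormat> format(static_cast<icu::DecimalFormat*>(
      icu::NumberFormat::createInstance(locale, UNUM_DECIMAL, status)));
  if (U_FAILURE(status) || !rules || !format) return 1;

  format->setMaximumFractionDigits(0);  // stand-in for the digit options

  // format + parse rounds 1.4 to 1 before the plural category is selected.
  icu::UnicodeString rounded_string;
  format->format(1.4, rounded_string);
  icu::Formattable formattable;
  format->parse(rounded_string, formattable, status);
  double rounded = formattable.getDouble(status);
  if (U_FAILURE(status)) return 1;

  std::string category;
  rules->select(rounded).toUTF8String(category);
  std::cout << category << "\n";  // "one" for English cardinal 1
  return 0;
}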
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
new file mode 100644
index 0000000000..9d5da795ab
--- /dev/null
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -0,0 +1,69 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_JS_PLURAL_RULES_H_
+#define V8_OBJECTS_JS_PLURAL_RULES_H_
+
+#include "src/heap/factory.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/objects/intl-objects.h"
+#include "src/objects/managed.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSPluralRules : public JSObject {
+ public:
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> InitializePluralRules(
+ Isolate* isolate, Handle<JSPluralRules> plural_rules,
+ Handle<Object> locales, Handle<Object> options);
+
+ static Handle<JSObject> ResolvedOptions(Isolate* isolate,
+ Handle<JSPluralRules> plural_rules);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ResolvePlural(
+ Isolate* isolate, Handle<JSPluralRules> plural_rules,
+ Handle<Object> number);
+
+ DECL_CAST(JSPluralRules)
+ DECL_PRINTER(JSPluralRules)
+ DECL_VERIFIER(JSPluralRules)
+
+// Layout description.
+#define JS_PLURAL_RULES_FIELDS(V) \
+ V(kLocaleOffset, kPointerSize) \
+ /* In the future, this can be an enum, \
+ and not a string. */ \
+ V(kTypeOffset, kPointerSize) \
+ V(kICUPluralRulesOffset, kPointerSize) \
+ V(kICUDecimalFormatOffset, kPointerSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_PLURAL_RULES_FIELDS)
+#undef JS_PLURAL_RULES_FIELDS
+
+ DECL_ACCESSORS(locale, String)
+ DECL_ACCESSORS(type, String)
+ DECL_ACCESSORS(icu_plural_rules, Managed<icu::PluralRules>)
+ DECL_ACCESSORS(icu_decimal_format, Managed<icu::DecimalFormat>)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSPluralRules);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PLURAL_RULES_H_
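The JS_PLURAL_RULES_FIELDS / DEFINE_FIELD_OFFSET_CONSTANTS pair above generates the same cumulative offsets that other headers in this patch (JSListFormat, JSGeneratorObject) spell out by hand. A sketch of the resulting arithmetic with hypothetical stand-in constants (a 64-bit pointer size and a three-word JSObject header are assumptions, not taken from the patch):

// Each offset is the previous offset plus the previous field's size.
#include <cstdio>

constexpr int kPointerSize = 8;                // 64-bit build assumption
constexpr int kHeaderSize = 3 * kPointerSize;  // stand-in for JSObject header

constexpr int kLocaleOffset = kHeaderSize;
constexpr int kTypeOffset = kLocaleOffset + kPointerSize;
constexpr int kICUPluralRulesOffset = kTypeOffset + kPointerSize;
constexpr int kICUDecimalFormatOffset = kICUPluralRulesOffset + kPointerSize;
constexpr int kSize = kICUDecimalFormatOffset + kPointerSize;

int main() {
  std::printf("locale=%d type=%d rules=%d format=%d size=%d\n", kLocaleOffset,
              kTypeOffset, kICUPluralRulesOffset, kICUDecimalFormatOffset,
              kSize);
  return 0;
}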
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index 3eb1a4fff1..326ba82473 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -5,9 +5,11 @@
#ifndef V8_OBJECTS_JS_PROMISE_INL_H_
#define V8_OBJECTS_JS_PROMISE_INL_H_
-#include "src/objects.h"
#include "src/objects/js-promise.h"
+#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
new file mode 100644
index 0000000000..989d286b40
--- /dev/null
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_PROXY_INL_H_
+#define V8_OBJECTS_JS_PROXY_INL_H_
+
+#include "src/objects/js-proxy.h"
+
+#include "src/objects-inl.h" // Needed for write barriers
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(JSProxy)
+
+ACCESSORS(JSProxy, target, Object, kTargetOffset)
+ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+
+bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PROXY_INL_H_
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
new file mode 100644
index 0000000000..45e27473fe
--- /dev/null
+++ b/deps/v8/src/objects/js-proxy.h
@@ -0,0 +1,155 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_PROXY_H_
+#define V8_OBJECTS_JS_PROXY_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// The JSProxy describes ECMAScript Harmony proxies.
+class JSProxy : public JSReceiver {
+ public:
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
+ Handle<Object>,
+ Handle<Object>);
+
+ // [handler]: The handler property.
+ DECL_ACCESSORS(handler, Object)
+ // [target]: The target property.
+ DECL_ACCESSORS(target, Object)
+
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
+
+ DECL_CAST(JSProxy)
+
+ V8_INLINE bool IsRevoked() const;
+ static void Revoke(Handle<JSProxy> proxy);
+
+ // ES6 9.5.1
+ static MaybeHandle<Object> GetPrototype(Handle<JSProxy> receiver);
+
+ // ES6 9.5.2
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
+ Handle<JSProxy> proxy, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
+ // ES6 9.5.3
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
+
+ // ES6, #sec-isarray. NOT to be confused with %_IsArray.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<JSProxy> proxy);
+
+ // ES6 9.5.4 (when passed kDontThrow)
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSProxy> proxy, ShouldThrow should_throw);
+
+ // ES6 9.5.5
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
+ PropertyDescriptor* desc);
+
+ // ES6 9.5.6
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSProxy> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
+ // ES6 9.5.7
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name);
+
+ // This function never returns false.
+ // It returns either true or throws.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CheckHasTrap(
+ Isolate* isolate, Handle<Name> name, Handle<JSReceiver> target);
+
+ // ES6 9.5.8
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetProperty(
+ Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
+ Handle<Object> receiver, bool* was_found);
+
+ enum AccessKind { kGet, kSet };
+
+ static MaybeHandle<Object> CheckGetSetTrapResult(Isolate* isolate,
+ Handle<Name> name,
+ Handle<JSReceiver> target,
+ Handle<Object> trap_result,
+ AccessKind access_kind);
+
+ // ES6 9.5.9
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
+ Handle<JSProxy> proxy, Handle<Name> name, Handle<Object> value,
+ Handle<Object> receiver, LanguageMode language_mode);
+
+ // ES6 9.5.10 (when passed LanguageMode::kSloppy)
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyOrElement(
+ Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
+
+ // ES6 9.5.12
+ V8_WARN_UNUSED_RESULT static Maybe<bool> OwnPropertyKeys(
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSProxy> proxy,
+ PropertyFilter filter, KeyAccumulator* accumulator);
+
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+ LookupIterator* it);
+
+ // Dispatched behavior.
+ DECL_PRINTER(JSProxy)
+ DECL_VERIFIER(JSProxy)
+
+ static const int kMaxIterationLimit = 100 * 1024;
+
+ // Layout description.
+ static const int kTargetOffset = JSReceiver::kHeaderSize;
+ static const int kHandlerOffset = kTargetOffset + kPointerSize;
+ static const int kSize = kHandlerOffset + kPointerSize;
+
+ // kTargetOffset aliases with the elements of JSObject. The fact that
+  // JSProxy::target is a JavaScript value which cannot be confused with an
+ // elements backing store is exploited by loading from this offset from an
+ // unknown JSReceiver.
+ STATIC_ASSERT(JSObject::kElementsOffset == JSProxy::kTargetOffset);
+
+ typedef FixedBodyDescriptor<JSReceiver::kPropertiesOrHashOffset, kSize, kSize>
+ BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ static Maybe<bool> SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
+};
+
+// JSProxyRevocableResult is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "proxy" and "revoke".
+// See https://tc39.github.io/ecma262/#sec-proxy.revocable
+class JSProxyRevocableResult : public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kProxyOffset = JSObject::kHeaderSize;
+ static const int kRevokeOffset = kProxyOffset + kPointerSize;
+ static const int kSize = kRevokeOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kProxyIndex = 0;
+ static const int kRevokeIndex = 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxyRevocableResult);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_PROXY_H_
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index c6f31e81af..eeae4fb7ad 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -7,6 +7,7 @@
#include "src/objects/js-regexp.h"
+#include "src/objects-inl.h" // Needed for write barriers
#include "src/objects/string.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index ca099d48e5..4fc8f88841 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -7,6 +7,8 @@
#include "src/objects/js-regexp-string-iterator.h"
+#include "src/objects-inl.h" // Needed for write barriers
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 3d2a5c8e09..6dc984e252 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -19,37 +19,32 @@ namespace v8 {
namespace internal {
// Base relative time format accessors.
-ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset);
-ACCESSORS(JSRelativeTimeFormat, formatter, Foreign, kFormatterOffset);
+ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset)
+ACCESSORS(JSRelativeTimeFormat, formatter, Foreign, kFormatterOffset)
+SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
// TODO(ftang): Use bit field accessor for style and numeric later.
inline void JSRelativeTimeFormat::set_style(Style style) {
DCHECK_GT(Style::COUNT, style);
- int value = static_cast<int>(style);
- WRITE_FIELD(this, kStyleOffset, Smi::FromInt(value));
+ int hints = flags();
+ hints = StyleBits::update(hints, style);
+ set_flags(hints);
}
inline JSRelativeTimeFormat::Style JSRelativeTimeFormat::style() const {
- Object* value = READ_FIELD(this, kStyleOffset);
- int style = Smi::ToInt(value);
- DCHECK_LE(0, style);
- DCHECK_GT(static_cast<int>(Style::COUNT), style);
- return static_cast<Style>(style);
+ return StyleBits::decode(flags());
}
inline void JSRelativeTimeFormat::set_numeric(Numeric numeric) {
DCHECK_GT(Numeric::COUNT, numeric);
- int value = static_cast<int>(numeric);
- WRITE_FIELD(this, kNumericOffset, Smi::FromInt(value));
+ int hints = flags();
+ hints = NumericBits::update(hints, numeric);
+ set_flags(hints);
}
inline JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::numeric() const {
- Object* value = READ_FIELD(this, kNumericOffset);
- int numeric = Smi::ToInt(value);
- DCHECK_LE(0, numeric);
- DCHECK_GT(static_cast<int>(Numeric::COUNT), numeric);
- return static_cast<Numeric>(numeric);
+ return NumericBits::decode(flags());
}
CAST_ACCESSOR(JSRelativeTimeFormat);
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index c70db1b339..56130f7311 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -17,12 +17,10 @@
#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/managed.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
-// Has to be the last include (doesn't have include guards):
-#include "src/objects/object-macros.h"
-
namespace v8 {
namespace internal {
@@ -40,28 +38,28 @@ UDateRelativeDateTimeFormatterStyle getIcuStyle(
UNREACHABLE();
}
}
+} // namespace
-JSRelativeTimeFormat::Style getStyle(const char* str) {
+JSRelativeTimeFormat::Style JSRelativeTimeFormat::getStyle(const char* str) {
if (strcmp(str, "long") == 0) return JSRelativeTimeFormat::Style::LONG;
if (strcmp(str, "short") == 0) return JSRelativeTimeFormat::Style::SHORT;
if (strcmp(str, "narrow") == 0) return JSRelativeTimeFormat::Style::NARROW;
UNREACHABLE();
}
-JSRelativeTimeFormat::Numeric getNumeric(const char* str) {
+JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::getNumeric(
+ const char* str) {
if (strcmp(str, "auto") == 0) return JSRelativeTimeFormat::Numeric::AUTO;
if (strcmp(str, "always") == 0) return JSRelativeTimeFormat::Numeric::ALWAYS;
UNREACHABLE();
}
-} // namespace
-
MaybeHandle<JSRelativeTimeFormat>
JSRelativeTimeFormat::InitializeRelativeTimeFormat(
Isolate* isolate, Handle<JSRelativeTimeFormat> relative_time_format_holder,
Handle<Object> input_locales, Handle<Object> input_options) {
Factory* factory = isolate->factory();
-
+ relative_time_format_holder->set_flags(0);
// 4. If options is undefined, then
Handle<JSReceiver> options;
if (input_options->IsUndefined(isolate)) {
@@ -137,19 +135,11 @@ JSRelativeTimeFormat::InitializeRelativeTimeFormat(
// ? Construct(%NumberFormat%, « nfLocale, nfOptions »).
icu::NumberFormat* number_format =
icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status);
- if (U_FAILURE(status) || number_format == nullptr) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kRelativeDateTimeFormatterBadParameters),
- JSRelativeTimeFormat);
+ if (U_FAILURE(status)) {
+ delete number_format;
+ FATAL("Failed to create ICU number format, are ICU data files missing?");
}
- // 23. Perform ! CreateDataPropertyOrThrow(nfOptions, "useGrouping", false).
- number_format->setGroupingUsed(false);
-
- // 24. Perform ! CreateDataPropertyOrThrow(nfOptions,
- // "minimumIntegerDigits", 2).
- // Ref: https://github.com/tc39/proposal-intl-relative-time/issues/80
- number_format->setMinimumIntegerDigits(2);
+ CHECK_NOT_NULL(number_format);
// Change UDISPCTX_CAPITALIZATION_NONE to other values if
// ECMA402 later include option to change capitalization.
@@ -158,13 +148,14 @@ JSRelativeTimeFormat::InitializeRelativeTimeFormat(
new icu::RelativeDateTimeFormatter(icu_locale, number_format,
getIcuStyle(style_enum),
UDISPCTX_CAPITALIZATION_NONE, status);
-
- if (U_FAILURE(status) || (icu_formatter == nullptr)) {
- THROW_NEW_ERROR(
- isolate,
- NewRangeError(MessageTemplate::kRelativeDateTimeFormatterBadParameters),
- JSRelativeTimeFormat);
+ if (U_FAILURE(status)) {
+ delete icu_formatter;
+ FATAL(
+ "Failed to create ICU relative date time formatter, are ICU data files "
+ "missing?");
}
+ CHECK_NOT_NULL(icu_formatter);
+
Handle<Managed<icu::RelativeDateTimeFormatter>> managed_formatter =
Managed<icu::RelativeDateTimeFormatter>::FromRawPtr(isolate, 0,
icu_formatter);
@@ -190,7 +181,7 @@ Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
}
icu::RelativeDateTimeFormatter* JSRelativeTimeFormat::UnpackFormatter(
- Isolate* isolate, Handle<JSRelativeTimeFormat> holder) {
+ Handle<JSRelativeTimeFormat> holder) {
return Managed<icu::RelativeDateTimeFormatter>::cast(holder->formatter())
->raw();
}
@@ -221,5 +212,3 @@ Handle<String> JSRelativeTimeFormat::NumericAsString() const {
} // namespace internal
} // namespace v8
-
-#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 70d1cdacf4..397c6fe287 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -38,7 +38,6 @@ class JSRelativeTimeFormat : public JSObject {
// Unpacks formatter object from corresponding JavaScript object.
static icu::RelativeDateTimeFormatter* UnpackFormatter(
- Isolate* isolate,
Handle<JSRelativeTimeFormat> relative_time_format_holder);
Handle<String> StyleAsString() const;
Handle<String> NumericAsString() const;
@@ -47,10 +46,9 @@ class JSRelativeTimeFormat : public JSObject {
// RelativeTimeFormat accessors.
DECL_ACCESSORS(locale, String)
- // TODO(ftang): Style requires only 3 bits and Numeric requires only 2 bits
- // but here we're using 64 bits for each. We should fold these two fields into
- // a single Flags field and use BIT_FIELD_ACCESSORS to access it.
- //
+
+ DECL_ACCESSORS(formatter, Foreign)
+
// Style: identifying the relative time format style used.
//
// ecma402/#sec-properties-of-intl-relativetimeformat-instances
@@ -78,22 +76,36 @@ class JSRelativeTimeFormat : public JSObject {
inline void set_numeric(Numeric numeric);
inline Numeric numeric() const;
- DECL_ACCESSORS(formatter, Foreign)
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(StyleBits, Style, 2, _) \
+ V(NumericBits, Numeric, 1, _)
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
+
+ STATIC_ASSERT(Style::LONG <= StyleBits::kMax);
+ STATIC_ASSERT(Style::SHORT <= StyleBits::kMax);
+ STATIC_ASSERT(Style::NARROW <= StyleBits::kMax);
+ STATIC_ASSERT(Numeric::AUTO <= NumericBits::kMax);
+ STATIC_ASSERT(Numeric::ALWAYS <= NumericBits::kMax);
+
+  // [flags] Bit field containing various flags about the relative time format.
+ DECL_INT_ACCESSORS(flags)
+
DECL_PRINTER(JSRelativeTimeFormat)
DECL_VERIFIER(JSRelativeTimeFormat)
// Layout description.
static const int kJSRelativeTimeFormatOffset = JSObject::kHeaderSize;
static const int kLocaleOffset = kJSRelativeTimeFormatOffset + kPointerSize;
- static const int kStyleOffset = kLocaleOffset + kPointerSize;
- static const int kNumericOffset = kStyleOffset + kPointerSize;
- static const int kFormatterOffset = kNumericOffset + kPointerSize;
- static const int kSize = kFormatterOffset + kPointerSize;
-
- // Constant to access field
- static const int kFormatterField = 3;
+ static const int kFormatterOffset = kLocaleOffset + kPointerSize;
+ static const int kFlagsOffset = kFormatterOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
private:
+ static Style getStyle(const char* str);
+ static Numeric getNumeric(const char* str);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSRelativeTimeFormat);
};
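The StyleBits/NumericBits declaration above packs the style (2 bits) and numeric (1 bit) settings into the single flags word that replaces the former per-field storage. A minimal standalone sketch of that packing, with hypothetical names and without V8's DEFINE_BIT_FIELDS machinery (not part of this diff):

#include <cassert>
#include <cstdint>

enum class Style : uint32_t { LONG = 0, SHORT = 1, NARROW = 2 };  // needs 2 bits
enum class Numeric : uint32_t { ALWAYS = 0, AUTO = 1 };           // needs 1 bit

constexpr uint32_t kStyleShift = 0, kStyleSize = 2;
constexpr uint32_t kNumericShift = kStyleShift + kStyleSize, kNumericSize = 1;

constexpr uint32_t Encode(Style s, Numeric n) {
  return (static_cast<uint32_t>(s) << kStyleShift) |
         (static_cast<uint32_t>(n) << kNumericShift);
}
constexpr Style DecodeStyle(uint32_t flags) {
  return static_cast<Style>((flags >> kStyleShift) & ((1u << kStyleSize) - 1));
}
constexpr Numeric DecodeNumeric(uint32_t flags) {
  return static_cast<Numeric>((flags >> kNumericShift) & ((1u << kNumericSize) - 1));
}

int main() {
  uint32_t flags = Encode(Style::NARROW, Numeric::AUTO);
  assert(DecodeStyle(flags) == Style::NARROW);
  assert(DecodeNumeric(flags) == Numeric::AUTO);
  return 0;
}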
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 05d2416996..59f061dc05 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_MAP_INL_H_
#include "src/objects/map.h"
-
#include "src/field-type.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks-inl.h"
@@ -298,6 +297,17 @@ int Map::UnusedPropertyFields() const {
return unused;
}
+int Map::UnusedInObjectProperties() const {
+ // Like Map::UnusedPropertyFields(), but returns 0 for out of object
+ // properties.
+ int value = used_or_unused_instance_size_in_words();
+ DCHECK_IMPLIES(!IsJSObjectMap(), value == 0);
+ if (value >= JSObject::kFieldsAdded) {
+ return instance_size_in_words() - value;
+ }
+ return 0;
+}
+
int Map::used_or_unused_instance_size_in_words() const {
return RELAXED_READ_BYTE_FIELD(this, kUsedOrUnusedInstanceSizeInWordsOffset);
}
@@ -349,6 +359,17 @@ void Map::CopyUnusedPropertyFields(Map* map) {
DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
}
+void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map* map) {
+ int value = map->used_or_unused_instance_size_in_words();
+ if (value >= JSValue::kFieldsAdded) {
+    // Unused in-object fields. Adjust the offset from the object's start
+    // so it matches the distance to the object's end.
+ value += instance_size_in_words() - map->instance_size_in_words();
+ }
+ set_used_or_unused_instance_size_in_words(value);
+ DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+}
+
void Map::AccountAddedPropertyField() {
// Update used instance size and unused property fields number.
STATIC_ASSERT(JSObject::kFieldsAdded == JSObject::kHeaderSize / kPointerSize);
@@ -503,6 +524,17 @@ bool Map::CanTransition() const {
bool Map::IsBooleanMap() const {
return this == GetReadOnlyRoots().boolean_map();
}
+
+bool Map::IsNullMap() const { return this == GetReadOnlyRoots().null_map(); }
+
+bool Map::IsUndefinedMap() const {
+ return this == GetReadOnlyRoots().undefined_map();
+}
+
+bool Map::IsNullOrUndefinedMap() const {
+ return IsNullMap() || IsUndefinedMap();
+}
+
bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
@@ -536,8 +568,7 @@ Object* Map::prototype() const { return READ_FIELD(this, kPrototypeOffset); }
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
DCHECK(value->IsNull() || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
- kPrototypeOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, kPrototypeOffset, value, mode);
}
LayoutDescriptor* Map::layout_descriptor_gc_safe() const {
@@ -657,8 +688,7 @@ Object* Map::prototype_info() const {
void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
CHECK(is_prototype_map());
WRITE_FIELD(this, Map::kTransitionsOrPrototypeInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this,
- Map::kTransitionsOrPrototypeInfoOffset, value,
+ CONDITIONAL_WRITE_BARRIER(this, Map::kTransitionsOrPrototypeInfoOffset, value,
mode);
}
@@ -672,7 +702,6 @@ void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
}
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
-ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
ACCESSORS(Map, constructor_or_backpointer, Object,
kConstructorOrBackPointerOffset)
@@ -746,8 +775,8 @@ int NormalizedMapCache::GetIndex(Handle<Map> map) {
}
bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
- if (!obj->IsFixedArray()) return false;
- if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
+ if (!obj->IsWeakFixedArray()) return false;
+ if (WeakFixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
return false;
}
#ifdef VERIFY_HEAP
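The two Map helpers added above hinge on one encoding: used_or_unused_instance_size_in_words stores either the used in-object size in words (values at or above kFieldsAdded) or, for smaller values, the slack left in the out-of-object property array. A rough standalone sketch of UnusedInObjectProperties() under that assumption, with plain ints standing in for the Map fields (not part of this diff):

#include <cassert>

constexpr int kFieldsAdded = 3;  // stands in for JSObject::kFieldsAdded

int UnusedInObjectProperties(int instance_size_in_words,
                             int used_or_unused_in_words) {
  if (used_or_unused_in_words >= kFieldsAdded) {
    // The stored value is the used in-object size; whatever remains of the
    // instance is unused in-object slack.
    return instance_size_in_words - used_or_unused_in_words;
  }
  // Small values describe the out-of-object property array, so there is no
  // unused in-object space to report.
  return 0;
}

int main() {
  assert(UnusedInObjectProperties(10, 8) == 2);  // 2 unused in-object words
  assert(UnusedInObjectProperties(10, 1) == 0);  // slack lives in the property array
  return 0;
}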
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 09afb83a8f..397f874610 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -16,52 +16,52 @@
namespace v8 {
namespace internal {
-#define VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(DataHandler) \
- V(DataObject) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(FreeSpace) \
- V(JSApiObject) \
- V(JSArrayBuffer) \
- V(JSFunction) \
- V(JSObject) \
- V(JSObjectFast) \
- V(JSWeakCollection) \
- V(Map) \
- V(NativeContext) \
- V(Oddball) \
- V(PreParsedScopeData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(ShortcutCandidate) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Struct) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledDataWithPreParsedScope) \
- V(WasmInstanceObject) \
- V(WeakCell) \
+#define VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(ConsString) \
+ V(DataHandler) \
+ V(DataObject) \
+ V(EphemeronHashTable) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(FreeSpace) \
+ V(JSApiObject) \
+ V(JSArrayBuffer) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSObjectFast) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(NativeContext) \
+ V(Oddball) \
+ V(PreParsedScopeData) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PrototypeInfo) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(ShortcutCandidate) \
+ V(SlicedString) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(Struct) \
+ V(Symbol) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledDataWithoutPreParsedScope) \
+ V(UncompiledDataWithPreParsedScope) \
+ V(WasmInstanceObject) \
V(WeakArray)
// For data objects, JS objects and structs along with generic visitor which
@@ -166,8 +166,6 @@ typedef std::vector<Handle<Map>> MapHandles;
// +*************************************************************+
// | TaggedPointer | [dependent_code] |
// +---------------+---------------------------------------------+
-// | TaggedPointer | [weak_cell_cache] |
-// +---------------+---------------------------------------------+
class Map : public HeapObject {
public:
@@ -213,11 +211,14 @@ class Map : public HeapObject {
// Tells how many unused property fields (in-object or out-of object) are
// available in the instance (only used for JSObject in fast mode).
inline int UnusedPropertyFields() const;
+ // Tells how many unused in-object property words are present.
+ inline int UnusedInObjectProperties() const;
// Updates the counters tracking unused fields in the object.
inline void SetInObjectUnusedPropertyFields(int unused_property_fields);
// Updates the counters tracking unused fields in the property array.
inline void SetOutOfObjectUnusedPropertyFields(int unused_property_fields);
inline void CopyUnusedPropertyFields(Map* map);
+ inline void CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map* map);
inline void AccountAddedPropertyField();
inline void AccountAddedOutOfObjectPropertyField(
int unused_in_property_array);
@@ -328,6 +329,11 @@ class Map : public HeapObject {
// Does the tracking step.
inline void InobjectSlackTrackingStep(Isolate* isolate);
+ // Computes inobject slack for the transition tree starting at this initial
+ // map.
+ int ComputeMinObjectSlack(Isolate* isolate);
+ inline int InstanceSizeFromSlack(int slack) const;
+
// Completes inobject slack tracking for the transition tree starting at this
// initial map.
void CompleteInobjectSlackTracking(Isolate* isolate);
@@ -441,11 +447,6 @@ class Map : public HeapObject {
// Return the map of the root of object's prototype chain.
Map* GetPrototypeChainRootMap(Isolate* isolate) const;
- // Returns a WeakCell object containing given prototype. The cell is cached
- // in PrototypeInfo which is created lazily.
- static Handle<WeakCell> GetOrCreatePrototypeWeakCell(
- Handle<JSReceiver> prototype, Isolate* isolate);
-
Map* FindRootMap(Isolate* isolate) const;
Map* FindFieldOwner(Isolate* isolate, int descriptor) const;
@@ -574,9 +575,6 @@ class Map : public HeapObject {
// [dependent code]: list of optimized codes that weakly embed this map.
DECL_ACCESSORS(dependent_code, DependentCode)
- // [weak cell cache]: cache that stores a weak cell pointing to this map.
- DECL_ACCESSORS(weak_cell_cache, Object)
-
// [prototype_validity_cell]: Cell containing the validity bit for prototype
// chains or Smi(0) if uninitialized.
// The meaning of this validity cell is different for prototype maps and
@@ -656,7 +654,8 @@ class Map : public HeapObject {
Descriptor* descriptor,
TransitionFlag flag);
- static MaybeObjectHandle WrapFieldType(Handle<FieldType> type);
+ static MaybeObjectHandle WrapFieldType(Isolate* isolate,
+ Handle<FieldType> type);
static FieldType* UnwrapFieldType(MaybeObject* wrapped_type);
V8_WARN_UNUSED_RESULT static MaybeHandle<Map> CopyWithField(
@@ -762,6 +761,9 @@ class Map : public HeapObject {
inline bool CanTransition() const;
inline bool IsBooleanMap() const;
+ inline bool IsNullMap() const;
+ inline bool IsUndefinedMap() const;
+ inline bool IsNullOrUndefinedMap() const;
inline bool IsPrimitiveMap() const;
inline bool IsJSReceiverMap() const;
inline bool IsJSObjectMap() const;
@@ -780,8 +782,6 @@ class Map : public HeapObject {
bool IsMapInArrayPrototypeChain(Isolate* isolate) const;
- static Handle<WeakCell> WeakCellForMap(Isolate* isolate, Handle<Map> map);
-
// Dispatched behavior.
DECL_PRINTER(Map)
DECL_VERIFIER(Map)
@@ -820,7 +820,6 @@ class Map : public HeapObject {
V(kDescriptorsOffset, kPointerSize) \
V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kPointerSize : 0) \
V(kDependentCodeOffset, kPointerSize) \
- V(kWeakCellCacheOffset, kPointerSize) \
V(kPrototypeValidityCellOffset, kPointerSize) \
V(kPointerFieldsEndOffset, 0) \
/* Total size. */ \
@@ -969,7 +968,8 @@ class Map : public HeapObject {
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
// needs very limited number of distinct normalized maps.
-class NormalizedMapCache : public FixedArray, public NeverReadOnlySpaceObject {
+class NormalizedMapCache : public WeakFixedArray,
+ public NeverReadOnlySpaceObject {
public:
using NeverReadOnlySpaceObject::GetHeap;
using NeverReadOnlySpaceObject::GetIsolate;
@@ -978,10 +978,7 @@ class NormalizedMapCache : public FixedArray, public NeverReadOnlySpaceObject {
V8_WARN_UNUSED_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
PropertyNormalizationMode mode);
- void Set(Handle<Map> fast_map, Handle<Map> normalized_map,
- Handle<WeakCell> normalized_map_weak_cell);
-
- void Clear();
+ void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
DECL_CAST(NormalizedMapCache)
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index 0c04550673..fa3cd8c14f 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -7,8 +7,7 @@
#include "src/objects/maybe-object.h"
-#include "include/v8.h"
-#include "src/globals.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -26,7 +25,7 @@ Smi* MaybeObject::ToSmi() {
return Smi::cast(reinterpret_cast<Object*>(this));
}
-bool MaybeObject::IsStrongOrWeakHeapObject() {
+bool MaybeObject::IsStrongOrWeakHeapObject() const {
if (IsSmi() || IsClearedWeakHeapObject()) {
return false;
}
@@ -53,7 +52,7 @@ bool MaybeObject::ToStrongOrWeakHeapObject(
return true;
}
-bool MaybeObject::IsStrongHeapObject() {
+bool MaybeObject::IsStrongHeapObject() const {
return !HasWeakHeapObjectTag(this) && !IsSmi();
}
@@ -70,11 +69,11 @@ HeapObject* MaybeObject::ToStrongHeapObject() {
return reinterpret_cast<HeapObject*>(this);
}
-bool MaybeObject::IsWeakHeapObject() {
+bool MaybeObject::IsWeakHeapObject() const {
return HasWeakHeapObjectTag(this) && !IsClearedWeakHeapObject();
}
-bool MaybeObject::IsWeakOrClearedHeapObject() {
+bool MaybeObject::IsWeakOrClearedHeapObject() const {
return HasWeakHeapObjectTag(this);
}
@@ -104,7 +103,7 @@ Object* MaybeObject::GetHeapObjectOrSmi() {
return GetHeapObject();
}
-bool MaybeObject::IsObject() { return IsSmi() || IsStrongHeapObject(); }
+bool MaybeObject::IsObject() const { return IsSmi() || IsStrongHeapObject(); }
Object* MaybeObject::ToObject() {
DCHECK(!HasWeakHeapObjectTag(this));
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index 0d8751b652..84c8538224 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -7,12 +7,14 @@
#include "include/v8.h"
#include "src/globals.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
class HeapObject;
class Smi;
+class StringStream;
// A MaybeObject is either a SMI, a strong reference to a HeapObject, a weak
// reference to a HeapObject, or a cleared weak reference. It's used for
@@ -23,19 +25,19 @@ class MaybeObject {
inline bool ToSmi(Smi** value);
inline Smi* ToSmi();
- bool IsClearedWeakHeapObject() {
+ bool IsClearedWeakHeapObject() const {
return ::v8::internal::IsClearedWeakHeapObject(this);
}
- inline bool IsStrongOrWeakHeapObject();
+ inline bool IsStrongOrWeakHeapObject() const;
inline bool ToStrongOrWeakHeapObject(HeapObject** result);
inline bool ToStrongOrWeakHeapObject(HeapObject** result,
HeapObjectReferenceType* reference_type);
- inline bool IsStrongHeapObject();
+ inline bool IsStrongHeapObject() const;
inline bool ToStrongHeapObject(HeapObject** result);
inline HeapObject* ToStrongHeapObject();
- inline bool IsWeakHeapObject();
- inline bool IsWeakOrClearedHeapObject();
+ inline bool IsWeakHeapObject() const;
+ inline bool IsWeakOrClearedHeapObject() const;
inline bool ToWeakHeapObject(HeapObject** result);
inline HeapObject* ToWeakHeapObject();
@@ -43,7 +45,7 @@ class MaybeObject {
inline HeapObject* GetHeapObject();
inline Object* GetHeapObjectOrSmi();
- inline bool IsObject();
+ inline bool IsObject() const;
inline Object* ToObject();
static MaybeObject* FromSmi(Smi* smi) {
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index 71a9ea20ec..180f55be17 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -7,6 +7,8 @@
#include "src/objects/microtask.h"
+#include "src/objects-inl.h" // Needed for write barriers
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 1fffd01005..1a4f2b3efa 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -6,6 +6,8 @@
#define V8_OBJECTS_MODULE_INL_H_
#include "src/objects/module.h"
+
+#include "src/objects-inl.h" // Needed for write barriers
#include "src/objects/scope-info.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 8672d43264..02a94c446b 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -8,10 +8,11 @@
#include "src/objects/module.h"
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/modules.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
namespace v8 {
@@ -176,7 +177,7 @@ void Module::StoreVariable(Handle<Module> module, int cell_index,
}
#ifdef DEBUG
-void Module::PrintStatusTransition(Isolate* isolate, Status new_status) {
+void Module::PrintStatusTransition(Status new_status) {
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Changing module status from " << status() << " to " << new_status
@@ -189,12 +190,12 @@ void Module::PrintStatusTransition(Isolate* isolate, Status new_status) {
}
#endif // DEBUG
-void Module::SetStatus(Isolate* isolate, Status new_status) {
+void Module::SetStatus(Status new_status) {
DisallowHeapAllocation no_alloc;
DCHECK_LE(status(), new_status);
DCHECK_NE(new_status, Module::kErrored);
#ifdef DEBUG
- PrintStatusTransition(isolate, new_status);
+ PrintStatusTransition(new_status);
#endif // DEBUG
set_status(new_status);
}
@@ -240,7 +241,7 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
module->set_code(JSFunction::cast(module->code())->shared());
}
#ifdef DEBUG
- module->PrintStatusTransition(isolate, kUninstantiated);
+ module->PrintStatusTransition(kUninstantiated);
#endif // DEBUG
module->set_status(kUninstantiated);
module->set_exports(*exports);
@@ -259,7 +260,7 @@ void Module::RecordError(Isolate* isolate) {
set_code(info());
#ifdef DEBUG
- PrintStatusTransition(isolate, Module::kErrored);
+ PrintStatusTransition(Module::kErrored);
#endif // DEBUG
set_status(Module::kErrored);
set_exception(the_exception);
@@ -476,7 +477,7 @@ bool Module::PrepareInstantiate(Isolate* isolate, Handle<Module> module,
DCHECK_NE(module->status(), kEvaluating);
DCHECK_NE(module->status(), kInstantiating);
if (module->status() >= kPreInstantiating) return true;
- module->SetStatus(isolate, kPreInstantiating);
+ module->SetStatus(kPreInstantiating);
STACK_CHECK(isolate, false);
// Obtain requested modules.
@@ -571,7 +572,7 @@ bool Module::MaybeTransitionComponent(Isolate* isolate, Handle<Module> module,
if (new_status == kInstantiated) {
if (!RunInitializationCode(isolate, ancestor)) return false;
}
- ancestor->SetStatus(isolate, new_status);
+ ancestor->SetStatus(new_status);
} while (*ancestor != *module);
}
return true;
@@ -593,7 +594,7 @@ bool Module::FinishInstantiate(Isolate* isolate, Handle<Module> module,
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
module->set_code(*function);
- module->SetStatus(isolate, kInstantiating);
+ module->SetStatus(kInstantiating);
module->set_dfs_index(*dfs_index);
module->set_dfs_ancestor_index(*dfs_index);
stack->push_front(module);
@@ -715,7 +716,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
isolate);
module->set_code(
generator->function()->shared()->scope_info()->ModuleDescriptorInfo());
- module->SetStatus(isolate, kEvaluating);
+ module->SetStatus(kEvaluating);
module->set_dfs_index(*dfs_index);
module->set_dfs_ancestor_index(*dfs_index);
stack->push_front(module);
@@ -899,7 +900,10 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
// - We can store a pointer from the map back to the namespace object.
// Turbofan can use this for inlining the access.
JSObject::OptimizeAsPrototype(ns);
- Map::GetOrCreatePrototypeWeakCell(ns, isolate);
+
+ Handle<PrototypeInfo> proto_info =
+ Map::GetOrCreatePrototypeInfo(Handle<JSObject>::cast(ns), isolate);
+ proto_info->set_module_namespace(*ns);
return ns;
}
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 23c1356817..4612d73c89 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -201,12 +201,12 @@ class Module : public Struct, public NeverReadOnlySpaceObject {
static void ResetGraph(Isolate* isolate, Handle<Module> module);
// To set status to kErrored, RecordError should be used.
- void SetStatus(Isolate* isolate, Status status);
+ void SetStatus(Status status);
void RecordError(Isolate* isolate);
#ifdef DEBUG
// For --trace-module-status.
- void PrintStatusTransition(Isolate* isolate, Status new_status);
+ void PrintStatusTransition(Status new_status);
#endif // DEBUG
DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index c35aab095a..8176bb0324 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -24,6 +24,7 @@
#undef BOOL_GETTER
#undef BOOL_ACCESSORS
#undef BIT_FIELD_ACCESSORS
+#undef INSTANCE_TYPE_CHECKER
#undef TYPE_CHECKER
#undef FIELD_ADDR
#undef READ_FIELD
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 4bbf9e535b..9ec24a62f7 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -14,6 +14,8 @@
// for fields that can be written to and read from multiple threads at the same
 // time. See comments in src/base/atomicops.h for the memory ordering semantics.
+#include <src/v8memory.h>
+
#define DECL_PRIMITIVE_ACCESSORS(name, type) \
inline type name() const; \
inline void set_##name(type value);
@@ -28,6 +30,10 @@
inline uint16_t name() const; \
inline void set_##name(int value);
+#define DECL_UINT8_ACCESSORS(name) \
+ inline uint8_t name() const; \
+ inline void set_##name(int value);
+
#define DECL_ACCESSORS(name, type) \
inline type* name() const; \
inline void set_##name(type* value, \
@@ -65,18 +71,25 @@
WRITE_UINT16_FIELD(this, offset, value); \
}
-#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
- set_condition) \
- type* holder::name() const { \
- type* value = type::cast(READ_FIELD(this, offset)); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
- offset, value, mode); \
+#define UINT8_ACCESSORS(holder, name, offset) \
+ uint8_t holder::name() const { return READ_UINT8_FIELD(this, offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint8_t>(-1)); \
+ WRITE_UINT8_FIELD(this, offset, value); \
+ }
+
+#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+ set_condition) \
+ type* holder::name() const { \
+ type* value = type::cast(READ_FIELD(this, offset)); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \
}
#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
@@ -84,18 +97,17 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
- set_condition) \
- MaybeObject* holder::name() const { \
- MaybeObject* value = READ_WEAK_FIELD(this, offset); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- WRITE_WEAK_FIELD(this, offset, value); \
- CONDITIONAL_WEAK_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, \
- offset, value, mode); \
+#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
+ set_condition) \
+ MaybeObject* holder::name() const { \
+ MaybeObject* value = READ_WEAK_FIELD(this, offset); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_WEAK_FIELD(this, offset, value); \
+ CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); \
}
#define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
@@ -154,9 +166,14 @@
set_##field(BitField::update(field(), value)); \
}
-#define TYPE_CHECKER(type, instancetype) \
- bool HeapObject::Is##type() const { \
- return map()->instance_type() == instancetype; \
+#define INSTANCE_TYPE_CHECKER(type, forinstancetype) \
+ V8_INLINE bool Is##type(InstanceType instance_type) { \
+ return instance_type == forinstancetype; \
+ }
+
+#define TYPE_CHECKER(type, ...) \
+ bool HeapObject::Is##type() const { \
+ return InstanceTypeChecker::Is##type(map()->instance_type()); \
}
#define FIELD_ADDR(p, offset) \
@@ -206,48 +223,45 @@
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
-#define WRITE_BARRIER(heap, object, offset, value) \
- do { \
- Heap* __heap__ = heap; \
- __heap__->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- __heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
- value); \
+#define WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ MarkingBarrier(object, HeapObject::RawField(object, offset), value); \
+ GenerationalBarrier(object, HeapObject::RawField(object, offset), value); \
} while (false)
-#define WEAK_WRITE_BARRIER(heap, object, offset, value) \
- do { \
- Heap* __heap__ = heap; \
- __heap__->incremental_marking()->RecordMaybeWeakWrite( \
- object, HeapObject::RawMaybeWeakField(object, offset), value); \
- __heap__->RecordWrite( \
- object, HeapObject::RawMaybeWeakField(object, offset), value); \
+#define WEAK_WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+ value); \
+ GenerationalBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+ value); \
} while (false)
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- do { \
- Heap* __heap__ = heap; \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- __heap__->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- } \
- __heap__->RecordWrite(object, HeapObject::RawField(object, offset), \
- value); \
- } \
+#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, HeapObject::RawField(object, offset), value); \
+ } \
+ GenerationalBarrier(object, HeapObject::RawField(object, offset), \
+ value); \
+ } \
} while (false)
-#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode) \
- do { \
- Heap* __heap__ = heap; \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- __heap__->incremental_marking()->RecordMaybeWeakWrite( \
- object, HeapObject::RawMaybeWeakField(object, offset), value); \
- } \
- __heap__->RecordWrite( \
- object, HeapObject::RawMaybeWeakField(object, offset), value); \
- } \
+#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
+ value); \
+ } \
+ GenerationalBarrier( \
+ object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ } \
} while (false)
#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
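The rewritten barrier macros above drop the explicit Heap* argument; the expansion now calls free functions that locate the heap from the object itself. A simplified sketch of the control flow CONDITIONAL_WRITE_BARRIER expands to, with stubbed barrier functions standing in for the real MarkingBarrier/GenerationalBarrier (not part of this diff):

#include <cstdio>

struct HeapObject {};
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WEAK_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

// Stand-ins for the real barrier entry points.
void MarkingBarrier(HeapObject*, HeapObject** slot, HeapObject* value) {
  std::puts("record write for incremental marking");
}
void GenerationalBarrier(HeapObject*, HeapObject** slot, HeapObject* value) {
  std::puts("record old-to-new slot if needed");
}

void ConditionalWriteBarrier(HeapObject* object, HeapObject** slot,
                             HeapObject* value, WriteBarrierMode mode) {
  if (mode == SKIP_WRITE_BARRIER) return;
  if (mode == UPDATE_WRITE_BARRIER) {
    MarkingBarrier(object, slot, value);
  }
  GenerationalBarrier(object, slot, value);
}

int main() {
  HeapObject host, value;
  HeapObject* field = &value;
  ConditionalWriteBarrier(&host, &field, &value, UPDATE_WRITE_BARRIER);
  return 0;
}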
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 25d5dc938a..76b0692c46 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -5,9 +5,11 @@
#ifndef V8_OBJECTS_ORDERED_HASH_TABLE_INL_H_
#define V8_OBJECTS_ORDERED_HASH_TABLE_INL_H_
-#include "src/heap/heap.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/heap/heap.h"
+#include "src/objects/fixed-array-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index ad558e731e..fdafce56ae 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -126,13 +126,17 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
Handle<FixedArray> result = Handle<FixedArray>::cast(table);
// From this point on table is no longer a valid OrderedHashSet.
result->set_map(ReadOnlyRoots(isolate).fixed_array_map());
+ int const kMaxStringTableEntries =
+ isolate->heap()->MaxNumberToStringCacheSize();
for (int i = 0; i < length; i++) {
int index = kHashTableStartIndex + nof_buckets + (i * kEntrySize);
Object* key = table->get(index);
if (convert == GetKeysConversion::kConvertToString) {
uint32_t index_value;
if (key->ToArrayIndex(&index_value)) {
- key = *isolate->factory()->Uint32ToString(index_value);
+ // Avoid trashing the Number2String cache if indices get very large.
+ bool use_cache = i < kMaxStringTableEntries;
+ key = *isolate->factory()->Uint32ToString(index_value, use_cache);
} else {
CHECK(key->IsName());
}
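The guard added above keeps very large keys arrays from evicting everything else out of the shared number-to-string cache: only the first MaxNumberToStringCacheSize() conversions go through the cache. A loose standalone sketch of that idea; the cap value and container here are made up (not part of this diff):

#include <cstdint>
#include <string>
#include <unordered_map>

std::unordered_map<uint32_t, std::string> number_to_string_cache;
constexpr std::size_t kMaxCacheEntries = 64;  // hypothetical cap

std::string Uint32ToString(uint32_t index, bool use_cache) {
  if (use_cache) {
    auto it = number_to_string_cache.find(index);
    if (it != number_to_string_cache.end()) return it->second;
  }
  std::string result = std::to_string(index);
  if (use_cache && number_to_string_cache.size() < kMaxCacheEntries) {
    number_to_string_cache.emplace(index, result);
  }
  return result;
}

int main() {
  for (uint32_t i = 0; i < 1000; ++i) {
    // Mirrors the loop above: stop using the cache once enough keys were seen.
    Uint32ToString(i, /*use_cache=*/i < kMaxCacheEntries);
  }
  return 0;
}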
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 880aaa9c30..758bde56b0 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index df638c9e5b..298674eb10 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -7,6 +7,8 @@
#include "src/objects/prototype-info.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
// Has to be the last include (doesn't have include guards):
@@ -32,7 +34,7 @@ bool PrototypeInfo::HasObjectCreateMap() {
return cache->IsWeakHeapObject();
}
-ACCESSORS(PrototypeInfo, weak_cell, Object, kWeakCellOffset)
+ACCESSORS(PrototypeInfo, module_namespace, Object, kJSModuleNamespaceOffset)
ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
WEAK_ACCESSORS(PrototypeInfo, object_create_map, kObjectCreateMapOffset)
SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
index 303617fda0..09c538fd19 100644
--- a/deps/v8/src/objects/prototype-info.h
+++ b/deps/v8/src/objects/prototype-info.h
@@ -19,8 +19,11 @@ class PrototypeInfo : public Struct {
public:
static const int UNREGISTERED = -1;
- // [weak_cell]: A WeakCell containing this prototype. ICs cache the cell here.
- DECL_ACCESSORS(weak_cell, Object)
+ // [module_namespace]: A backpointer to JSModuleNamespace from its
+ // PrototypeInfo (or undefined). This field is only used for JSModuleNamespace
+ // maps. TODO(jkummerow): Figure out if there's a way to store the namespace
+ // pointer elsewhere to save memory.
+ DECL_ACCESSORS(module_namespace, Object)
// [prototype_users]: WeakArrayList containing weak references to maps using
// this prototype, or Smi(0) if uninitialized.
@@ -49,8 +52,9 @@ class PrototypeInfo : public Struct {
DECL_PRINTER(PrototypeInfo)
DECL_VERIFIER(PrototypeInfo)
- static const int kWeakCellOffset = HeapObject::kHeaderSize;
- static const int kPrototypeUsersOffset = kWeakCellOffset + kPointerSize;
+ static const int kJSModuleNamespaceOffset = HeapObject::kHeaderSize;
+ static const int kPrototypeUsersOffset =
+ kJSModuleNamespaceOffset + kPointerSize;
static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
static const int kObjectCreateMapOffset = kValidityCellOffset + kPointerSize;
@@ -84,7 +88,8 @@ class PrototypeUsers : public WeakArrayList {
typedef void (*CompactionCallback)(HeapObject* object, int from_index,
int to_index);
static WeakArrayList* Compact(Handle<WeakArrayList> array, Heap* heap,
- CompactionCallback callback);
+ CompactionCallback callback,
+ PretenureFlag pretenure = NOT_TENURED);
#ifdef VERIFY_HEAP
static void Verify(WeakArrayList* array);
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 838c299bd2..5d8fcfb5ce 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 46adcb2a8a..3420b71754 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -182,7 +182,7 @@ class Script : public Struct, public NeverReadOnlySpaceObject {
Script* Next();
private:
- FixedArrayOfWeakCells::Iterator iterator_;
+ WeakArrayList::Iterator iterator_;
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 892da7c5c5..0b4a7effb9 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -5,12 +5,13 @@
#ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
#define V8_OBJECTS_SHARED_FUNCTION_INFO_INL_H_
+#include "src/objects/shared-function-info.h"
+
+#include "src/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/scope-info.h"
-#include "src/objects/shared-function-info.h"
#include "src/objects/templates.h"
-#include "src/wasm/wasm-objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -35,8 +36,7 @@ void PreParsedScopeData::set_child_data(int index, Object* value,
DCHECK_LT(index, this->length());
int offset = kChildDataStartOffset + index * kPointerSize;
RELAXED_WRITE_FIELD(this, offset, value);
- CONDITIONAL_WRITE_BARRIER(Heap::FromWritableHeapObject(this), this, offset,
- value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, offset, value, mode);
}
Object** PreParsedScopeData::child_data_start() const {
@@ -53,6 +53,7 @@ void PreParsedScopeData::clear_padding() {
}
CAST_ACCESSOR(UncompiledData)
+ACCESSORS(UncompiledData, inferred_name, String, kInferredNameOffset)
INT32_ACCESSORS(UncompiledData, start_position, kStartPositionOffset)
INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset)
INT32_ACCESSORS(UncompiledData, function_literal_id, kFunctionLiteralIdOffset)
@@ -83,9 +84,8 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
kNameOrScopeInfoOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
-ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
-ACCESSORS(SharedFunctionInfo, function_identifier_or_debug_info, Object,
- kFunctionIdentifierOrDebugInfoOffset)
+ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
+ kScriptOrDebugInfoOffset)
#if V8_SFI_HAS_UNIQUE_ID
INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
@@ -93,8 +93,9 @@ INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
UINT16_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
UINT16_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
-UINT16_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
- kExpectedNofPropertiesOffset)
+UINT8_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+ kExpectedNofPropertiesOffset)
+UINT8_ACCESSORS(SharedFunctionInfo, raw_builtin_function_id, kBuiltinFunctionId)
UINT16_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
kFunctionTokenOffsetOffset)
INT_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
@@ -187,7 +188,7 @@ BailoutReason SharedFunctionInfo::disable_optimization_reason() const {
return DisabledOptimizationReasonBits::decode(flags());
}
-LanguageMode SharedFunctionInfo::language_mode() {
+LanguageMode SharedFunctionInfo::language_mode() const {
STATIC_ASSERT(LanguageModeSize == 2);
return construct_language_mode(IsStrictBit::decode(flags()));
}
@@ -335,47 +336,6 @@ void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
}
}
-Code* SharedFunctionInfo::GetCode() const {
- // ======
- // NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
- // GetSharedFunctionInfoCode method in code-stub-assembler.cc.
- // ======
-
- Isolate* isolate = GetIsolate();
- Object* data = function_data();
- if (data->IsSmi()) {
- // Holding a Smi means we are a builtin.
- DCHECK(HasBuiltinId());
- return isolate->builtins()->builtin(builtin_id());
- } else if (data->IsBytecodeArray()) {
- // Having a bytecode array means we are a compiled, interpreted function.
- DCHECK(HasBytecodeArray());
- return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- } else if (data->IsFixedArray()) {
- // Having a fixed array means we are an asm.js/wasm function.
- DCHECK(HasAsmWasmData());
- return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
- } else if (data->IsUncompiledData()) {
- // Having uncompiled data (with or without scope) means we need to compile.
- DCHECK(HasUncompiledData());
- return isolate->builtins()->builtin(Builtins::kCompileLazy);
- } else if (data->IsFunctionTemplateInfo()) {
- // Having a function template info means we are an API function.
- DCHECK(IsApiFunction());
- return isolate->builtins()->builtin(Builtins::kHandleApiCall);
- } else if (data->IsWasmExportedFunctionData()) {
- // Having a WasmExportedFunctionData means the code is in there.
- DCHECK(HasWasmExportedFunctionData());
- return wasm_exported_function_data()->wrapper_code();
- } else if (data->IsInterpreterData()) {
- Code* code = InterpreterTrampoline();
- DCHECK(code->IsCode());
- DCHECK(code->is_interpreter_trampoline_builtin());
- return code;
- }
- UNREACHABLE();
-}
-
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
ScopeInfo* SharedFunctionInfo::scope_info() const {
@@ -401,7 +361,7 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* scope_info,
}
WRITE_FIELD(this, kNameOrScopeInfoOffset,
reinterpret_cast<Object*>(scope_info));
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kNameOrScopeInfoOffset,
+ CONDITIONAL_WRITE_BARRIER(this, kNameOrScopeInfoOffset,
reinterpret_cast<Object*>(scope_info), mode);
}
@@ -653,12 +613,6 @@ bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
return function_data()->IsWasmExportedFunctionData();
}
-WasmExportedFunctionData* SharedFunctionInfo::wasm_exported_function_data()
- const {
- DCHECK(HasWasmExportedFunctionData());
- return WasmExportedFunctionData::cast(function_data());
-}
-
int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
// Fast path for the common case when the SFI is uncompiled and so the
// function literal id is already in the uncompiled data.
@@ -670,73 +624,76 @@ int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
}
// Otherwise, search for the function in the SFI's script's function list,
- // and return its index in that list.
+ // and return its index in that list.e
return FindIndexInScript(isolate);
}
+Object* SharedFunctionInfo::script() const {
+ Object* maybe_script = script_or_debug_info();
+ if (maybe_script->IsDebugInfo()) {
+ return DebugInfo::cast(maybe_script)->script();
+ }
+ return maybe_script;
+}
+
+void SharedFunctionInfo::set_script(Object* script) {
+ Object* maybe_debug_info = script_or_debug_info();
+ if (maybe_debug_info->IsDebugInfo()) {
+ DebugInfo::cast(maybe_debug_info)->set_script(script);
+ } else {
+ set_script_or_debug_info(script);
+ }
+}
+
bool SharedFunctionInfo::HasDebugInfo() const {
- return function_identifier_or_debug_info()->IsDebugInfo();
+ return script_or_debug_info()->IsDebugInfo();
}
DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
DCHECK(HasDebugInfo());
- return DebugInfo::cast(function_identifier_or_debug_info());
-}
-
-Object* SharedFunctionInfo::function_identifier() const {
- Object* result;
- if (HasDebugInfo()) {
- result = GetDebugInfo()->function_identifier();
- } else {
- result = function_identifier_or_debug_info();
- }
- DCHECK(result->IsSmi() || result->IsString() || result->IsUndefined());
- return result;
+ return DebugInfo::cast(script_or_debug_info());
}
void SharedFunctionInfo::SetDebugInfo(DebugInfo* debug_info) {
DCHECK(!HasDebugInfo());
- DCHECK_EQ(debug_info->function_identifier(),
- function_identifier_or_debug_info());
- set_function_identifier_or_debug_info(debug_info);
+ DCHECK_EQ(debug_info->script(), script_or_debug_info());
+ set_script_or_debug_info(debug_info);
}
bool SharedFunctionInfo::HasBuiltinFunctionId() {
- return function_identifier()->IsSmi();
+ return builtin_function_id() != BuiltinFunctionId::kInvalidBuiltinFunctionId;
}
BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
- DCHECK(HasBuiltinFunctionId());
- return static_cast<BuiltinFunctionId>(Smi::ToInt(function_identifier()));
+ return static_cast<BuiltinFunctionId>(raw_builtin_function_id());
}
void SharedFunctionInfo::set_builtin_function_id(BuiltinFunctionId id) {
- DCHECK(!HasDebugInfo());
- set_function_identifier_or_debug_info(Smi::FromInt(id));
+ set_raw_builtin_function_id(static_cast<uint8_t>(id));
}
bool SharedFunctionInfo::HasInferredName() {
- return function_identifier()->IsString();
+ Object* scope_info = name_or_scope_info();
+ if (scope_info->IsScopeInfo()) {
+ return ScopeInfo::cast(scope_info)->HasInferredFunctionName();
+ }
+ return HasUncompiledData();
}
String* SharedFunctionInfo::inferred_name() {
- if (HasInferredName()) {
- return String::cast(function_identifier());
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo* scope_info = ScopeInfo::cast(maybe_scope_info);
+ if (scope_info->HasInferredFunctionName()) {
+ Object* name = ScopeInfo::cast(maybe_scope_info)->InferredFunctionName();
+ if (name->IsString()) return String::cast(name);
+ }
+ } else if (HasUncompiledData()) {
+ return uncompiled_data()->inferred_name();
}
- DCHECK(function_identifier()->IsUndefined() || HasBuiltinFunctionId());
return GetReadOnlyRoots().empty_string();
}
-void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
- DCHECK(function_identifier_or_debug_info()->IsUndefined() ||
- HasInferredName() || HasDebugInfo());
- if (HasDebugInfo()) {
- GetDebugInfo()->set_function_identifier(inferred_name);
- } else {
- set_function_identifier_or_debug_info(inferred_name);
- }
-}
-
bool SharedFunctionInfo::IsUserJavaScript() {
Object* script_obj = script();
if (script_obj->IsUndefined()) return false;
@@ -791,7 +748,8 @@ void SharedFunctionInfo::DiscardCompiled(
// validity checks, since we're performing the unusual task of decompiling.
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
- start_position, end_position, function_literal_id);
+ handle(shared_info->inferred_name(), isolate), start_position,
+ end_position, function_literal_id);
shared_info->set_function_data(*data);
}
}
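The script()/set_script() accessors added above overload one pointer field: it holds either the Script directly or, once debugging is attached, a DebugInfo that in turn holds the Script. A small standalone sketch of that pattern using std::variant and hypothetical types (not part of this diff):

#include <cassert>
#include <variant>

struct Script { int id; };
struct DebugInfo { Script* script; };

struct SharedFunctionInfoSketch {
  std::variant<Script*, DebugInfo*> script_or_debug_info;

  Script* script() const {
    if (auto dbg = std::get_if<DebugInfo*>(&script_or_debug_info)) {
      return (*dbg)->script;  // debug info is attached: look through it
    }
    return std::get<Script*>(script_or_debug_info);
  }

  void set_script(Script* s) {
    if (auto dbg = std::get_if<DebugInfo*>(&script_or_debug_info)) {
      (*dbg)->script = s;  // keep the DebugInfo's back-reference in sync
    } else {
      script_or_debug_info = s;
    }
  }
};

int main() {
  Script a{1}, b{2};
  DebugInfo d{&a};
  SharedFunctionInfoSketch sfi{&a};
  assert(sfi.script()->id == 1);
  sfi.script_or_debug_info = &d;  // attaching debug info keeps script() stable
  assert(sfi.script()->id == 1);
  sfi.set_script(&b);
  assert(sfi.script()->id == 2);
  return 0;
}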
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index c8684ea2f0..d5f65a91d1 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -68,6 +68,7 @@ class PreParsedScopeData : public HeapObject {
// not stored in the SharedFunctionInfo.
class UncompiledData : public HeapObject {
public:
+ DECL_ACCESSORS(inferred_name, String)
DECL_INT32_ACCESSORS(start_position)
DECL_INT32_ACCESSORS(end_position)
DECL_INT32_ACCESSORS(function_literal_id)
@@ -75,6 +76,9 @@ class UncompiledData : public HeapObject {
DECL_CAST(UncompiledData)
#define UNCOMPILED_DATA_FIELDS(V) \
+ V(kStartOfPointerFieldsOffset, 0) \
+ V(kInferredNameOffset, kPointerSize) \
+ V(kEndOfPointerFieldsOffset, 0) \
V(kStartPositionOffset, kInt32Size) \
V(kEndPositionOffset, kInt32Size) \
V(kFunctionLiteralIdOffset, kInt32Size) \
@@ -86,6 +90,10 @@ class UncompiledData : public HeapObject {
static const int kSize = POINTER_SIZE_ALIGN(kUnalignedSize);
+ typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
+ kEndOfPointerFieldsOffset, kSize>
+ BodyDescriptor;
+
// Clear uninitialized padding space.
inline void clear_padding();
@@ -104,6 +112,11 @@ class UncompiledDataWithoutPreParsedScope : public UncompiledData {
static const int kSize = UncompiledData::kSize;
+ // No extra fields compared to UncompiledData.
+ typedef UncompiledData::BodyDescriptor BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(UncompiledDataWithoutPreParsedScope);
};
@@ -132,8 +145,10 @@ class UncompiledDataWithPreParsedScope : public UncompiledData {
// Make sure the size is aligned
STATIC_ASSERT(kSize == POINTER_SIZE_ALIGN(kSize));
- typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfPointerFieldsOffset, kSize>
+ typedef SubclassBodyDescriptor<
+ UncompiledData::BodyDescriptor,
+ FixedBodyDescriptor<kStartOfPointerFieldsOffset,
+ kEndOfPointerFieldsOffset, kSize>>
BodyDescriptor;
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
@@ -174,7 +189,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
inline void SetName(String* name);
// Get the code object which represents the execution of this function.
- inline Code* GetCode() const;
+ Code* GetCode() const;
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
@@ -259,7 +274,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// [expected_nof_properties]: Expected number of properties for the
// function. The value is only reliable when the function has been compiled.
- DECL_UINT16_ACCESSORS(expected_nof_properties)
+ DECL_UINT8_ACCESSORS(expected_nof_properties)
#if V8_SFI_HAS_UNIQUE_ID
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
@@ -316,40 +331,35 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
UncompiledDataWithPreParsedScope* data);
inline bool HasUncompiledDataWithoutPreParsedScope() const;
inline bool HasWasmExportedFunctionData() const;
- inline WasmExportedFunctionData* wasm_exported_function_data() const;
+ WasmExportedFunctionData* wasm_exported_function_data() const;
inline void set_wasm_exported_function_data(WasmExportedFunctionData* data);
// Clear out pre-parsed scope data from UncompiledDataWithPreParsedScope,
// turning it into UncompiledDataWithoutPreParsedScope.
inline void ClearPreParsedScopeData();
- // [function identifier]: This field holds an additional identifier for the
- // function.
- // - a Smi identifying a builtin function [HasBuiltinFunctionId()].
- // - a String identifying the function's inferred name [HasInferredName()].
- // - a DebugInfo which holds the actual function_identifier [HasDebugInfo()].
- // The inferred_name is inferred from variable or property
- // assignment of this function. It is used to facilitate debugging and
- // profiling of JavaScript code written in OO style, where almost
- // all functions are anonymous but are assigned to object
- // properties.
- DECL_ACCESSORS(function_identifier_or_debug_info, Object)
-
+ // [raw_builtin_function_id]: The id of the built-in function this function
+ // represents, used during optimization to improve code generation.
+ // TODO(leszeks): Once there are no more JS builtins, this can be replaced
+ // by BuiltinId.
+ DECL_UINT8_ACCESSORS(raw_builtin_function_id)
inline bool HasBuiltinFunctionId();
inline BuiltinFunctionId builtin_function_id();
inline void set_builtin_function_id(BuiltinFunctionId id);
+ // Make sure BuiltinFunctionIds fit in a uint8_t
+ STATIC_ASSERT((std::is_same<std::underlying_type<BuiltinFunctionId>::type,
+ uint8_t>::value));
+
+ // The inferred_name is inferred from variable or property assignment of this
+ // function. It is used to facilitate debugging and profiling of JavaScript
+ // code written in OO style, where almost all functions are anonymous but are
+ // assigned to object properties.
inline bool HasInferredName();
inline String* inferred_name();
- inline void set_inferred_name(String* inferred_name);
// Get the function literal id associated with this function, for parsing.
inline int FunctionLiteralId(Isolate* isolate) const;
- // The function is subject to debugging if a debug info is attached.
- inline bool HasDebugInfo() const;
- inline DebugInfo* GetDebugInfo() const;
- inline void SetDebugInfo(DebugInfo* debug_info);
-
// Break infos are contained in DebugInfo, this is a convenience method
// to simplify access.
bool HasBreakInfo() const;
@@ -366,8 +376,18 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// Used for flags such as --turbo-filter.
bool PassesFilter(const char* raw_filter);
- // [script]: Script from which the function originates.
- DECL_ACCESSORS(script, Object)
+ // [script_or_debug_info]: One of:
+ // - Script from which the function originates.
+ // - a DebugInfo which holds the actual script [HasDebugInfo()].
+ DECL_ACCESSORS(script_or_debug_info, Object)
+
+ inline Object* script() const;
+ inline void set_script(Object* script);
+
+ // The function is subject to debugging if a debug info is attached.
+ inline bool HasDebugInfo() const;
+ inline DebugInfo* GetDebugInfo() const;
+ inline void SetDebugInfo(DebugInfo* debug_info);
// The offset of the 'function' token in the script source relative to the
// start position. Can return kFunctionTokenOutOfRange if offset doesn't
@@ -395,7 +415,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
// Indicates the language mode.
- inline LanguageMode language_mode();
+ inline LanguageMode language_mode() const;
inline void set_language_mode(LanguageMode language_mode);
// Indicates whether the source is implicitly wrapped in a function.
@@ -554,7 +574,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
private:
Script::Iterator script_iterator_;
- FixedArrayOfWeakCells::Iterator noscript_sfi_iterator_;
+ WeakArrayList::Iterator noscript_sfi_iterator_;
SharedFunctionInfo::ScriptIterator sfi_iterator_;
DisallowHeapAllocation no_gc_;
DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
@@ -583,14 +603,14 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
V(kFunctionDataOffset, kPointerSize) \
V(kNameOrScopeInfoOffset, kPointerSize) \
V(kOuterScopeInfoOrFeedbackMetadataOffset, kPointerSize) \
- V(kScriptOffset, kPointerSize) \
- V(kFunctionIdentifierOrDebugInfoOffset, kPointerSize) \
+ V(kScriptOrDebugInfoOffset, kPointerSize) \
V(kEndOfPointerFieldsOffset, 0) \
/* Raw data fields. */ \
V(kUniqueIdOffset, kUniqueIdFieldSize) \
V(kLengthOffset, kUInt16Size) \
V(kFormalParameterCountOffset, kUInt16Size) \
- V(kExpectedNofPropertiesOffset, kUInt16Size) \
+ V(kExpectedNofPropertiesOffset, kUInt8Size) \
+ V(kBuiltinFunctionId, kUInt8Size) \
V(kFunctionTokenOffsetOffset, kUInt16Size) \
V(kFlagsOffset, kInt32Size) \
/* Total size. */ \
@@ -603,7 +623,7 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfPointerFieldsOffset, kSize>
+ kEndOfPointerFieldsOffset, kAlignedSize>
BodyDescriptor;
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
@@ -654,8 +674,6 @@ class SharedFunctionInfo : public HeapObject, public NeverReadOnlySpaceObject {
// function.
DECL_ACCESSORS(outer_scope_info, HeapObject)
- inline Object* function_identifier() const;
-
inline void set_kind(FunctionKind kind);
inline void set_needs_home_object(bool value);
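The STATIC_ASSERT above pins BuiltinFunctionId to a one-byte underlying type so every id fits into the new kBuiltinFunctionId uint8 slot. The plain standard-C++ form of the same check, with a hypothetical enum for illustration (not part of this diff):

#include <cstdint>
#include <type_traits>

enum class BuiltinFunctionId : uint8_t { kInvalidBuiltinFunctionId = 255, kMathAbs = 0 };

static_assert(std::is_same<std::underlying_type<BuiltinFunctionId>::type,
                           uint8_t>::value,
              "BuiltinFunctionId must fit in a uint8_t field");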
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 235bdcd8e7..1e8eef754f 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/string.h"
#include "src/conversions-inl.h"
+#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/objects/name-inl.h"
#include "src/string-hasher-inl.h"
@@ -25,6 +26,7 @@ CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(InternalizedString)
CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqTwoByteString)
@@ -497,7 +499,7 @@ void SlicedString::set_parent(Isolate* isolate, String* parent,
WriteBarrierMode mode) {
DCHECK(parent->IsSeqString() || parent->IsExternalString());
WRITE_FIELD(this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kParentOffset, parent, mode);
+ CONDITIONAL_WRITE_BARRIER(this, kParentOffset, parent, mode);
}
SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
@@ -511,7 +513,7 @@ Object* ConsString::unchecked_first() { return READ_FIELD(this, kFirstOffset); }
void ConsString::set_first(Isolate* isolate, String* value,
WriteBarrierMode mode) {
WRITE_FIELD(this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kFirstOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, kFirstOffset, value, mode);
}
String* ConsString::second() {
@@ -525,7 +527,7 @@ Object* ConsString::unchecked_second() {
void ConsString::set_second(Isolate* isolate, String* value,
WriteBarrierMode mode) {
WRITE_FIELD(this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(isolate->heap(), this, kSecondOffset, value, mode);
+ CONDITIONAL_WRITE_BARRIER(this, kSecondOffset, value, mode);
}
ACCESSORS(ThinString, actual, String, kActualOffset);
@@ -577,6 +579,14 @@ void ExternalOneByteString::update_data_cache() {
*data_field = resource()->data();
}
+void ExternalOneByteString::SetResource(
+ Isolate* isolate, const ExternalOneByteString::Resource* resource) {
+ set_resource(resource);
+ size_t new_payload = resource == nullptr ? 0 : resource->length();
+ if (new_payload > 0)
+ isolate->heap()->UpdateExternalString(this, 0, new_payload);
+}
+
void ExternalOneByteString::set_resource(
const ExternalOneByteString::Resource* resource) {
DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
@@ -605,6 +615,14 @@ void ExternalTwoByteString::update_data_cache() {
*data_field = resource()->data();
}
+void ExternalTwoByteString::SetResource(
+ Isolate* isolate, const ExternalTwoByteString::Resource* resource) {
+ set_resource(resource);
+ size_t new_payload = resource == nullptr ? 0 : resource->length() * 2;
+ if (new_payload > 0)
+ isolate->heap()->UpdateExternalString(this, 0, new_payload);
+}
+
void ExternalTwoByteString::set_resource(
const ExternalTwoByteString::Resource* resource) {
*reinterpret_cast<const Resource**>(FIELD_ADDR(this, kResourceOffset)) =
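The SetResource() helpers added above differ from the bare set_resource() write in that they also report the payload size to the heap, so externally held string memory stays accounted for; the two-byte variant reports length * 2. A rough standalone sketch of that bookkeeping with made-up types (not part of this diff):

#include <cstddef>
#include <cstdio>

struct Resource { std::size_t length; };

struct HeapSketch {
  std::size_t external_string_bytes = 0;
  void UpdateExternalString(std::size_t old_payload, std::size_t new_payload) {
    external_string_bytes += new_payload - old_payload;
  }
};

struct ExternalTwoByteStringSketch {
  const Resource* resource = nullptr;
  void SetResource(HeapSketch* heap, const Resource* r) {
    resource = r;  // the raw field write, as set_resource() does
    std::size_t new_payload = (r == nullptr) ? 0 : r->length * 2;
    if (new_payload > 0) heap->UpdateExternalString(0, new_payload);
  }
};

int main() {
  HeapSketch heap;
  Resource r{8};
  ExternalTwoByteStringSketch s;
  s.SetResource(&heap, &r);
  std::printf("tracked external bytes: %zu\n", heap.external_string_bytes);  // 16
  return 0;
}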
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index ba036edee9..4058c7cec3 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -311,6 +311,7 @@ class String : public Name {
// Externalization.
bool MakeExternal(v8::String::ExternalStringResource* resource);
bool MakeExternal(v8::String::ExternalOneByteStringResource* resource);
+ bool SupportsExternalization();
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
@@ -486,6 +487,15 @@ class SeqString : public String {
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
+class InternalizedString : public String {
+ public:
+ DECL_CAST(InternalizedString)
+ // TODO(neis): Possibly move some stuff from String here.
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(InternalizedString);
+};
+
// The OneByteString class captures sequential one-byte string objects.
// Each character in the OneByteString is an one-byte character.
class SeqOneByteString : public SeqString {
@@ -751,6 +761,11 @@ class ExternalOneByteString : public ExternalString {
// The underlying resource.
inline const Resource* resource();
+
+ // It is assumed that the previous resource is null. If it is not null, then
+  // it is the responsibility of the caller to handle the previous resource.
+ inline void SetResource(Isolate* isolate, const Resource* buffer);
+ // Used only during serialization.
inline void set_resource(const Resource* buffer);
// Update the pointer cache to the external character array.
@@ -784,6 +799,11 @@ class ExternalTwoByteString : public ExternalString {
// The underlying string resource.
inline const Resource* resource();
+
+  // It is assumed that the previous resource is null. If it is not null, then
+  // it is the responsibility of the caller to handle the previous resource.
+ inline void SetResource(Isolate* isolate, const Resource* buffer);
+ // Used only during serialization.
inline void set_resource(const Resource* buffer);
// Update the pointer cache to the external character array.
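[Editor's note — illustration, not part of the patch] The new SetResource entry points above tie resource attachment to heap accounting: an ExternalOneByteString reports resource->length() bytes of external payload, an ExternalTwoByteString reports length() * 2, and a null resource reports nothing. Below is a minimal, self-contained C++ sketch of that pattern under invented names (MiniHeap, MiniExternalOneByteString, OneByteResource); it is not V8's real API.

// --- begin illustrative sketch ---
#include <cstddef>
#include <cstdio>

struct OneByteResource {
  const char* data;
  size_t length;  // number of one-byte characters
};

class MiniHeap {
 public:
  // Stands in for Heap::UpdateExternalString: track how many bytes of
  // off-heap payload a given string contributes.
  void UpdateExternalString(void* str, size_t old_payload, size_t new_payload) {
    external_bytes_ += new_payload - old_payload;
    std::printf("external payload for %p: %zu -> %zu (total %zu bytes)\n",
                str, old_payload, new_payload, external_bytes_);
  }

 private:
  size_t external_bytes_ = 0;
};

class MiniExternalOneByteString {
 public:
  // Assumes the previous resource is null; otherwise the caller must handle
  // (e.g. dispose of) the old resource first, as the patch's comment requires.
  void SetResource(MiniHeap* heap, const OneByteResource* resource) {
    resource_ = resource;
    size_t new_payload = resource == nullptr ? 0 : resource->length;
    if (new_payload > 0) heap->UpdateExternalString(this, 0, new_payload);
  }

 private:
  const OneByteResource* resource_ = nullptr;
};

int main() {
  MiniHeap heap;
  MiniExternalOneByteString str;
  OneByteResource res{"hello", 5};
  str.SetResource(&heap, &res);  // reports 5 bytes; a two-byte string would report length * 2
}
// --- end illustrative sketch ---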
diff --git a/deps/v8/src/optimized-compilation-info.cc b/deps/v8/src/optimized-compilation-info.cc
index 77730919f1..f14c3a6661 100644
--- a/deps/v8/src/optimized-compilation-info.cc
+++ b/deps/v8/src/optimized-compilation-info.cc
@@ -5,12 +5,10 @@
#include "src/optimized-compilation-info.h"
#include "src/api.h"
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/parsing/parse-info.h"
+#include "src/objects/shared-function-info.h"
#include "src/source-position.h"
namespace v8 {
diff --git a/deps/v8/src/optimized-compilation-info.h b/deps/v8/src/optimized-compilation-info.h
index a8cb8d220b..ecb883f49d 100644
--- a/deps/v8/src/optimized-compilation-info.h
+++ b/deps/v8/src/optimized-compilation-info.h
@@ -56,6 +56,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
kTraceTurboJson = 1 << 14,
kTraceTurboGraph = 1 << 15,
kTraceTurboScheduled = 1 << 16,
+ kWasmRuntimeExceptionSupport = 1 << 17
};
// Construct a compilation info for optimized compilation.
@@ -166,6 +167,14 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return GetFlag(kAnalyzeEnvironmentLiveness);
}
+ void SetWasmRuntimeExceptionSupport() {
+ SetFlag(kWasmRuntimeExceptionSupport);
+ }
+
+ bool wasm_runtime_exception_support() {
+ return GetFlag(kWasmRuntimeExceptionSupport);
+ }
+
bool trace_turbo_json_enabled() const { return GetFlag(kTraceTurboJson); }
bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); }
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 522b650be7..7833dbc8d3 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -7,6 +7,7 @@
#include "src/messages.h"
#include "src/parsing/scanner.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -61,18 +62,15 @@ class ExpressionClassifier {
: location(Scanner::Location::invalid()),
message(MessageTemplate::kNone),
kind(kUnusedError),
- type(kSyntaxError),
arg(nullptr) {}
V8_INLINE explicit Error(Scanner::Location loc,
MessageTemplate::Template msg, ErrorKind k,
- const char* a = nullptr,
- ParseErrorType t = kSyntaxError)
- : location(loc), message(msg), kind(k), type(t), arg(a) {}
+ const char* a = nullptr)
+ : location(loc), message(msg), kind(k), arg(a) {}
Scanner::Location location;
- MessageTemplate::Template message : 26;
+ MessageTemplate::Template message : 28;
unsigned kind : 4;
- ParseErrorType type : 2;
const char* arg;
};
@@ -88,10 +86,6 @@ class ExpressionClassifier {
};
// clang-format on
- enum FunctionProperties : unsigned {
- NonSimpleParameter = 1 << 0
- };
-
explicit ExpressionClassifier(typename Types::Base* base,
DuplicateFinder* duplicate_finder = nullptr)
: base_(base),
@@ -100,9 +94,9 @@ class ExpressionClassifier {
reported_errors_(base->impl()->GetReportedErrorList()),
duplicate_finder_(duplicate_finder),
invalid_productions_(0),
- function_properties_(0) {
+ is_non_simple_parameter_list_(0) {
base->classifier_ = this;
- reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
+ reported_errors_begin_ = reported_errors_end_ = reported_errors_->size();
}
V8_INLINE ~ExpressionClassifier() {
@@ -193,11 +187,11 @@ class ExpressionClassifier {
}
V8_INLINE bool is_simple_parameter_list() const {
- return !(function_properties_ & NonSimpleParameter);
+ return !is_non_simple_parameter_list_;
}
V8_INLINE void RecordNonSimpleParameter() {
- function_properties_ |= NonSimpleParameter;
+ is_non_simple_parameter_list_ = 1;
}
void RecordExpressionError(const Scanner::Location& loc,
@@ -208,14 +202,6 @@ class ExpressionClassifier {
Add(Error(loc, message, kExpressionProduction, arg));
}
- void RecordExpressionError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- ParseErrorType type, const char* arg = nullptr) {
- if (!is_valid_expression()) return;
- invalid_productions_ |= ExpressionProduction;
- Add(Error(loc, message, kExpressionProduction, arg, type));
- }
-
void RecordFormalParameterInitializerError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -292,7 +278,7 @@ class ExpressionClassifier {
void Accumulate(ExpressionClassifier* inner, unsigned productions) {
DCHECK_EQ(inner->reported_errors_, reported_errors_);
DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
- DCHECK_EQ(inner->reported_errors_end_, reported_errors_->length());
+ DCHECK_EQ(inner->reported_errors_end_, reported_errors_->size());
// Propagate errors from inner, but don't overwrite already recorded
// errors.
unsigned non_arrow_inner_invalid_productions =
@@ -305,9 +291,9 @@ class ExpressionClassifier {
bool copy_BP_to_AFP = false;
if (productions & ArrowFormalParametersProduction &&
is_valid_arrow_formal_parameters()) {
- // Also copy function properties if expecting an arrow function
- // parameter.
- function_properties_ |= inner->function_properties_;
+      // Also copy whether we've seen any non-simple parameters if expecting
+      // an arrow function parameter.
+ is_non_simple_parameter_list_ |= inner->is_non_simple_parameter_list_;
if (!inner->is_valid_binding_pattern()) {
copy_BP_to_AFP = true;
invalid_productions_ |= ArrowFormalParametersProduction;
@@ -352,14 +338,14 @@ class ExpressionClassifier {
}
}
}
- reported_errors_->Rewind(reported_errors_end_);
+ reported_errors_->resize(reported_errors_end_);
inner->reported_errors_begin_ = inner->reported_errors_end_ =
reported_errors_end_;
}
V8_INLINE void Discard() {
- if (reported_errors_end_ == reported_errors_->length()) {
- reported_errors_->Rewind(reported_errors_begin_);
+ if (reported_errors_end_ == reported_errors_->size()) {
+ reported_errors_->resize(reported_errors_begin_);
reported_errors_end_ = reported_errors_begin_;
}
DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
@@ -389,8 +375,8 @@ class ExpressionClassifier {
// Adds e to the end of the list of reported errors for this classifier.
// It is expected that this classifier is the last one in the stack.
V8_INLINE void Add(const Error& e) {
- DCHECK_EQ(reported_errors_end_, reported_errors_->length());
- reported_errors_->Add(e, zone_);
+ DCHECK_EQ(reported_errors_end_, reported_errors_->size());
+ reported_errors_->push_back(e);
reported_errors_end_++;
}
@@ -400,7 +386,7 @@ class ExpressionClassifier {
// in an inner classifier) or it could be an existing error (in case a
// copy is needed).
V8_INLINE void Copy(int i) {
- DCHECK_LT(i, reported_errors_->length());
+ DCHECK_LT(i, reported_errors_->size());
if (reported_errors_end_ != i)
reported_errors_->at(reported_errors_end_) = reported_errors_->at(i);
reported_errors_end_++;
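[Editor's note — illustration, not part of the patch] The ExpressionClassifier hunks above replace ZoneList::Rewind/Add with ZoneVector::resize/push_back while keeping the same bookkeeping: each classifier remembers the begin and end indices of its own entries in one shared error list, appends only while it is the innermost classifier, and rewinds the shared list to drop speculative errors. The sketch below shows that bookkeeping with std::vector and std::string standing in for ZoneVector and the Error struct; ErrorScope is an invented name.

// --- begin illustrative sketch ---
#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

class ErrorScope {
 public:
  explicit ErrorScope(std::vector<std::string>* errors)
      : errors_(errors), begin_(errors->size()), end_(errors->size()) {}

  // Only the innermost scope may append, mirroring the DCHECK in Add().
  void Add(std::string message) {
    assert(end_ == errors_->size());
    errors_->push_back(std::move(message));
    ++end_;
  }

  // Drop this scope's errors if nothing was recorded after them,
  // mirroring Discard()'s resize-based rewind.
  void Discard() {
    if (end_ == errors_->size()) {
      errors_->resize(begin_);
      end_ = begin_;
    }
  }

 private:
  std::vector<std::string>* errors_;
  size_t begin_;
  size_t end_;
};

int main() {
  std::vector<std::string> errors;
  ErrorScope outer(&errors);
  {
    ErrorScope inner(&errors);
    inner.Add("speculative error from trying an arrow-function parse");
    inner.Discard();  // the speculative parse succeeded, so rewind the list
  }
  outer.Add("an error reported after the inner scope was discarded");
  std::printf("errors kept: %zu\n", errors.size());  // prints 1
}
// --- end illustrative sketch ---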
@@ -409,10 +395,10 @@ class ExpressionClassifier {
typename Types::Base* base_;
ExpressionClassifier* previous_;
Zone* zone_;
- ZoneList<Error>* reported_errors_;
+ ZoneVector<Error>* reported_errors_;
DuplicateFinder* duplicate_finder_;
- unsigned invalid_productions_ : 14;
- unsigned function_properties_ : 2;
+ unsigned invalid_productions_ : 15;
+ unsigned is_non_simple_parameter_list_ : 1;
// The uint16_t for reported_errors_begin_ and reported_errors_end_ will
// not be enough in the case of a long series of expressions using nested
// classifiers, e.g., a long sequence of assignments, as in:
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index 4920877610..7d476f1e64 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -14,72 +14,74 @@ namespace internal {
FuncNameInferrer::FuncNameInferrer(AstValueFactory* ast_value_factory,
Zone* zone)
: ast_value_factory_(ast_value_factory),
- entries_stack_(10, zone),
- names_stack_(5, zone),
- funcs_to_infer_(4, zone),
- zone_(zone) {
-}
-
+ entries_stack_(zone),
+ names_stack_(zone),
+ funcs_to_infer_(zone),
+ zone_(zone) {}
void FuncNameInferrer::PushEnclosingName(const AstRawString* name) {
// Enclosing name is a name of a constructor function. To check
// that it is really a constructor, we check that it is not empty
// and starts with a capital letter.
if (!name->IsEmpty() && unibrow::Uppercase::Is(name->FirstCharacter())) {
- names_stack_.Add(Name(name, kEnclosingConstructorName), zone());
+ names_stack_.push_back(Name(name, kEnclosingConstructorName));
}
}
void FuncNameInferrer::PushLiteralName(const AstRawString* name) {
if (IsOpen() && name != ast_value_factory_->prototype_string()) {
- names_stack_.Add(Name(name, kLiteralName), zone());
+ names_stack_.push_back(Name(name, kLiteralName));
}
}
void FuncNameInferrer::PushVariableName(const AstRawString* name) {
if (IsOpen() && name != ast_value_factory_->dot_result_string()) {
- names_stack_.Add(Name(name, kVariableName), zone());
+ names_stack_.push_back(Name(name, kVariableName));
}
}
void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
if (IsOpen()) {
- CHECK_GT(names_stack_.length(), 0);
- CHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
- names_stack_.RemoveLast();
+ CHECK_GT(names_stack_.size(), 0);
+ CHECK(names_stack_.back().name->IsOneByteEqualTo("async"));
+ names_stack_.pop_back();
}
}
void FuncNameInferrer::Leave() {
DCHECK(IsOpen());
- names_stack_.Rewind(entries_stack_.RemoveLast());
- if (entries_stack_.is_empty()) funcs_to_infer_.Clear();
+ size_t last_entry = entries_stack_.back();
+ entries_stack_.pop_back();
+ names_stack_.Rewind(last_entry);
+ if (entries_stack_.is_empty()) funcs_to_infer_.Rewind();
}
const AstConsString* FuncNameInferrer::MakeNameFromStack() {
AstConsString* result = ast_value_factory_->NewConsString();
- for (int pos = 0; pos < names_stack_.length(); pos++) {
+ auto it = names_stack_.begin();
+ while (it != names_stack_.end()) {
+ // Advance the iterator to be able to peek the next value.
+ auto current = it++;
// Skip consecutive variable declarations.
- if (pos + 1 < names_stack_.length() &&
- names_stack_.at(pos).type == kVariableName &&
- names_stack_.at(pos + 1).type == kVariableName) {
+ if (it != names_stack_.end() && current->type == kVariableName &&
+ it->type == kVariableName) {
continue;
}
// Add name. Separate names with ".".
if (!result->IsEmpty()) {
result->AddString(zone(), ast_value_factory_->dot_string());
}
- result->AddString(zone(), names_stack_.at(pos).name);
+ result->AddString(zone(), current->name);
}
return result;
}
void FuncNameInferrer::InferFunctionsNames() {
const AstConsString* func_name = MakeNameFromStack();
- for (int i = 0; i < funcs_to_infer_.length(); ++i) {
- funcs_to_infer_[i]->set_raw_inferred_name(func_name);
+ for (FunctionLiteral* func : funcs_to_infer_) {
+ func->set_raw_inferred_name(func_name);
}
funcs_to_infer_.Rewind(0);
}
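[Editor's note — illustration, not part of the patch] MakeNameFromStack above now walks the names with an iterator that is advanced before the current entry is used, so the next entry can be peeked: a variable name immediately followed by another variable name is skipped, and the survivors are joined with ".". The snippet below reproduces that loop over a std::list (standing in for ZoneChunkList); the Name struct and enum values are simplified stand-ins.

// --- begin illustrative sketch ---
#include <iostream>
#include <list>
#include <string>

enum class NameType { kVariableName, kLiteralName };

struct Name {
  std::string text;
  NameType type;
};

std::string MakeNameFromStack(const std::list<Name>& names) {
  std::string result;
  auto it = names.begin();
  while (it != names.end()) {
    auto current = it++;  // advance so the next entry can be peeked
    // Skip consecutive variable declarations, keeping only the last one.
    if (it != names.end() && current->type == NameType::kVariableName &&
        it->type == NameType::kVariableName) {
      continue;
    }
    if (!result.empty()) result += ".";  // separate names with "."
    result += current->text;
  }
  return result;
}

int main() {
  // e.g. a function assigned through a chain of variables before landing on a property
  std::list<Name> names = {{"tmp", NameType::kVariableName},
                           {"a", NameType::kVariableName},
                           {"b", NameType::kLiteralName}};
  std::cout << MakeNameFromStack(names) << "\n";  // prints "a.b"
}
// --- end illustrative sketch ---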
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index 21c4da3be9..8f0f428a05 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -5,6 +5,7 @@
#ifndef V8_PARSING_FUNC_NAME_INFERRER_H_
#define V8_PARSING_FUNC_NAME_INFERRER_H_
+#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -62,13 +63,13 @@ class FuncNameInferrer : public ZoneObject {
// Adds a function to infer name for.
void AddFunction(FunctionLiteral* func_to_infer) {
if (IsOpen()) {
- funcs_to_infer_.Add(func_to_infer, zone());
+ funcs_to_infer_.push_back(func_to_infer);
}
}
void RemoveLastFunction() {
if (IsOpen() && !funcs_to_infer_.is_empty()) {
- funcs_to_infer_.RemoveLast();
+ funcs_to_infer_.pop_back();
}
}
@@ -94,7 +95,7 @@ class FuncNameInferrer : public ZoneObject {
NameType type;
};
- void Enter() { entries_stack_.Add(names_stack_.length(), zone()); }
+ void Enter() { entries_stack_.push_back(names_stack_.size()); }
void Leave();
@@ -107,9 +108,9 @@ class FuncNameInferrer : public ZoneObject {
void InferFunctionsNames();
AstValueFactory* ast_value_factory_;
- ZoneList<int> entries_stack_;
- ZoneList<Name> names_stack_;
- ZoneList<FunctionLiteral*> funcs_to_infer_;
+ ZoneChunkList<size_t> entries_stack_;
+ ZoneChunkList<Name> names_stack_;
+ ZoneChunkList<FunctionLiteral*> funcs_to_infer_;
Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index ee7e4b1569..0a58c4f0bd 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -4,10 +4,10 @@
#include "src/parsing/parse-info.h"
-#include "src/api.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
+#include "src/base/template-utils.h"
#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/scope-info.h"
@@ -17,14 +17,14 @@ namespace v8 {
namespace internal {
ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
- : zone_(std::make_shared<Zone>(zone_allocator, ZONE_NAME)),
+ : zone_(base::make_unique<Zone>(zone_allocator, ZONE_NAME)),
flags_(0),
extension_(nullptr),
script_scope_(nullptr),
unicode_cache_(nullptr),
stack_limit_(0),
hash_seed_(0),
- function_flags_(0),
+ function_kind_(FunctionKind::kNormalFunction),
script_id_(-1),
start_position_(0),
end_position_(0),
@@ -65,11 +65,14 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
set_wrapped_as_function(shared->is_wrapped());
set_allow_lazy_parsing(FLAG_lazy_inner_functions);
set_is_named_expression(shared->is_named_expression());
- set_function_flags(shared->flags());
set_start_position(shared->StartPosition());
set_end_position(shared->EndPosition());
function_literal_id_ = shared->FunctionLiteralId(isolate);
set_language_mode(shared->language_mode());
+ set_function_kind(shared->kind());
+ set_declaration(shared->is_declaration());
+ set_requires_instance_fields_initializer(
+ shared->requires_instance_fields_initializer());
set_asm_wasm_broken(shared->is_asm_wasm_broken());
Handle<Script> script(Script::cast(shared->script()), isolate);
@@ -100,19 +103,6 @@ ParseInfo::~ParseInfo() {}
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
-bool ParseInfo::is_declaration() const {
- return SharedFunctionInfo::IsDeclarationBit::decode(function_flags_);
-}
-
-FunctionKind ParseInfo::function_kind() const {
- return SharedFunctionInfo::FunctionKindBits::decode(function_flags_);
-}
-
-bool ParseInfo::requires_instance_fields_initializer() const {
- return SharedFunctionInfo::RequiresInstanceFieldsInitializer::decode(
- function_flags_);
-}
-
void ParseInfo::EmitBackgroundParseStatisticsOnBackgroundThread() {
// If runtime call stats was enabled by tracing, emit a trace event at the
// end of background parsing on the background thread.
@@ -141,11 +131,6 @@ void ParseInfo::UpdateBackgroundParseStatisticsOnMainThread(Isolate* isolate) {
set_runtime_call_stats(main_call_stats);
}
-void ParseInfo::ShareZone(ParseInfo* other) {
- DCHECK_EQ(0, zone_->allocation_size());
- zone_ = other->zone_;
-}
-
Handle<Script> ParseInfo::CreateScript(Isolate* isolate, Handle<String> source,
ScriptOriginOptions origin_options,
NativesFlag natives) {
@@ -186,11 +171,6 @@ AstValueFactory* ParseInfo::GetOrCreateAstValueFactory() {
return ast_value_factory();
}
-void ParseInfo::ShareAstValueFactory(ParseInfo* other) {
- DCHECK(!ast_value_factory_.get());
- ast_value_factory_ = other->ast_value_factory_;
-}
-
void ParseInfo::AllocateSourceRangeMap() {
DCHECK(block_coverage_enabled());
set_source_range_map(new (zone()) SourceRangeMap(zone()));
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 4abf3a1fb0..64a50806f5 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -54,12 +54,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
Zone* zone() const { return zone_.get(); }
- // Sets this parse info to share the same zone as |other|
- void ShareZone(ParseInfo* other);
-
- // Sets this parse info to share the same ast value factory as |other|
- void ShareAstValueFactory(ParseInfo* other);
-
// Convenience accessor methods for flags.
#define FLAG_ACCESSOR(flag, getter, setter) \
bool getter() const { return GetFlag(flag); } \
@@ -86,6 +80,10 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kWrappedAsFunction, is_wrapped_as_function,
set_wrapped_as_function)
FLAG_ACCESSOR(kAllowEvalCache, allow_eval_cache, set_allow_eval_cache)
+ FLAG_ACCESSOR(kIsDeclaration, is_declaration, set_declaration)
+ FLAG_ACCESSOR(kRequiresInstanceFieldsInitializer,
+ requires_instance_fields_initializer,
+ set_requires_instance_fields_initializer);
#undef FLAG_ACCESSOR
void set_parse_restriction(ParseRestriction restriction) {
@@ -143,11 +141,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
uint64_t hash_seed() const { return hash_seed_; }
void set_hash_seed(uint64_t hash_seed) { hash_seed_ = hash_seed; }
- int function_flags() const { return function_flags_; }
- void set_function_flags(int function_flags) {
- function_flags_ = function_flags;
- }
-
int start_position() const { return start_position_; }
void set_start_position(int start_position) {
start_position_ = start_position;
@@ -166,6 +159,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
function_literal_id_ = function_literal_id;
}
+ FunctionKind function_kind() const { return function_kind_; }
+ void set_function_kind(FunctionKind function_kind) {
+ function_kind_ = function_kind;
+ }
+
int max_function_literal_id() const { return max_function_literal_id_; }
void set_max_function_literal_id(int max_function_literal_id) {
max_function_literal_id_ = max_function_literal_id;
@@ -196,11 +194,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
return &pending_error_handler_;
}
- // Getters for individual function flags.
- bool is_declaration() const;
- FunctionKind function_kind() const;
- bool requires_instance_fields_initializer() const;
-
//--------------------------------------------------------------------------
// TODO(titzer): these should not be part of ParseInfo.
//--------------------------------------------------------------------------
@@ -249,19 +242,19 @@ class V8_EXPORT_PRIVATE ParseInfo {
kOnBackgroundThread = 1 << 13,
kWrappedAsFunction = 1 << 14, // Implicitly wrapped as function.
kAllowEvalCache = 1 << 15,
+ kIsDeclaration = 1 << 16,
+ kRequiresInstanceFieldsInitializer = 1 << 17,
};
//------------- Inputs to parsing and scope analysis -----------------------
- std::shared_ptr<Zone> zone_;
+ std::unique_ptr<Zone> zone_;
unsigned flags_;
v8::Extension* extension_;
DeclarationScope* script_scope_;
UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
uint64_t hash_seed_;
- // TODO(leszeks): Move any remaining flags used here either to the flags_
- // field or to other fields.
- int function_flags_;
+ FunctionKind function_kind_;
int script_id_;
int start_position_;
int end_position_;
@@ -276,7 +269,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Inputs+Outputs of parsing and scope analysis -----------------
std::unique_ptr<Utf16CharacterStream> character_stream_;
ConsumedPreParsedScopeData consumed_preparsed_scope_data_;
- std::shared_ptr<AstValueFactory> ast_value_factory_;
+ std::unique_ptr<AstValueFactory> ast_value_factory_;
const class AstStringConstants* ast_string_constants_;
const AstRawString* function_name_;
RuntimeCallStats* runtime_call_stats_;
@@ -285,7 +278,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
//----------- Output of parsing and scope analysis ------------------------
FunctionLiteral* literal_;
- std::shared_ptr<DeferredHandles> deferred_handles_;
PendingCompilationErrorHandler pending_error_handler_;
void SetFlag(Flag f) { flags_ |= f; }
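[Editor's note — illustration, not part of the patch] The ParseInfo changes above fold is_declaration and requires_instance_fields_initializer into the existing flags_ bit field, with getters and setters generated by the FLAG_ACCESSOR macro, while function_kind becomes a plain enum member. The sketch below shows the bit-flag accessor pattern under invented names (MiniParseInfo); it is a simplified stand-in, not ParseInfo's real interface.

// --- begin illustrative sketch ---
#include <cstdio>

class MiniParseInfo {
 public:
  enum Flag : unsigned {
    kIsDeclaration = 1 << 0,
    kRequiresInstanceFieldsInitializer = 1 << 1,
  };

// Generate a bool getter and setter for each flag bit.
#define FLAG_ACCESSOR(flag, getter, setter)      \
  bool getter() const { return GetFlag(flag); }  \
  void setter(bool val) { SetFlag(flag, val); }

  FLAG_ACCESSOR(kIsDeclaration, is_declaration, set_declaration)
  FLAG_ACCESSOR(kRequiresInstanceFieldsInitializer,
                requires_instance_fields_initializer,
                set_requires_instance_fields_initializer)
#undef FLAG_ACCESSOR

 private:
  bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
  void SetFlag(Flag f, bool v) { flags_ = v ? (flags_ | f) : (flags_ & ~f); }

  unsigned flags_ = 0;
};

int main() {
  MiniParseInfo info;
  info.set_declaration(true);
  std::printf("is_declaration: %d\n", info.is_declaration());  // prints 1
}
// --- end illustrative sketch ---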
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 6f6cff8e20..9d13724f06 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -21,6 +21,7 @@
#include "src/parsing/func-name-inferrer.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
+#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
@@ -416,21 +417,23 @@ class ParserBase {
void AdoptDestructuringAssignmentsFromParentState(int pos) {
const auto& outer_assignments =
outer_function_state_->destructuring_assignments_to_rewrite_;
- DCHECK_GE(outer_assignments.length(), pos);
- for (int i = pos; i < outer_assignments.length(); ++i) {
- auto expr = outer_assignments[i];
+ DCHECK_GE(outer_assignments.size(), pos);
+ auto it = outer_assignments.begin();
+ it.Advance(pos);
+ for (; it != outer_assignments.end(); ++it) {
+ auto expr = *it;
expr->set_scope(scope_);
- destructuring_assignments_to_rewrite_.Add(expr, scope_->zone());
+ destructuring_assignments_to_rewrite_.push_back(expr);
}
outer_function_state_->RewindDestructuringAssignments(pos);
}
- const ZoneList<RewritableExpressionT>&
+ const ZoneChunkList<RewritableExpressionT>&
destructuring_assignments_to_rewrite() const {
return destructuring_assignments_to_rewrite_;
}
- ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
+ ZoneVector<typename ExpressionClassifier::Error>* GetReportedErrorList() {
return &reported_errors_;
}
@@ -472,26 +475,27 @@ class ParserBase {
private:
void AddDestructuringAssignment(RewritableExpressionT expr) {
- destructuring_assignments_to_rewrite_.Add(expr, scope_->zone());
+ destructuring_assignments_to_rewrite_.push_back(expr);
}
// Properties count estimation.
int expected_property_count_;
+ // How many suspends are needed for this function.
+ int suspend_count_;
+
FunctionState** function_state_stack_;
FunctionState* outer_function_state_;
DeclarationScope* scope_;
- ZoneList<RewritableExpressionT> destructuring_assignments_to_rewrite_;
+ ZoneChunkList<RewritableExpressionT> destructuring_assignments_to_rewrite_;
- ZoneList<typename ExpressionClassifier::Error> reported_errors_;
+ // We use a ZoneVector here because we need to do a lot of random access.
+ ZoneVector<typename ExpressionClassifier::Error> reported_errors_;
// A reason, if any, why this function should not be optimized.
BailoutReason dont_optimize_reason_;
- // How many suspends are needed for this function.
- int suspend_count_;
-
// Record whether the next (=== immediately following) function literal is
// preceded by a parenthesis / exclamation mark. Also record the previous
// state.
@@ -594,7 +598,6 @@ class ParserBase {
typename Types::ClassPropertyList instance_fields;
FunctionLiteralT constructor;
- // TODO(gsathya): Use a bitfield store all the booleans.
bool has_seen_constructor;
bool has_name_static_property;
bool has_static_computed_names;
@@ -745,8 +748,7 @@ class ParserBase {
Next();
return;
}
- if (scanner()->HasAnyLineTerminatorBeforeNext() ||
- tok == Token::RBRACE ||
+ if (scanner()->HasLineTerminatorBeforeNext() || tok == Token::RBRACE ||
tok == Token::EOS) {
return;
}
@@ -959,8 +961,7 @@ class ParserBase {
void ReportClassifierError(
const typename ExpressionClassifier::Error& error) {
- impl()->ReportMessageAt(error.location, error.message, error.arg,
- error.type);
+ impl()->ReportMessageAt(error.location, error.message, error.arg);
}
void ValidateExpression(bool* ok) {
@@ -1089,7 +1090,7 @@ class ParserBase {
function_state_->kind(), is_strict_reserved, is_await, ok);
}
- IdentifierT ParseIdentifierName(bool* ok);
+ V8_INLINE IdentifierT ParseIdentifierName(bool* ok);
ExpressionT ParseIdentifierNameOrPrivateName(bool* ok);
@@ -1135,8 +1136,7 @@ class ParserBase {
ClassLiteralPropertyT ParseClassPropertyDefinition(
ClassLiteralChecker* checker, ClassInfo* class_info,
IdentifierT* property_name, bool has_extends, bool* is_computed_name,
- bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
- bool* is_static, bool* has_name_static_property, bool* ok);
+ ClassLiteralProperty::Kind* property_kind, bool* is_static, bool* ok);
ExpressionT ParseClassFieldInitializer(ClassInfo* class_info, bool is_static,
bool* ok);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
@@ -1152,15 +1152,15 @@ class ParserBase {
ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
ExpressionT ParseYieldExpression(bool accept_IN, bool* ok);
- ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
+ V8_INLINE ExpressionT ParseConditionalExpression(bool accept_IN, bool* ok);
ExpressionT ParseBinaryExpression(int prec, bool accept_IN, bool* ok);
ExpressionT ParseUnaryExpression(bool* ok);
- ExpressionT ParsePostfixExpression(bool* ok);
- ExpressionT ParseLeftHandSideExpression(bool* ok);
+ V8_INLINE ExpressionT ParsePostfixExpression(bool* ok);
+ V8_INLINE ExpressionT ParseLeftHandSideExpression(bool* ok);
ExpressionT ParseMemberWithNewPrefixesExpression(bool* is_async, bool* ok);
- ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
- ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
- bool* is_async, bool* ok);
+ V8_INLINE ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
+ V8_INLINE ExpressionT ParseMemberExpressionContinuation(
+ ExpressionT expression, bool* is_async, bool* ok);
// `rewritable_length`: length of the destructuring_assignments_to_rewrite()
// queue in the parent function state, prior to parsing of formal parameters.
@@ -1183,7 +1183,7 @@ class ParserBase {
ExpressionT ParseImportExpressions(bool* ok);
ExpressionT ParseNewTargetExpression(bool* ok);
- void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
+ V8_INLINE void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
void ParseFormalParameterList(FormalParametersT* parameters, bool* ok);
void CheckArityRestrictions(int param_count, FunctionKind function_type,
bool has_rest, int formals_start_pos,
@@ -1228,14 +1228,19 @@ class ParserBase {
USE(result);
DCHECK_EQ(result, kLazyParsingComplete);
}
- LazyParsingResult ParseStatementList(StatementListT body,
- Token::Value end_token, bool may_abort,
- bool* ok);
+ V8_INLINE LazyParsingResult ParseStatementList(StatementListT body,
+ Token::Value end_token,
+ bool may_abort, bool* ok);
StatementT ParseStatementListItem(bool* ok);
- StatementT ParseStatement(ZonePtrList<const AstRawString>* labels, bool* ok) {
- return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+
+ StatementT ParseStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
+ bool* ok) {
+ return ParseStatement(labels, own_labels,
+ kDisallowLabelledFunctionStatement, ok);
}
StatementT ParseStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
AllowLabelledFunctionStatement allow_function,
bool* ok);
BlockT ParseBlock(ZonePtrList<const AstRawString>* labels, bool* ok);
@@ -1259,6 +1264,7 @@ class ParserBase {
StatementT ParseExpressionOrLabelledStatement(
ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
AllowLabelledFunctionStatement allow_function, bool* ok);
StatementT ParseIfStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
@@ -1269,34 +1275,41 @@ class ParserBase {
StatementT ParseWithStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
StatementT ParseDoWhileStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
bool* ok);
StatementT ParseWhileStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
bool* ok);
StatementT ParseThrowStatement(bool* ok);
StatementT ParseSwitchStatement(ZonePtrList<const AstRawString>* labels,
bool* ok);
- StatementT ParseTryStatement(bool* ok);
+ V8_INLINE StatementT ParseTryStatement(bool* ok);
StatementT ParseForStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
bool* ok);
StatementT ParseForEachStatementWithDeclarations(
int stmt_pos, ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
- Scope* inner_block_scope, bool* ok);
+ ZonePtrList<const AstRawString>* own_labels, Scope* inner_block_scope,
+ bool* ok);
StatementT ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
- ForInfo* for_info, ZonePtrList<const AstRawString>* labels, bool* ok);
+ ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok);
// Parse a C-style for loop: 'for (<init>; <cond>; <next>) { ... }'
// "for (<init>;" is assumed to have been parser already.
- ForStatementT ParseStandardForLoop(int stmt_pos,
- ZonePtrList<const AstRawString>* labels,
- ExpressionT* cond, StatementT* next,
- StatementT* body, bool* ok);
+ ForStatementT ParseStandardForLoop(
+ int stmt_pos, ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, ExpressionT* cond,
+ StatementT* next, StatementT* body, bool* ok);
// Same as the above, but handles those cases where <init> is a
// lexical variable declaration.
StatementT ParseStandardForLoopWithLexicalDeclarations(
int stmt_pos, StatementT init, ForInfo* for_info,
- ZonePtrList<const AstRawString>* labels, bool* ok);
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok);
StatementT ParseForAwaitStatement(ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
bool* ok);
bool IsNextLetKeyword();
@@ -1574,17 +1587,18 @@ ParserBase<Impl>::FunctionState::FunctionState(
DeclarationScope* scope)
: BlockState(scope_stack, scope),
expected_property_count_(0),
+ suspend_count_(0),
function_state_stack_(function_state_stack),
outer_function_state_(*function_state_stack),
scope_(scope),
- destructuring_assignments_to_rewrite_(16, scope->zone()),
- reported_errors_(16, scope->zone()),
+ destructuring_assignments_to_rewrite_(scope->zone()),
+ reported_errors_(scope_->zone()),
dont_optimize_reason_(BailoutReason::kNoReason),
- suspend_count_(0),
next_function_is_likely_called_(false),
previous_function_was_likely_called_(false),
contains_function_or_eval_(false) {
*function_state_stack = this;
+ reported_errors_.reserve(16);
if (outer_function_state_) {
outer_function_state_->previous_function_was_likely_called_ =
outer_function_state_->next_function_is_likely_called_;
@@ -1875,7 +1889,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
return impl()->ExpressionFromLiteral(Next(), beg_pos);
case Token::ASYNC:
- if (!scanner()->HasAnyLineTerminatorAfterNext() &&
+ if (!scanner()->HasLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
BindingPatternUnexpectedToken();
Consume(Token::ASYNC);
@@ -2179,10 +2193,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
int pos = peek_position();
if (!*is_generator && token == Token::ASYNC &&
- !scanner()->HasAnyLineTerminatorAfterNext()) {
+ !scanner()->HasLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
token = peek();
- if (token == Token::MUL && !scanner()->HasAnyLineTerminatorBeforeNext()) {
+ if (token == Token::MUL && !scanner()->HasLineTerminatorBeforeNext()) {
Consume(Token::MUL);
token = peek();
*is_generator = true;
@@ -2300,11 +2314,9 @@ template <typename Impl>
typename ParserBase<Impl>::ClassLiteralPropertyT
ParserBase<Impl>::ParseClassPropertyDefinition(
ClassLiteralChecker* checker, ClassInfo* class_info, IdentifierT* name,
- bool has_extends, bool* is_computed_name, bool* has_seen_constructor,
- ClassLiteralProperty::Kind* property_kind, bool* is_static,
- bool* has_name_static_property, bool* ok) {
- DCHECK_NOT_NULL(has_seen_constructor);
- DCHECK_NOT_NULL(has_name_static_property);
+ bool has_extends, bool* is_computed_name,
+ ClassLiteralProperty::Kind* property_kind, bool* is_static, bool* ok) {
+ DCHECK_NOT_NULL(class_info);
bool is_get = false;
bool is_set = false;
bool is_generator = false;
@@ -2353,8 +2365,9 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
CHECK_OK_CUSTOM(NullLiteralProperty));
}
- if (!*has_name_static_property && *is_static && impl()->IsName(*name)) {
- *has_name_static_property = true;
+ if (!class_info->has_name_static_property && *is_static &&
+ impl()->IsName(*name)) {
+ class_info->has_name_static_property = true;
}
switch (kind) {
@@ -2416,7 +2429,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
FunctionKind kind = MethodKindFor(is_generator, is_async);
if (!*is_static && impl()->IsConstructor(*name)) {
- *has_seen_constructor = true;
+ class_info->has_seen_constructor = true;
kind = has_extends ? FunctionKind::kDerivedConstructor
: FunctionKind::kBaseConstructor;
}
@@ -2875,11 +2888,11 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
this, classifier()->duplicate_finder());
Scope::Snapshot scope_snapshot(scope());
- int rewritable_length =
- function_state_->destructuring_assignments_to_rewrite().length();
+ int rewritable_length = static_cast<int>(
+ function_state_->destructuring_assignments_to_rewrite().size());
bool is_async = peek() == Token::ASYNC &&
- !scanner()->HasAnyLineTerminatorAfterNext() &&
+ !scanner()->HasLineTerminatorAfterNext() &&
IsValidArrowFormalParametersStart(PeekAhead());
bool parenthesized_formals = peek() == Token::LPAREN;
@@ -3056,7 +3069,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
// The following initialization is necessary.
ExpressionT expression = impl()->NullExpression();
bool delegating = false; // yield*
- if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
+ if (!scanner()->HasLineTerminatorBeforeNext()) {
if (Check(Token::MUL)) delegating = true;
switch (peek()) {
case Token::EOS:
@@ -3307,8 +3320,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
int lhs_beg_pos = peek_position();
ExpressionT expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
- Token::IsCountOp(peek())) {
+ if (!scanner()->HasLineTerminatorBeforeNext() && Token::IsCountOp(peek())) {
BindingPatternUnexpectedToken();
ArrowFormalParametersUnexpectedToken();
@@ -3375,6 +3387,7 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
// function literal eagerly, we can also compile it eagerly.
if (result->IsFunctionLiteral()) {
result->AsFunctionLiteral()->SetShouldEagerCompile();
+ result->AsFunctionLiteral()->mark_as_iife();
}
}
Scanner::Location spread_pos;
@@ -4168,7 +4181,7 @@ ParserBase<Impl>::ParseAsyncFunctionDeclaration(
// ( FormalParameters[Await] ) { AsyncFunctionBody }
DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
int pos = position();
- if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ if (scanner()->HasLineTerminatorBeforeNext()) {
*ok = false;
impl()->ReportUnexpectedToken(scanner()->current_token());
return impl()->NullStatement();
@@ -4360,7 +4373,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
- if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
+ if (peek() == Token::ARROW && scanner_->HasLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
// `=> ...` is never a valid expression, so report as syntax error.
// If next token is not `=>`, it's a syntax error anyways.
@@ -4547,8 +4560,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
bool is_constructor = !class_info.has_seen_constructor;
ClassLiteralPropertyT property = ParseClassPropertyDefinition(
&checker, &class_info, &property_name, has_extends, &is_computed_name,
- &class_info.has_seen_constructor, &property_kind, &is_static,
- &class_info.has_name_static_property, CHECK_OK);
+ &property_kind, &is_static, CHECK_OK);
if (!class_info.has_static_computed_names && is_static &&
is_computed_name) {
class_info.has_static_computed_names = true;
@@ -4971,7 +4983,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
break;
case Token::ASYNC:
if (PeekAhead() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorAfterNext()) {
+ !scanner()->HasLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
return ParseAsyncFunctionDeclaration(nullptr, false, ok);
}
@@ -4979,12 +4991,13 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
default:
break;
}
- return ParseStatement(nullptr, kAllowLabelledFunctionStatement, ok);
+ return ParseStatement(nullptr, nullptr, kAllowLabelledFunctionStatement, ok);
}
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
AllowLabelledFunctionStatement allow_function, bool* ok) {
// Statement ::
// Block
@@ -5003,6 +5016,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
// TryStatement
// DebuggerStatement
+ // {own_labels} is always a subset of {labels}.
+ DCHECK_IMPLIES(labels == nullptr, own_labels == nullptr);
+
// Note: Since labels can only be used by 'break' and 'continue'
// statements, which themselves are only valid within blocks,
// iterations or 'switch' statements (i.e., BreakableStatements),
@@ -5018,14 +5034,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
case Token::IF:
return ParseIfStatement(labels, ok);
case Token::DO:
- return ParseDoWhileStatement(labels, ok);
+ return ParseDoWhileStatement(labels, own_labels, ok);
case Token::WHILE:
- return ParseWhileStatement(labels, ok);
+ return ParseWhileStatement(labels, own_labels, ok);
case Token::FOR:
if (V8_UNLIKELY(is_async_function() && PeekAhead() == Token::AWAIT)) {
- return ParseForAwaitStatement(labels, ok);
+ return ParseForAwaitStatement(labels, own_labels, ok);
}
- return ParseForStatement(labels, ok);
+ return ParseForStatement(labels, own_labels, ok);
case Token::CONTINUE:
return ParseContinueStatement(ok);
case Token::BREAK:
@@ -5068,7 +5084,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
case Token::VAR:
return ParseVariableStatement(kStatement, nullptr, ok);
case Token::ASYNC:
- if (!scanner()->HasAnyLineTerminatorAfterNext() &&
+ if (!scanner()->HasLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
impl()->ReportMessageAt(
scanner()->peek_location(),
@@ -5078,7 +5094,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatement(
}
V8_FALLTHROUGH;
default:
- return ParseExpressionOrLabelledStatement(labels, allow_function, ok);
+ return ParseExpressionOrLabelledStatement(labels, own_labels,
+ allow_function, ok);
}
}
@@ -5118,7 +5135,7 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
ZonePtrList<const AstRawString>* labels, bool* ok) {
if (is_strict(language_mode()) || peek() != Token::FUNCTION) {
- return ParseStatement(labels, ok);
+ return ParseStatement(labels, nullptr, ok);
} else {
// Make a block around the statement for a lexical binding
// is introduced by a FunctionDeclaration.
@@ -5178,6 +5195,7 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseExpressionOrLabelledStatement(
ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels,
AllowLabelledFunctionStatement allow_function, bool* ok) {
// ExpressionStatement | LabelledStatement ::
// Expression ';'
@@ -5203,7 +5221,7 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
// However, ASI may insert a line break before an identifier or a brace.
if (next_next != Token::LBRACK &&
((next_next != Token::LBRACE && next_next != Token::IDENTIFIER) ||
- scanner_->HasAnyLineTerminatorAfterNext())) {
+ scanner_->HasLineTerminatorAfterNext())) {
break;
}
impl()->ReportMessageAt(scanner()->peek_location(),
@@ -5221,22 +5239,22 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
impl()->IsIdentifier(expr)) {
// The whole expression was a single identifier, and not, e.g.,
// something starting with an identifier or a parenthesized identifier.
- labels = impl()->DeclareLabel(labels, impl()->AsIdentifierExpression(expr),
- CHECK_OK);
+ impl()->DeclareLabel(&labels, &own_labels,
+ impl()->AsIdentifierExpression(expr), CHECK_OK);
Consume(Token::COLON);
// ES#sec-labelled-function-declarations Labelled Function Declarations
if (peek() == Token::FUNCTION && is_sloppy(language_mode()) &&
allow_function == kAllowLabelledFunctionStatement) {
return ParseFunctionDeclaration(ok);
}
- return ParseStatement(labels, allow_function, ok);
+ return ParseStatement(labels, own_labels, allow_function, ok);
}
// If we have an extension, we allow a native function declaration.
// A native function declaration starts with "native function" with
// no line-terminator between the two words.
if (extension_ != nullptr && peek() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorBeforeNext() && impl()->IsNative(expr) &&
+ !scanner()->HasLineTerminatorBeforeNext() && impl()->IsNative(expr) &&
!scanner()->literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -5289,7 +5307,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseContinueStatement(
Expect(Token::CONTINUE, CHECK_OK);
IdentifierT label = impl()->NullIdentifier();
Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
+ if (!scanner()->HasLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
@@ -5326,7 +5344,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseBreakStatement(
Expect(Token::BREAK, CHECK_OK);
IdentifierT label = impl()->NullIdentifier();
Token::Value tok = peek();
- if (!scanner()->HasAnyLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
+ if (!scanner()->HasLineTerminatorBeforeNext() && tok != Token::SEMICOLON &&
tok != Token::RBRACE && tok != Token::EOS) {
// ECMA allows "eval" or "arguments" as labels even in strict mode.
label = ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
@@ -5380,7 +5398,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
Token::Value tok = peek();
ExpressionT return_value = impl()->NullExpression();
- if (scanner()->HasAnyLineTerminatorBeforeNext() || tok == Token::SEMICOLON ||
+ if (scanner()->HasLineTerminatorBeforeNext() || tok == Token::SEMICOLON ||
tok == Token::RBRACE || tok == Token::EOS) {
if (IsDerivedConstructor(function_state_->kind())) {
return_value = impl()->ThisExpression(loc.beg_pos);
@@ -5421,7 +5439,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
{
BlockState block_state(&scope_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
- body = ParseStatement(labels, CHECK_OK);
+ body = ParseStatement(labels, nullptr, CHECK_OK);
with_scope->set_end_position(scanner()->location().end_pos);
}
return factory()->NewWithStatement(with_scope, expr, body, pos);
@@ -5429,11 +5447,13 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWithStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok) {
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- auto loop = factory()->NewDoWhileStatement(labels, peek_position());
+ auto loop =
+ factory()->NewDoWhileStatement(labels, own_labels, peek_position());
typename Types::Target target(this, loop);
SourceRange body_range;
@@ -5442,7 +5462,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
Expect(Token::DO, CHECK_OK);
{
SourceRangeScope range_scope(scanner(), &body_range);
- body = ParseStatement(nullptr, CHECK_OK);
+ body = ParseStatement(nullptr, nullptr, CHECK_OK);
}
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -5464,11 +5484,12 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseDoWhileStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok) {
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- auto loop = factory()->NewWhileStatement(labels, peek_position());
+ auto loop = factory()->NewWhileStatement(labels, own_labels, peek_position());
typename Types::Target target(this, loop);
SourceRange body_range;
@@ -5480,7 +5501,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseWhileStatement(
Expect(Token::RPAREN, CHECK_OK);
{
SourceRangeScope range_scope(scanner(), &body_range);
- body = ParseStatement(nullptr, CHECK_OK);
+ body = ParseStatement(nullptr, nullptr, CHECK_OK);
}
loop->Initialize(cond, body);
@@ -5497,7 +5518,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseThrowStatement(
Expect(Token::THROW, CHECK_OK);
int pos = position();
- if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ if (scanner()->HasLineTerminatorBeforeNext()) {
ReportMessage(MessageTemplate::kNewlineAfterThrow);
*ok = false;
return impl()->NullStatement();
@@ -5679,7 +5700,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok) {
// Either a standard for loop
// for (<init>; <cond>; <next>) { ... }
// or a for-each loop
@@ -5719,8 +5741,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
if (CheckInOrOf(&for_info.mode)) {
scope()->set_is_hidden();
- return ParseForEachStatementWithDeclarations(stmt_pos, &for_info, labels,
- inner_block_scope, ok);
+ return ParseForEachStatementWithDeclarations(
+ stmt_pos, &for_info, labels, own_labels, inner_block_scope, ok);
}
Expect(Token::SEMICOLON, CHECK_OK);
@@ -5732,8 +5754,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
// No variable declarations will have been created in inner_block_scope.
DCHECK_NULL(finalized);
USE(finalized);
- return ParseStandardForLoopWithLexicalDeclarations(stmt_pos, init,
- &for_info, labels, ok);
+ return ParseStandardForLoopWithLexicalDeclarations(
+ stmt_pos, init, &for_info, labels, own_labels, ok);
}
StatementT init = impl()->NullStatement();
@@ -5745,7 +5767,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
if (CheckInOrOf(&for_info.mode)) {
return ParseForEachStatementWithDeclarations(stmt_pos, &for_info, labels,
- nullptr, ok);
+ own_labels, nullptr, ok);
}
init = impl()->BuildInitializationBlock(&for_info.parsing_result, nullptr,
@@ -5768,9 +5790,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
}
if (is_for_each) {
- return ParseForEachStatementWithoutDeclarations(stmt_pos, expression,
- lhs_beg_pos, lhs_end_pos,
- &for_info, labels, ok);
+ return ParseForEachStatementWithoutDeclarations(
+ stmt_pos, expression, lhs_beg_pos, lhs_end_pos, &for_info, labels,
+ own_labels, ok);
}
// Initializer is just an expression.
init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
@@ -5782,8 +5804,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
ExpressionT cond = impl()->NullExpression();
StatementT next = impl()->NullStatement();
StatementT body = impl()->NullStatement();
- ForStatementT loop =
- ParseStandardForLoop(stmt_pos, labels, &cond, &next, &body, CHECK_OK);
+ ForStatementT loop = ParseStandardForLoop(stmt_pos, labels, own_labels, &cond,
+ &next, &body, CHECK_OK);
loop->Initialize(init, cond, next, body);
return loop;
}
@@ -5792,7 +5814,8 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithDeclarations(
int stmt_pos, ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
- Scope* inner_block_scope, bool* ok) {
+ ZonePtrList<const AstRawString>* own_labels, Scope* inner_block_scope,
+ bool* ok) {
// Just one declaration followed by in/of.
if (for_info->parsing_result.declarations.size() != 1) {
impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
@@ -5820,7 +5843,8 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
BlockT init_block = impl()->RewriteForVarInLegacy(*for_info);
- auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
+ auto loop = factory()->NewForEachStatement(for_info->mode, labels, own_labels,
+ stmt_pos);
typename Types::Target target(this, loop);
ExpressionT enumerable = impl()->NullExpression();
@@ -5850,7 +5874,7 @@ ParserBase<Impl>::ParseForEachStatementWithDeclarations(
SourceRange body_range;
SourceRangeScope range_scope(scanner(), &body_range);
- StatementT body = ParseStatement(nullptr, CHECK_OK);
+ StatementT body = ParseStatement(nullptr, nullptr, CHECK_OK);
impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
impl()->DesugarBindingInForEachStatement(for_info, &body_block,
@@ -5888,7 +5912,8 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
- ForInfo* for_info, ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ForInfo* for_info, ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok) {
// Initializer is reference followed by in/of.
if (!expression->IsArrayLiteral() && !expression->IsObjectLiteral()) {
expression = CheckAndRewriteReferenceExpression(
@@ -5896,7 +5921,8 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
kSyntaxError, CHECK_OK);
}
- auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
+ auto loop = factory()->NewForEachStatement(for_info->mode, labels, own_labels,
+ stmt_pos);
typename Types::Target target(this, loop);
ExpressionT enumerable = impl()->NullExpression();
@@ -5915,7 +5941,7 @@ ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
SourceRange body_range;
SourceRangeScope range_scope(scanner(), &body_range);
- body = ParseStatement(nullptr, CHECK_OK);
+ body = ParseStatement(nullptr, nullptr, CHECK_OK);
impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
}
return impl()->InitializeForEachStatement(loop, expression, enumerable, body);
@@ -5925,7 +5951,8 @@ template <typename Impl>
typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
int stmt_pos, StatementT init, ForInfo* for_info,
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok) {
// The condition and the next statement of the for loop must be parsed
// in a new scope.
Scope* inner_scope = NewScope(BLOCK_SCOPE);
@@ -5936,8 +5963,8 @@ ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
{
BlockState block_state(&scope_, inner_scope);
scope()->set_start_position(scanner()->location().beg_pos);
- loop =
- ParseStandardForLoop(stmt_pos, labels, &cond, &next, &body, CHECK_OK);
+ loop = ParseStandardForLoop(stmt_pos, labels, own_labels, &cond, &next,
+ &body, CHECK_OK);
scope()->set_end_position(scanner()->location().end_pos);
}
@@ -5980,9 +6007,10 @@ ParserBase<Impl>::ParseStandardForLoopWithLexicalDeclarations(
template <typename Impl>
typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
- int stmt_pos, ZonePtrList<const AstRawString>* labels, ExpressionT* cond,
+ int stmt_pos, ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, ExpressionT* cond,
StatementT* next, StatementT* body, bool* ok) {
- ForStatementT loop = factory()->NewForStatement(labels, stmt_pos);
+ ForStatementT loop = factory()->NewForStatement(labels, own_labels, stmt_pos);
typename Types::Target target(this, loop);
if (peek() != Token::SEMICOLON) {
@@ -5999,7 +6027,7 @@ typename ParserBase<Impl>::ForStatementT ParserBase<Impl>::ParseStandardForLoop(
SourceRange body_range;
{
SourceRangeScope range_scope(scanner(), &body_range);
- *body = ParseStatement(nullptr, CHECK_OK);
+ *body = ParseStatement(nullptr, nullptr, CHECK_OK);
}
impl()->RecordIterationStatementSourceRange(loop, body_range);
@@ -6019,7 +6047,8 @@ void ParserBase<Impl>::MarkLoopVariableAsAssigned(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
- ZonePtrList<const AstRawString>* labels, bool* ok) {
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, bool* ok) {
// for await '(' ForDeclaration of AssignmentExpression ')'
DCHECK(is_async_function());
@@ -6036,7 +6065,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
scope()->set_start_position(scanner()->location().beg_pos);
scope()->set_is_hidden();
- auto loop = factory()->NewForOfStatement(labels, stmt_pos);
+ auto loop = factory()->NewForOfStatement(labels, own_labels, stmt_pos);
typename Types::Target target(this, loop);
ExpressionT each_variable = impl()->NullExpression();
@@ -6119,7 +6148,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
SourceRange body_range;
SourceRangeScope range_scope(scanner(), &body_range);
- body = ParseStatement(nullptr, CHECK_OK);
+ body = ParseStatement(nullptr, nullptr, CHECK_OK);
scope()->set_end_position(scanner()->location().end_pos);
impl()->RecordIterationStatementSourceRange(loop, range_scope.Finalize());
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index dacce7d38f..41ff551091 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -412,7 +412,8 @@ Parser::Parser(ParseInfo* info)
info->runtime_call_stats(), info->logger(),
info->script().is_null() ? -1 : info->script()->id(),
info->is_module(), true),
- scanner_(info->unicode_cache()),
+ scanner_(info->unicode_cache(), info->character_stream(),
+ info->is_module()),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
source_range_map_(info->source_range_map()),
@@ -507,7 +508,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
// Initialize parser state.
DeserializeScopeChain(isolate, info, info->maybe_outer_scope_info());
- scanner_.Initialize(info->character_stream(), info->is_module());
+ scanner_.Initialize();
FunctionLiteral* result = DoParseProgram(isolate, info);
MaybeResetCharacterStream(info, result);
@@ -701,7 +702,7 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
// Initialize parser state.
Handle<String> name(shared_info->Name(), isolate);
info->set_function_name(ast_value_factory()->GetString(name));
- scanner_.Initialize(info->character_stream(), info->is_module());
+ scanner_.Initialize();
FunctionLiteral* result =
DoParseFunction(isolate, info, info->function_name());
@@ -775,7 +776,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
if (IsArrowFunction(kind)) {
if (IsAsyncFunction(kind)) {
- DCHECK(!scanner()->HasAnyLineTerminatorAfterNext());
+ DCHECK(!scanner()->HasLineTerminatorAfterNext());
if (!Check(Token::ASYNC)) {
CHECK(stack_overflow());
return nullptr;
@@ -798,7 +799,7 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
ParserFormalParameters formals(scope);
// The outer FunctionState should not contain destructuring assignments.
DCHECK_EQ(0,
- function_state.destructuring_assignments_to_rewrite().length());
+ function_state.destructuring_assignments_to_rewrite().size());
{
// Parsing patterns as variable reference expression creates
// NewUnresolved references in current scope. Enter arrow function
@@ -943,10 +944,8 @@ const AstRawString* Parser::ParseModuleSpecifier(bool* ok) {
return GetSymbol();
}
-void Parser::ParseExportClause(ZonePtrList<const AstRawString>* export_names,
- ZoneList<Scanner::Location>* export_locations,
- ZonePtrList<const AstRawString>* local_names,
- Scanner::Location* reserved_loc, bool* ok) {
+ZoneChunkList<Parser::ExportClauseData>* Parser::ParseExportClause(
+ Scanner::Location* reserved_loc, bool* ok) {
// ExportClause :
// '{' '}'
// '{' ExportsList '}'
@@ -959,8 +958,10 @@ void Parser::ParseExportClause(ZonePtrList<const AstRawString>* export_names,
// ExportSpecifier :
// IdentifierName
// IdentifierName 'as' IdentifierName
+ ZoneChunkList<ExportClauseData>* export_data =
+ new (zone()) ZoneChunkList<ExportClauseData>(zone());
- Expect(Token::LBRACE, CHECK_OK_VOID);
+ Expect(Token::LBRACE, CHECK_OK);
Token::Value name_tok;
while ((name_tok = peek()) != Token::RBRACE) {
@@ -971,11 +972,11 @@ void Parser::ParseExportClause(ZonePtrList<const AstRawString>* export_names,
parsing_module_)) {
*reserved_loc = scanner()->location();
}
- const AstRawString* local_name = ParseIdentifierName(CHECK_OK_VOID);
+ const AstRawString* local_name = ParseIdentifierName(CHECK_OK);
const AstRawString* export_name = nullptr;
Scanner::Location location = scanner()->location();
if (CheckContextualKeyword(Token::AS)) {
- export_name = ParseIdentifierName(CHECK_OK_VOID);
+ export_name = ParseIdentifierName(CHECK_OK);
// Set the location to the whole "a as b" string, so that it makes sense
// both for errors due to "a" and for errors due to "b".
location.end_pos = scanner()->location().end_pos;
@@ -983,14 +984,13 @@ void Parser::ParseExportClause(ZonePtrList<const AstRawString>* export_names,
if (export_name == nullptr) {
export_name = local_name;
}
- export_names->Add(export_name, zone());
- local_names->Add(local_name, zone());
- export_locations->Add(location, zone());
+ export_data->push_back({export_name, local_name, location});
if (peek() == Token::RBRACE) break;
- Expect(Token::COMMA, CHECK_OK_VOID);
+ Expect(Token::COMMA, CHECK_OK);
}
- Expect(Token::RBRACE, CHECK_OK_VOID);
+ Expect(Token::RBRACE, CHECK_OK);
+ return export_data;
}
ZonePtrList<const Parser::NamedImport>* Parser::ParseNamedImports(int pos,
@@ -1179,7 +1179,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
case Token::ASYNC:
if (PeekAhead() == Token::FUNCTION &&
- !scanner()->HasAnyLineTerminatorAfterNext()) {
+ !scanner()->HasLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
result = ParseAsyncFunctionDeclaration(&local_names, true, CHECK_OK);
break;
@@ -1264,11 +1264,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// encountered, and then throw a SyntaxError if we are in the
// non-FromClause case.
Scanner::Location reserved_loc = Scanner::Location::invalid();
- ZonePtrList<const AstRawString> export_names(1, zone());
- ZoneList<Scanner::Location> export_locations(1, zone());
- ZonePtrList<const AstRawString> original_names(1, zone());
- ParseExportClause(&export_names, &export_locations, &original_names,
- &reserved_loc, CHECK_OK);
+ ZoneChunkList<ExportClauseData>* export_data =
+ ParseExportClause(&reserved_loc, CHECK_OK);
const AstRawString* module_specifier = nullptr;
Scanner::Location specifier_loc;
if (CheckContextualKeyword(Token::FROM)) {
@@ -1281,21 +1278,18 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
return nullptr;
}
ExpectSemicolon(CHECK_OK);
- const int length = export_names.length();
- DCHECK_EQ(length, original_names.length());
- DCHECK_EQ(length, export_locations.length());
if (module_specifier == nullptr) {
- for (int i = 0; i < length; ++i) {
- module()->AddExport(original_names[i], export_names[i],
- export_locations[i], zone());
+ for (const ExportClauseData& data : *export_data) {
+ module()->AddExport(data.local_name, data.export_name, data.location,
+ zone());
}
- } else if (length == 0) {
+ } else if (export_data->is_empty()) {
module()->AddEmptyImport(module_specifier, specifier_loc);
} else {
- for (int i = 0; i < length; ++i) {
- module()->AddExport(original_names[i], export_names[i],
- module_specifier, export_locations[i],
- specifier_loc, zone());
+ for (const ExportClauseData& data : *export_data) {
+ module()->AddExport(data.local_name, data.export_name,
+ module_specifier, data.location, specifier_loc,
+ zone());
}
}
return factory()->NewEmptyStatement(pos);
@@ -1410,7 +1404,7 @@ Block* Parser::BuildInitializationBlock(
DeclarationParsingResult* parsing_result,
ZonePtrList<const AstRawString>* names, bool* ok) {
Block* result = factory()->NewBlock(1, true);
- for (auto declaration : parsing_result->declarations) {
+ for (const auto& declaration : parsing_result->declarations) {
DeclareAndInitializeVariables(result, &(parsing_result->descriptor),
&declaration, names, CHECK_OK);
}
@@ -1473,29 +1467,40 @@ Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
pos);
}
-ZonePtrList<const AstRawString>* Parser::DeclareLabel(
- ZonePtrList<const AstRawString>* labels, VariableProxy* var, bool* ok) {
+void Parser::DeclareLabel(ZonePtrList<const AstRawString>** labels,
+ ZonePtrList<const AstRawString>** own_labels,
+ VariableProxy* var, bool* ok) {
DCHECK(IsIdentifier(var));
const AstRawString* label = var->raw_name();
+
// TODO(1240780): We don't check for redeclaration of labels
// during preparsing since keeping track of the set of active
// labels requires nontrivial changes to the way scopes are
// structured. However, these are probably changes we want to
// make later anyway so we should go back and fix this then.
- if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
+ if (ContainsLabel(*labels, label) || TargetStackContainsLabel(label)) {
ReportMessage(MessageTemplate::kLabelRedeclaration, label);
*ok = false;
- return nullptr;
+ return;
}
- if (labels == nullptr) {
- labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
+
+ // Add {label} to both {labels} and {own_labels}.
+ if (*labels == nullptr) {
+ DCHECK_NULL(*own_labels);
+ *labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
+ *own_labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
+ } else {
+ if (*own_labels == nullptr) {
+ *own_labels = new (zone()) ZonePtrList<const AstRawString>(1, zone());
+ }
}
- labels->Add(label, zone());
+ (*labels)->Add(label, zone());
+ (*own_labels)->Add(label, zone());
+
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
scope()->RemoveUnresolved(var);
- return labels;
}
bool Parser::ContainsLabel(ZonePtrList<const AstRawString>* labels,
@@ -2194,7 +2199,7 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// need to know about it. This should be safe because we don't run any code
// in this function that looks up break targets.
ForStatement* outer_loop =
- factory()->NewForStatement(nullptr, kNoSourcePosition);
+ factory()->NewForStatement(nullptr, nullptr, kNoSourcePosition);
outer_block->statements()->Add(outer_loop, zone());
outer_block->set_scope(scope());
@@ -3396,9 +3401,10 @@ IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
if (stat == nullptr) continue;
DCHECK(stat->is_target_for_anonymous());
- if (anonymous || ContainsLabel(stat->labels(), label)) {
+ if (anonymous || ContainsLabel(stat->own_labels(), label)) {
return stat;
}
+ if (ContainsLabel(stat->labels(), label)) break;
}
return nullptr;
}
@@ -3442,7 +3448,7 @@ void Parser::ParseOnBackground(ParseInfo* info) {
DCHECK_NULL(info->literal());
FunctionLiteral* result = nullptr;
- scanner_.Initialize(info->character_stream(), info->is_module());
+ scanner_.Initialize();
DCHECK(info->maybe_outer_scope_info().is_null());
DCHECK(original_scope_);
@@ -3652,10 +3658,11 @@ void Parser::RewriteAsyncFunctionBody(ZonePtrList<Statement>* body,
void Parser::RewriteDestructuringAssignments() {
const auto& assignments =
function_state_->destructuring_assignments_to_rewrite();
- for (int i = assignments.length() - 1; i >= 0; --i) {
+ auto it = assignments.rbegin();
+ for (; it != assignments.rend(); ++it) {
// Rewrite list in reverse, so that nested assignment patterns are rewritten
// correctly.
- RewritableExpression* to_rewrite = assignments[i];
+ RewritableExpression* to_rewrite = *it;
DCHECK_NOT_NULL(to_rewrite);
if (!to_rewrite->is_rewritten()) {
// Since this function is called at the end of parsing the program,
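
The DeclareLabel change above threads two lists through statement parsing: {labels}, the full set of labels naming a statement, and {own_labels}, only those attached directly to an iteration statement; LookupContinueTarget now matches a 'continue label;' against own_labels and stops the walk once the label appears only in labels(). A minimal standalone sketch of the lazily-allocated two-list pattern, with std::vector/std::string standing in for the zone-allocated ZonePtrList<const AstRawString> (DeclareLabelSketch is an illustrative name, not V8 API):

#include <string>
#include <vector>

using LabelList = std::vector<std::string>;

// Add {label} to both lists, allocating them lazily so unlabeled statements
// pay nothing. The unmatched 'new' mirrors zone allocation, which is never
// freed individually; a real caller would use a Zone or smart pointers.
void DeclareLabelSketch(LabelList** labels, LabelList** own_labels,
                        const std::string& label) {
  if (*labels == nullptr) {
    *labels = new LabelList();
    *own_labels = new LabelList();
  } else if (*own_labels == nullptr) {
    *own_labels = new LabelList();
  }
  (*labels)->push_back(label);
  (*own_labels)->push_back(label);
}

int main() {
  LabelList* labels = nullptr;
  LabelList* own_labels = nullptr;
  DeclareLabelSketch(&labels, &own_labels, "outer");
  // A 'continue outer;' target check would consult own_labels only.
  return 0;
}
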
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 2dec83b274..00e73f37a2 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -14,9 +14,9 @@
#include "src/globals.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
-#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/utils.h"
+#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -262,10 +262,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void ParseImportDeclaration(bool* ok);
Statement* ParseExportDeclaration(bool* ok);
Statement* ParseExportDefault(bool* ok);
- void ParseExportClause(ZonePtrList<const AstRawString>* export_names,
- ZoneList<Scanner::Location>* export_locations,
- ZonePtrList<const AstRawString>* local_names,
- Scanner::Location* reserved_loc, bool* ok);
+ struct ExportClauseData {
+ const AstRawString* export_name;
+ const AstRawString* local_name;
+ Scanner::Location location;
+ };
+ ZoneChunkList<ExportClauseData>* ParseExportClause(
+ Scanner::Location* reserved_loc, bool* ok);
struct NamedImport : public ZoneObject {
const AstRawString* import_name;
const AstRawString* local_name;
@@ -280,8 +283,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Block* BuildInitializationBlock(DeclarationParsingResult* parsing_result,
ZonePtrList<const AstRawString>* names,
bool* ok);
- ZonePtrList<const AstRawString>* DeclareLabel(
- ZonePtrList<const AstRawString>* labels, VariableProxy* expr, bool* ok);
+ void DeclareLabel(ZonePtrList<const AstRawString>** labels,
+ ZonePtrList<const AstRawString>** own_labels,
+ VariableProxy* expr, bool* ok);
bool ContainsLabel(ZonePtrList<const AstRawString>* labels,
const AstRawString* label);
Expression* RewriteReturn(Expression* return_value, int pos);
@@ -954,7 +958,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void SetFunctionNameFromIdentifierRef(Expression* value,
Expression* identifier);
- V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
+ V8_INLINE ZoneVector<typename ExpressionClassifier::Error>*
GetReportedErrorList() const {
return function_state_->GetReportedErrorList();
}
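
With the change above, ParseExportClause returns a single ZoneChunkList of ExportClauseData records instead of filling three parallel lists that had to stay index-aligned. A standalone sketch of the same shape, using std::vector in place of the zone-allocated ZoneChunkList and plain ints in place of Scanner::Location (illustrative names only):

#include <string>
#include <vector>

// One record per "local as exported" specifier; consumers iterate a single
// list rather than three index-aligned ones.
struct ExportEntry {
  std::string export_name;  // name visible to importers
  std::string local_name;   // binding inside the module
  int beg_pos;              // stand-ins for Scanner::Location
  int end_pos;
};

std::vector<ExportEntry> CollectExportClauseSketch() {
  std::vector<ExportEntry> entries;
  entries.push_back({"renamed", "local", 9, 26});  // export { local as renamed }
  entries.push_back({"same", "same", 28, 32});     // export { same }
  return entries;
}
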
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index b981b6d12e..ed3231c151 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -671,7 +671,8 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// #maybe_store_and_unset_done;
// #increment_index;
// }
- WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
+ WhileStatement* loop =
+ factory()->NewWhileStatement(nullptr, nullptr, nopos);
{
Expression* condition = factory()->NewUnaryOperation(
Token::NOT, factory()->NewVariableProxy(done), nopos);
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
deleted file mode 100644
index e39218111d..0000000000
--- a/deps/v8/src/parsing/preparse-data.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/parsing/preparse-data.h"
-#include "src/base/hashmap.h"
-#include "src/base/logging.h"
-#include "src/globals.h"
-#include "src/objects-inl.h"
-#include "src/parsing/parser.h"
-
-namespace v8 {
-namespace internal {
-
-PreParseData::FunctionData PreParseData::GetFunctionData(int start) const {
- auto it = functions_.find(start);
- if (it != functions_.end()) {
- return it->second;
- }
- return FunctionData();
-}
-
-void PreParseData::AddFunctionData(int start, FunctionData&& data) {
- DCHECK(data.is_valid());
- functions_[start] = std::move(data);
-}
-
-void PreParseData::AddFunctionData(int start, const FunctionData& data) {
- DCHECK(data.is_valid());
- functions_[start] = data;
-}
-
-size_t PreParseData::size() const { return functions_.size(); }
-
-PreParseData::const_iterator PreParseData::begin() const {
- return functions_.begin();
-}
-
-PreParseData::const_iterator PreParseData::end() const {
- return functions_.end();
-}
-
-} // namespace internal
-} // namespace v8.
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
deleted file mode 100644
index 0e40c76927..0000000000
--- a/deps/v8/src/parsing/preparse-data.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PARSING_PREPARSE_DATA_H_
-#define V8_PARSING_PREPARSE_DATA_H_
-
-#include <unordered_map>
-
-#include "src/allocation.h"
-#include "src/base/hashmap.h"
-#include "src/collector.h"
-#include "src/messages.h"
-namespace v8 {
-namespace internal {
-
-class PreParserLogger final {
- public:
- PreParserLogger()
- : end_(-1),
- num_parameters_(-1),
- num_inner_functions_(-1) {}
-
- void LogFunction(int end, int num_parameters, int num_inner_functions) {
- end_ = end;
- num_parameters_ = num_parameters;
- num_inner_functions_ = num_inner_functions;
- }
-
- int end() const { return end_; }
- int num_parameters() const {
- return num_parameters_;
- }
- int num_inner_functions() const { return num_inner_functions_; }
-
- private:
- int end_;
- // For function entries.
- int num_parameters_;
- int num_inner_functions_;
-};
-
-class PreParseData final {
- public:
- struct FunctionData {
- int end;
- int num_parameters;
- int num_inner_functions;
- LanguageMode language_mode;
- bool uses_super_property : 1;
-
- FunctionData() : end(kNoSourcePosition) {}
-
- FunctionData(int end, int num_parameters, int num_inner_functions,
- LanguageMode language_mode, bool uses_super_property)
- : end(end),
- num_parameters(num_parameters),
- num_inner_functions(num_inner_functions),
- language_mode(language_mode),
- uses_super_property(uses_super_property) {}
-
- bool is_valid() const {
- DCHECK_IMPLIES(end < 0, end == kNoSourcePosition);
- return end != kNoSourcePosition;
- }
- };
-
- FunctionData GetFunctionData(int start) const;
- void AddFunctionData(int start, FunctionData&& data);
- void AddFunctionData(int start, const FunctionData& data);
- size_t size() const;
-
- typedef std::unordered_map<int, FunctionData>::const_iterator const_iterator;
- const_iterator begin() const;
- const_iterator end() const;
-
- private:
- std::unordered_map<int, FunctionData> functions_;
-};
-
-} // namespace internal
-} // namespace v8.
-
-#endif // V8_PARSING_PREPARSE_DATA_H_
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index 0dab3f9ee1..90e8819e32 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -481,6 +481,11 @@ uint8_t ConsumedPreParsedScopeData::ByteData::ReadQuarter() {
return result;
}
+size_t ConsumedPreParsedScopeData::ByteData::RemainingBytes() const {
+ DCHECK_NOT_NULL(data_);
+ return data_->length() - index_;
+}
+
ConsumedPreParsedScopeData::ConsumedPreParsedScopeData()
: isolate_(nullptr), scope_data_(new ByteData()), child_index_(0) {}
@@ -577,7 +582,7 @@ void ConsumedPreParsedScopeData::RestoreData(Scope* scope) {
if (scope_data_->RemainingBytes() < kUint8Size) {
// Temporary debugging code for detecting inconsistent data. Write debug
// information on the stack, then crash.
- data_->GetIsolate()->PushStackTraceAndDie();
+ isolate_->PushStackTraceAndDie();
}
// scope_type is stored only in debug mode.
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
index 6ad0f491f8..61d67291a4 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -12,7 +12,6 @@
#include "src/globals.h"
#include "src/handles.h"
#include "src/objects/shared-function-info.h"
-#include "src/parsing/preparse-data.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -212,10 +211,7 @@ class ConsumedPreParsedScopeData {
uint8_t ReadUint8();
uint8_t ReadQuarter();
- size_t RemainingBytes() const {
- DCHECK_NOT_NULL(data_);
- return data_->length() - index_;
- }
+ size_t RemainingBytes() const;
// private:
PodArray<uint8_t>* data_;
diff --git a/deps/v8/src/parsing/preparser-logger.h b/deps/v8/src/parsing/preparser-logger.h
new file mode 100644
index 0000000000..55394ca2be
--- /dev/null
+++ b/deps/v8/src/parsing/preparser-logger.h
@@ -0,0 +1,35 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSER_LOGGER_H_
+#define V8_PARSING_PREPARSER_LOGGER_H_
+
+namespace v8 {
+namespace internal {
+
+class PreParserLogger final {
+ public:
+ PreParserLogger() : end_(-1), num_parameters_(-1), num_inner_functions_(-1) {}
+
+ void LogFunction(int end, int num_parameters, int num_inner_functions) {
+ end_ = end;
+ num_parameters_ = num_parameters;
+ num_inner_functions_ = num_inner_functions;
+ }
+
+ int end() const { return end_; }
+ int num_parameters() const { return num_parameters_; }
+ int num_inner_functions() const { return num_inner_functions_; }
+
+ private:
+ int end_;
+ // For function entries.
+ int num_parameters_;
+ int num_inner_functions_;
+};
+
+} // namespace internal
+} // namespace v8.
+
+#endif // V8_PARSING_PREPARSER_LOGGER_H_
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 832d2033f2..d449c8d76b 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -11,7 +11,6 @@
#include "src/globals.h"
#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/parsing/preparser.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index aa4f06d354..10c42fa940 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -8,8 +8,9 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparser-logger.h"
#include "src/pending-compilation-error-handler.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -64,7 +65,7 @@ class PreParserIdentifier {
bool IsPrivateName() const { return type_ == kPrivateNameIdentifier; }
private:
- enum Type {
+ enum Type : uint8_t {
kNullIdentifier,
kUnknownIdentifier,
kEvalIdentifier,
@@ -76,10 +77,11 @@ class PreParserIdentifier {
kPrivateNameIdentifier
};
- explicit PreParserIdentifier(Type type) : type_(type), string_(nullptr) {}
- Type type_;
+ explicit PreParserIdentifier(Type type) : string_(nullptr), type_(type) {}
// Only non-nullptr when PreParser.track_unresolved_variables_ is true.
const AstRawString* string_;
+
+ Type type_;
friend class PreParserExpression;
friend class PreParser;
friend class PreParserFactory;
@@ -341,6 +343,7 @@ class PreParserExpression {
// More dummy implementations of things PreParser doesn't need to track:
void SetShouldEagerCompile() {}
+ void mark_as_iife() {}
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
@@ -400,7 +403,7 @@ class PreParserExpression {
typedef BitField<ExpressionType, TypeField::kNext, 4> ExpressionTypeField;
typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseAsmField;
- typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
+ typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 8>
IdentifierTypeField;
typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
@@ -787,12 +790,14 @@ class PreParserFactory {
}
PreParserStatement NewDoWhileStatement(
- ZonePtrList<const AstRawString>* labels, int pos) {
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos) {
return PreParserStatement::Default();
}
- PreParserStatement NewWhileStatement(ZonePtrList<const AstRawString>* labels,
- int pos) {
+ PreParserStatement NewWhileStatement(
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos) {
return PreParserStatement::Default();
}
@@ -807,25 +812,28 @@ class PreParserFactory {
return PreParserStatement::Default();
}
- PreParserStatement NewForStatement(ZonePtrList<const AstRawString>* labels,
- int pos) {
+ PreParserStatement NewForStatement(
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos) {
return PreParserStatement::Default();
}
PreParserStatement NewForEachStatement(
ForEachStatement::VisitMode visit_mode,
- ZonePtrList<const AstRawString>* labels, int pos) {
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos) {
return PreParserStatement::Default();
}
- PreParserStatement NewForOfStatement(ZonePtrList<const AstRawString>* labels,
- int pos) {
+ PreParserStatement NewForOfStatement(
+ ZonePtrList<const AstRawString>* labels,
+ ZonePtrList<const AstRawString>* own_labels, int pos) {
return PreParserStatement::Default();
}
- PreParserExpression NewCallRuntime(Runtime::FunctionId id,
- ZoneList<PreParserExpression>* arguments,
- int pos) {
+ PreParserExpression NewCallRuntime(
+ Runtime::FunctionId id, ZoneChunkList<PreParserExpression>* arguments,
+ int pos) {
return PreParserExpression::Default();
}
@@ -1070,12 +1078,11 @@ class PreParser : public ParserBase<PreParser> {
const DeclarationParsingResult::Declaration* declaration,
ZonePtrList<const AstRawString>* names, bool* ok);
- V8_INLINE ZonePtrList<const AstRawString>* DeclareLabel(
- ZonePtrList<const AstRawString>* labels, const PreParserExpression& expr,
- bool* ok) {
+ V8_INLINE void DeclareLabel(ZonePtrList<const AstRawString>** labels,
+ ZonePtrList<const AstRawString>** own_labels,
+ const PreParserExpression& expr, bool* ok) {
DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
DCHECK(IsIdentifier(expr));
- return labels;
}
// TODO(nikolaos): The preparser currently does not keep track of labels.
@@ -1726,7 +1733,7 @@ class PreParser : public ParserBase<PreParser> {
const PreParserExpression& value, const PreParserExpression& identifier) {
}
- V8_INLINE ZoneList<typename ExpressionClassifier::Error>*
+ V8_INLINE ZoneVector<typename ExpressionClassifier::Error>*
GetReportedErrorList() const {
return function_state_->GetReportedErrorList();
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 052b6007ae..d38fdd7c42 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -88,8 +88,8 @@ class ExternalStringStream {
template <typename Char>
class ChunkedStream {
public:
- explicit ChunkedStream(ScriptCompiler::ExternalSourceStream* source,
- RuntimeCallStats* stats)
+ ChunkedStream(ScriptCompiler::ExternalSourceStream* source,
+ RuntimeCallStats* stats)
: source_(source), stats_(stats) {}
Range<Char> GetDataAt(size_t pos) {
@@ -100,15 +100,15 @@ class ChunkedStream {
}
~ChunkedStream() {
- for (size_t i = 0; i < chunks_.size(); i++) {
- delete[] chunks_[i].data;
- }
+ for (Chunk& chunk : chunks_) delete[] chunk.data;
}
static const bool kCanAccessHeap = false;
private:
struct Chunk {
+ Chunk(const Char* const data, size_t position, size_t length)
+ : data(data), position(position), length(length) {}
const Char* const data;
// The logical position of data.
const size_t position;
@@ -117,7 +117,7 @@ class ChunkedStream {
};
Chunk FindChunk(size_t position) {
- if (chunks_.empty()) FetchChunk(size_t{0});
+ while (V8_UNLIKELY(chunks_.empty())) FetchChunk(size_t{0});
// Walk forwards while the position is in front of the current chunk.
while (position >= chunks_.back().end_position() &&
@@ -134,6 +134,14 @@ class ChunkedStream {
UNREACHABLE();
}
+ virtual void ProcessChunk(const uint8_t* data, size_t position,
+ size_t length) {
+ // Incoming data has to be aligned to Char size.
+ DCHECK_EQ(0, length % sizeof(Char));
+ chunks_.emplace_back(reinterpret_cast<const Char*>(data), position,
+ length / sizeof(Char));
+ }
+
void FetchChunk(size_t position) {
const uint8_t* data = nullptr;
size_t length;
@@ -142,21 +150,110 @@ class ChunkedStream {
RuntimeCallCounterId::kGetMoreDataCallback);
length = source_->GetMoreData(&data);
}
- // Incoming data has to be aligned to Char size.
- DCHECK_EQ(0, length % sizeof(Char));
- chunks_.push_back(
- {reinterpret_cast<const Char*>(data), position, length / sizeof(Char)});
+ ProcessChunk(data, position, length);
}
- std::vector<struct Chunk> chunks_;
ScriptCompiler::ExternalSourceStream* source_;
RuntimeCallStats* stats_;
+
+ protected:
+ std::vector<struct Chunk> chunks_;
+};
+
+template <typename Char>
+class Utf8ChunkedStream : public ChunkedStream<uint16_t> {
+ public:
+ Utf8ChunkedStream(ScriptCompiler::ExternalSourceStream* source,
+ RuntimeCallStats* stats)
+ : ChunkedStream<uint16_t>(source, stats) {}
+
+ STATIC_ASSERT(sizeof(Char) == sizeof(uint16_t));
+ void ProcessChunk(const uint8_t* data, size_t position, size_t length) final {
+ if (length == 0) {
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state_);
+ if (t != unibrow::Utf8::kBufferEmpty) {
+ DCHECK_EQ(t, unibrow::Utf8::kBadChar);
+ incomplete_char_ = 0;
+ uint16_t* result = new uint16_t[1];
+ result[0] = unibrow::Utf8::kBadChar;
+ chunks_.emplace_back(result, position, 1);
+ position++;
+ }
+ chunks_.emplace_back(nullptr, position, 0);
+ delete[] data;
+ return;
+ }
+
+ // First count the number of complete characters that can be produced.
+
+ unibrow::Utf8::State state = state_;
+ uint32_t incomplete_char = incomplete_char_;
+ bool seen_bom = seen_bom_;
+
+ size_t i = 0;
+ size_t chars = 0;
+ while (i < length) {
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(data[i], &i, &state,
+ &incomplete_char);
+ if (!seen_bom && t == kUtf8Bom && position + chars == 0) {
+ seen_bom = true;
+ // BOM detected at beginning of the stream. Don't copy it.
+ } else if (t != unibrow::Utf8::kIncomplete) {
+ chars++;
+ if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
+ }
+ }
+
+ // Process the data.
+
+ // If there aren't any complete characters, update the state without
+ // producing a chunk.
+ if (chars == 0) {
+ state_ = state;
+ incomplete_char_ = incomplete_char;
+ seen_bom_ = seen_bom;
+ delete[] data;
+ return;
+ }
+
+ // Update the state and produce a chunk with complete characters.
+ uint16_t* result = new uint16_t[chars];
+ uint16_t* cursor = result;
+ i = 0;
+
+ while (i < length) {
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncremental(data[i], &i, &state_,
+ &incomplete_char_);
+ if (V8_LIKELY(t < kUtf8Bom)) {
+        *(cursor++) = static_cast<uc16>(t);  // By far the most frequent case.
+ } else if (t == unibrow::Utf8::kIncomplete) {
+ continue;
+ } else if (!seen_bom_ && t == kUtf8Bom && position == 0 &&
+ cursor == result) {
+ // BOM detected at beginning of the stream. Don't copy it.
+ seen_bom_ = true;
+ } else if (t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ *(cursor++) = static_cast<uc16>(t);
+ } else {
+ *(cursor++) = unibrow::Utf16::LeadSurrogate(t);
+ *(cursor++) = unibrow::Utf16::TrailSurrogate(t);
+ }
+ }
+
+ chunks_.emplace_back(result, position, chars);
+ delete[] data;
+ }
+
+ private:
+ uint32_t incomplete_char_ = 0;
+ unibrow::Utf8::State state_ = unibrow::Utf8::State::kAccept;
+ bool seen_bom_ = false;
};
// Provides a buffered utf-16 view on the bytes from the underlying ByteStream.
// Chars are buffered if either the underlying stream isn't utf-16 or the
// underlying utf-16 stream might move (is on-heap).
-template <typename Char, template <typename T> class ByteStream>
+template <template <typename T> class ByteStream>
class BufferedCharacterStream : public Utf16CharacterStream {
public:
template <class... TArgs>
@@ -165,13 +262,13 @@ class BufferedCharacterStream : public Utf16CharacterStream {
}
protected:
- bool ReadBlock() override {
+ bool ReadBlock() final {
size_t position = pos();
buffer_pos_ = position;
buffer_start_ = &buffer_[0];
buffer_cursor_ = buffer_start_;
- Range<Char> range = byte_stream_.GetDataAt(position);
+ Range<uint8_t> range = byte_stream_.GetDataAt(position);
if (range.length() == 0) {
buffer_end_ = buffer_start_;
return false;
@@ -183,14 +280,12 @@ class BufferedCharacterStream : public Utf16CharacterStream {
return true;
}
- bool can_access_heap() override {
- return ByteStream<uint16_t>::kCanAccessHeap;
- }
+ bool can_access_heap() final { return ByteStream<uint8_t>::kCanAccessHeap; }
private:
static const size_t kBufferSize = 512;
uc16 buffer_[kBufferSize];
- ByteStream<Char> byte_stream_;
+ ByteStream<uint8_t> byte_stream_;
};
// Provides a unbuffered utf-16 view on the bytes from the underlying
@@ -200,12 +295,11 @@ class UnbufferedCharacterStream : public Utf16CharacterStream {
public:
template <class... TArgs>
UnbufferedCharacterStream(size_t pos, TArgs... args) : byte_stream_(args...) {
- DCHECK(!ByteStream<uint16_t>::kCanAccessHeap);
buffer_pos_ = pos;
}
protected:
- bool ReadBlock() override {
+ bool ReadBlock() final {
size_t position = pos();
buffer_pos_ = position;
Range<uint16_t> range = byte_stream_.GetDataAt(position);
@@ -219,12 +313,50 @@ class UnbufferedCharacterStream : public Utf16CharacterStream {
return true;
}
- bool can_access_heap() override { return false; }
+ bool can_access_heap() final { return ByteStream<uint16_t>::kCanAccessHeap; }
- private:
ByteStream<uint16_t> byte_stream_;
};
+// Provides an unbuffered utf-16 view on the bytes from the underlying
+// ByteStream.
+class RelocatingCharacterStream
+ : public UnbufferedCharacterStream<OnHeapStream> {
+ public:
+ template <class... TArgs>
+ RelocatingCharacterStream(Isolate* isolate, size_t pos, TArgs... args)
+ : UnbufferedCharacterStream<OnHeapStream>(pos, args...),
+ isolate_(isolate) {
+ isolate->heap()->AddGCEpilogueCallback(UpdateBufferPointersCallback,
+ v8::kGCTypeAll, this);
+ }
+
+ private:
+ ~RelocatingCharacterStream() final {
+ isolate_->heap()->RemoveGCEpilogueCallback(UpdateBufferPointersCallback,
+ this);
+ }
+
+ static void UpdateBufferPointersCallback(v8::Isolate* v8_isolate,
+ v8::GCType type,
+ v8::GCCallbackFlags flags,
+ void* stream) {
+ reinterpret_cast<RelocatingCharacterStream*>(stream)
+ ->UpdateBufferPointers();
+ }
+
+ void UpdateBufferPointers() {
+ Range<uint16_t> range = byte_stream_.GetDataAt(0);
+ if (range.start != buffer_start_) {
+ buffer_cursor_ = (buffer_cursor_ - buffer_start_) + range.start;
+ buffer_start_ = range.start;
+ buffer_end_ = range.end;
+ }
+ }
+
+ Isolate* isolate_;
+};
+
// ----------------------------------------------------------------------------
// BufferedUtf16CharacterStreams
//
@@ -240,7 +372,7 @@ class BufferedUtf16CharacterStream : public Utf16CharacterStream {
protected:
static const size_t kBufferSize = 512;
- bool ReadBlock() override;
+ bool ReadBlock() final;
// FillBuffer should read up to kBufferSize characters at position and store
// them into buffer_[0..]. It returns the number of characters stored.
@@ -285,14 +417,14 @@ class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
: current_({0, {0, 0, 0, unibrow::Utf8::State::kAccept}}),
source_stream_(source_stream),
stats_(stats) {}
- ~Utf8ExternalStreamingStream() override {
+ ~Utf8ExternalStreamingStream() final {
for (size_t i = 0; i < chunks_.size(); i++) delete[] chunks_[i].data;
}
- bool can_access_heap() override { return false; }
+ bool can_access_heap() final { return false; }
protected:
- size_t FillBuffer(size_t position) override;
+ size_t FillBuffer(size_t position) final;
private:
// A position within the data stream. It stores:
@@ -571,7 +703,7 @@ Utf16CharacterStream* ScannerStream::For(Isolate* isolate, Handle<String> data,
data = String::Flatten(isolate, data);
}
if (data->IsExternalOneByteString()) {
- return new BufferedCharacterStream<uint8_t, ExternalStringStream>(
+ return new BufferedCharacterStream<ExternalStringStream>(
static_cast<size_t>(start_pos),
ExternalOneByteString::cast(*data)->GetChars() + start_offset,
static_cast<size_t>(end_pos));
@@ -581,13 +713,14 @@ Utf16CharacterStream* ScannerStream::For(Isolate* isolate, Handle<String> data,
ExternalTwoByteString::cast(*data)->GetChars() + start_offset,
static_cast<size_t>(end_pos));
} else if (data->IsSeqOneByteString()) {
- return new BufferedCharacterStream<uint8_t, OnHeapStream>(
+ return new BufferedCharacterStream<OnHeapStream>(
static_cast<size_t>(start_pos), Handle<SeqOneByteString>::cast(data),
start_offset, static_cast<size_t>(end_pos));
} else if (data->IsSeqTwoByteString()) {
- return new BufferedCharacterStream<uint16_t, OnHeapStream>(
- static_cast<size_t>(start_pos), Handle<SeqTwoByteString>::cast(data),
- start_offset, static_cast<size_t>(end_pos));
+ return new RelocatingCharacterStream(
+ isolate, static_cast<size_t>(start_pos),
+ Handle<SeqTwoByteString>::cast(data), start_offset,
+ static_cast<size_t>(end_pos));
} else {
UNREACHABLE();
}
@@ -601,7 +734,7 @@ std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
const char* data, size_t length) {
return std::unique_ptr<Utf16CharacterStream>(
- new BufferedCharacterStream<uint8_t, ExternalStringStream>(
+ new BufferedCharacterStream<ExternalStringStream>(
static_cast<size_t>(0), reinterpret_cast<const uint8_t*>(data),
static_cast<size_t>(length)));
}
@@ -615,8 +748,8 @@ Utf16CharacterStream* ScannerStream::For(
return new UnbufferedCharacterStream<ChunkedStream>(
static_cast<size_t>(0), source_stream, stats);
case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
- return new BufferedCharacterStream<uint8_t, ChunkedStream>(
- static_cast<size_t>(0), source_stream, stats);
+ return new BufferedCharacterStream<ChunkedStream>(static_cast<size_t>(0),
+ source_stream, stats);
case v8::ScriptCompiler::StreamedSource::UTF8:
return new Utf8ExternalStreamingStream(source_stream, stats);
}
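
Utf8ChunkedStream above decodes incoming UTF-8 chunks into UTF-16 code units, emitting a surrogate pair for every code point above U+FFFF (the unibrow::Utf16::LeadSurrogate/TrailSurrogate calls). A self-contained sketch of just that split, outside the patch:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t cp = 0x1F600;  // supplementary-plane code point; needs two UTF-16 units
  uint16_t lead = static_cast<uint16_t>(0xD800 + ((cp - 0x10000) >> 10));
  uint16_t trail = static_cast<uint16_t>(0xDC00 + ((cp - 0x10000) & 0x3FF));
  std::printf("U+%X -> 0x%04X 0x%04X\n", cp, lead, trail);  // prints D83D DE00
  return 0;
}
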
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 12c5847f2f..091ef5b8ea 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -27,7 +27,6 @@ class V8_EXPORT_PRIVATE ScannerStream {
ScriptCompiler::StreamedSource::Encoding encoding,
RuntimeCallStats* stats);
- // For testing:
static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data);
static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data,
size_t length);
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
new file mode 100644
index 0000000000..809ef655a7
--- /dev/null
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_SCANNER_INL_H_
+#define V8_PARSING_SCANNER_INL_H_
+
+#include "src/parsing/scanner.h"
+#include "src/unicode-cache-inl.h"
+
+namespace v8 {
+namespace internal {
+
+V8_INLINE Token::Value Scanner::SkipWhiteSpace() {
+ int start_position = source_pos();
+
+ while (true) {
+ // We won't skip behind the end of input.
+ DCHECK(!unicode_cache_->IsWhiteSpace(kEndOfInput));
+
+ // Advance as long as character is a WhiteSpace or LineTerminator.
+ // Remember if the latter is the case.
+ if (unibrow::IsLineTerminator(c0_)) {
+ next().after_line_terminator = true;
+ } else if (!unicode_cache_->IsWhiteSpace(c0_)) {
+ break;
+ }
+ Advance();
+ }
+
+ // Return whether or not we skipped any characters.
+ if (source_pos() == start_position) {
+ DCHECK_NE('0', c0_);
+ return Token::ILLEGAL;
+ }
+
+ return Token::WHITESPACE;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_SCANNER_INL_H_
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 852b5e400b..781832c2e6 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -15,7 +15,7 @@
#include "src/conversions-inl.h"
#include "src/objects/bigint.h"
#include "src/parsing/duplicate-finder.h" // For Scanner::FindSymbol
-#include "src/unicode-cache-inl.h"
+#include "src/parsing/scanner-inl.h"
namespace v8 {
namespace internal {
@@ -60,6 +60,7 @@ class Scanner::ErrorState {
// Scanner::LiteralBuffer
Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
+ DCHECK(is_used_);
if (is_one_byte()) {
return isolate->factory()->InternalizeOneByteString(one_byte_literal());
}
@@ -103,16 +104,9 @@ void Scanner::LiteralBuffer::ConvertToTwoByte() {
is_one_byte_ = false;
}
-void Scanner::LiteralBuffer::AddCharSlow(uc32 code_unit) {
+void Scanner::LiteralBuffer::AddTwoByteChar(uc32 code_unit) {
+ DCHECK(!is_one_byte_);
if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_one_byte_) {
- if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
- backing_store_[position_] = static_cast<byte>(code_unit);
- position_ += kOneByteSize;
- return;
- }
- ConvertToTwoByte();
- }
if (code_unit <=
static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
*reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
@@ -140,16 +134,16 @@ const size_t Scanner::BookmarkScope::kBookmarkWasApplied =
void Scanner::BookmarkScope::Set() {
DCHECK_EQ(bookmark_, kNoBookmark);
- DCHECK_EQ(scanner_->next_next_.token, Token::UNINITIALIZED);
+ DCHECK_EQ(scanner_->next_next().token, Token::UNINITIALIZED);
// The first token is a bit special, since current_ will still be
// uninitialized. In this case, store kBookmarkAtFirstPos and special-case it
// when
// applying the bookmark.
- DCHECK_IMPLIES(
- scanner_->current_.token == Token::UNINITIALIZED,
- scanner_->current_.location.beg_pos == scanner_->next_.location.beg_pos);
- bookmark_ = (scanner_->current_.token == Token::UNINITIALIZED)
+ DCHECK_IMPLIES(scanner_->current().token == Token::UNINITIALIZED,
+ scanner_->current().location.beg_pos ==
+ scanner_->next().location.beg_pos);
+ bookmark_ = (scanner_->current().token == Token::UNINITIALIZED)
? kBookmarkAtFirstPos
: scanner_->location().beg_pos;
}
@@ -177,22 +171,24 @@ bool Scanner::BookmarkScope::HasBeenApplied() {
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(UnicodeCache* unicode_cache)
+Scanner::Scanner(UnicodeCache* unicode_cache, Utf16CharacterStream* source,
+ bool is_module)
: unicode_cache_(unicode_cache),
+ source_(source),
octal_pos_(Location::invalid()),
octal_message_(MessageTemplate::kNone),
found_html_comment_(false),
allow_harmony_bigint_(false),
- allow_harmony_numeric_separator_(false) {}
-
-void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
+ allow_harmony_numeric_separator_(false),
+ is_module_(is_module) {
DCHECK_NOT_NULL(source);
- source_ = source;
- is_module_ = is_module;
+}
+
+void Scanner::Initialize() {
// Need to capture identifiers in order to recognize "get" and "set"
// in object literals.
Init();
- has_line_terminator_before_next_ = true;
+ next().after_line_terminator = true;
Scan();
}
@@ -377,96 +373,43 @@ static const byte one_char_tokens[] = {
// clang-format on
Token::Value Scanner::Next() {
- if (next_.token == Token::EOS) {
- next_.location.beg_pos = current_.location.beg_pos;
- next_.location.end_pos = current_.location.end_pos;
- }
+ if (next().token == Token::EOS) next().location = current().location;
+ // Rotate through tokens.
+ TokenDesc* previous = current_;
current_ = next_;
- if (V8_UNLIKELY(next_next_.token != Token::UNINITIALIZED)) {
+ // Either we already have the next token lined up, in which case next_next_
+ // simply becomes next_. In that case we use current_ as new next_next_ and
+ // clear its token to indicate that it wasn't scanned yet. Otherwise we use
+ // current_ as next_ and scan into it, leaving next_next_ uninitialized.
+ if (V8_LIKELY(next_next().token == Token::UNINITIALIZED)) {
+ next_ = previous;
+ next().after_line_terminator = false;
+ Scan();
+ } else {
next_ = next_next_;
- next_next_.token = Token::UNINITIALIZED;
- next_next_.contextual_token = Token::UNINITIALIZED;
- has_line_terminator_before_next_ = has_line_terminator_after_next_;
- return current_.token;
+ next_next_ = previous;
+ previous->token = Token::UNINITIALIZED;
+ previous->contextual_token = Token::UNINITIALIZED;
+ DCHECK_NE(Token::UNINITIALIZED, current().token);
}
- has_line_terminator_before_next_ = false;
- has_multiline_comment_before_next_ = false;
- Scan();
- return current_.token;
+ return current().token;
}
Token::Value Scanner::PeekAhead() {
- DCHECK(next_.token != Token::DIV);
- DCHECK(next_.token != Token::ASSIGN_DIV);
-
- if (next_next_.token != Token::UNINITIALIZED) {
- return next_next_.token;
- }
- TokenDesc prev = current_;
- bool has_line_terminator_before_next =
- has_line_terminator_before_next_ || has_multiline_comment_before_next_;
- Next();
- has_line_terminator_after_next_ =
- has_line_terminator_before_next_ || has_multiline_comment_before_next_;
- has_line_terminator_before_next_ = has_line_terminator_before_next;
- Token::Value ret = next_.token;
- next_next_ = next_;
- next_ = current_;
- current_ = prev;
- return ret;
-}
-
-
-Token::Value Scanner::SkipWhiteSpace() {
- int start_position = source_pos();
-
- while (true) {
- while (true) {
- // We won't skip behind the end of input.
- DCHECK(!unicode_cache_->IsWhiteSpace(kEndOfInput));
-
- // Advance as long as character is a WhiteSpace or LineTerminator.
- // Remember if the latter is the case.
- if (unibrow::IsLineTerminator(c0_)) {
- has_line_terminator_before_next_ = true;
- } else if (!unicode_cache_->IsWhiteSpace(c0_)) {
- break;
- }
- Advance();
- }
+ DCHECK(next().token != Token::DIV);
+ DCHECK(next().token != Token::ASSIGN_DIV);
- // If there is an HTML comment end '-->' at the beginning of a
- // line (with only whitespace in front of it), we treat the rest
- // of the line as a comment. This is in line with the way
- // SpiderMonkey handles it.
- if (c0_ != '-' || !has_line_terminator_before_next_) break;
-
- Advance();
- if (c0_ != '-') {
- PushBack('-'); // undo Advance()
- break;
- }
-
- Advance();
- if (c0_ != '>') {
- PushBack2('-', '-'); // undo 2x Advance();
- break;
- }
-
- // Treat the rest of the line as a comment.
- Token::Value token = SkipSingleHTMLComment();
- if (token == Token::ILLEGAL) {
- return token;
- }
+ if (next_next().token != Token::UNINITIALIZED) {
+ return next_next().token;
}
-
- // Return whether or not we skipped any characters.
- if (source_pos() == start_position) {
- return Token::ILLEGAL;
- }
-
- return Token::WHITESPACE;
+ TokenDesc* temp = next_;
+ next_ = next_next_;
+ next().after_line_terminator = false;
+ Scan();
+ next_next_ = next_;
+ next_ = temp;
+ return next_next().token;
}
Token::Value Scanner::SkipSingleHTMLComment() {
@@ -478,21 +421,16 @@ Token::Value Scanner::SkipSingleHTMLComment() {
}
Token::Value Scanner::SkipSingleLineComment() {
- Advance();
-
// The line terminator at the end of the line is not considered
// to be part of the single-line comment; it is recognized
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
// ECMA-262, section 7.4).
- while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
- Advance();
- }
+ AdvanceUntil([](uc32 c0_) { return unibrow::IsLineTerminator(c0_); });
return Token::WHITESPACE;
}
-
Token::Value Scanner::SkipSourceURLComment() {
TryToParseSourceURLComment();
while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
@@ -502,7 +440,6 @@ Token::Value Scanner::SkipSourceURLComment() {
return Token::WHITESPACE;
}
-
void Scanner::TryToParseSourceURLComment() {
// Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
// function will just return if it cannot parse a magic comment.
@@ -510,6 +447,7 @@ void Scanner::TryToParseSourceURLComment() {
if (!unicode_cache_->IsWhiteSpace(c0_)) return;
Advance();
LiteralBuffer name;
+ name.Start();
while (c0_ != kEndOfInput &&
!unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) && c0_ != '=') {
@@ -528,15 +466,16 @@ void Scanner::TryToParseSourceURLComment() {
}
if (c0_ != '=')
return;
+ value->Drop();
+ value->Start();
Advance();
- value->Reset();
while (unicode_cache_->IsWhiteSpace(c0_)) {
Advance();
}
while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
// Disallowed characters.
if (c0_ == '"' || c0_ == '\'') {
- value->Reset();
+ value->Drop();
return;
}
if (unicode_cache_->IsWhiteSpace(c0_)) {
@@ -548,34 +487,33 @@ void Scanner::TryToParseSourceURLComment() {
// Allow whitespace at the end.
while (c0_ != kEndOfInput && !unibrow::IsLineTerminator(c0_)) {
if (!unicode_cache_->IsWhiteSpace(c0_)) {
- value->Reset();
+ value->Drop();
break;
}
Advance();
}
}
-
Token::Value Scanner::SkipMultiLineComment() {
DCHECK_EQ(c0_, '*');
Advance();
while (c0_ != kEndOfInput) {
- uc32 ch = c0_;
- Advance();
DCHECK(!unibrow::IsLineTerminator(kEndOfInput));
- if (unibrow::IsLineTerminator(ch)) {
+ if (!HasLineTerminatorBeforeNext() && unibrow::IsLineTerminator(c0_)) {
// Following ECMA-262, section 7.4, a comment containing
// a newline will make the comment count as a line-terminator.
- has_multiline_comment_before_next_ = true;
+ next().after_line_terminator = true;
}
- // If we have reached the end of the multi-line comment, we
- // consume the '/' and insert a whitespace. This way all
- // multi-line comments are treated as whitespace.
- if (ch == '*' && c0_ == '/') {
- c0_ = ' ';
- return Token::WHITESPACE;
+
+ while (V8_UNLIKELY(c0_ == '*')) {
+ Advance();
+ if (c0_ == '/') {
+ Advance();
+ return Token::WHITESPACE;
+ }
}
+ Advance();
}
// Unterminated multi-line comment.
@@ -586,25 +524,20 @@ Token::Value Scanner::ScanHtmlComment() {
// Check for <!-- comments.
DCHECK_EQ(c0_, '!');
Advance();
- if (c0_ != '-') {
+ if (c0_ != '-' || Peek() != '-') {
PushBack('!'); // undo Advance()
return Token::LT;
}
-
Advance();
- if (c0_ != '-') {
- PushBack2('-', '!'); // undo 2x Advance()
- return Token::LT;
- }
found_html_comment_ = true;
return SkipSingleHTMLComment();
}
void Scanner::Scan() {
- next_.literal_chars = nullptr;
- next_.raw_literal_chars = nullptr;
- next_.invalid_template_escape_message = MessageTemplate::kNone;
+ next().literal_chars.Drop();
+ next().raw_literal_chars.Drop();
+ next().invalid_template_escape_message = MessageTemplate::kNone;
Token::Value token;
do {
@@ -612,17 +545,17 @@ void Scanner::Scan() {
Token::Value token = static_cast<Token::Value>(one_char_tokens[c0_]);
if (token != Token::ILLEGAL) {
int pos = source_pos();
- next_.token = token;
- next_.contextual_token = Token::UNINITIALIZED;
- next_.location.beg_pos = pos;
- next_.location.end_pos = pos + 1;
+ next().token = token;
+ next().contextual_token = Token::UNINITIALIZED;
+ next().location.beg_pos = pos;
+ next().location.end_pos = pos + 1;
Advance();
return;
}
}
// Remember the position of the next token
- next_.location.beg_pos = source_pos();
+ next().location.beg_pos = source_pos();
switch (c0_) {
case '"':
@@ -703,7 +636,7 @@ void Scanner::Scan() {
Advance();
if (c0_ == '-') {
Advance();
- if (c0_ == '>' && HasAnyLineTerminatorBeforeNext()) {
+ if (c0_ == '>' && HasLineTerminatorBeforeNext()) {
// For compatibility with SpiderMonkey, we skip lines that
// start with an HTML comment end '-->'.
token = SkipSingleHTMLComment();
@@ -738,12 +671,12 @@ void Scanner::Scan() {
// / // /* /=
Advance();
if (c0_ == '/') {
- Advance();
- if (c0_ == '#' || c0_ == '@') {
+ uc32 c = Peek();
+ if (c == '#' || c == '@') {
+ Advance();
Advance();
token = SkipSourceURLComment();
} else {
- PushBack(c0_);
token = SkipSingleLineComment();
}
} else if (c0_ == '*') {
@@ -792,12 +725,10 @@ void Scanner::Scan() {
} else {
token = Token::PERIOD;
if (c0_ == '.') {
- Advance();
- if (c0_ == '.') {
+ if (Peek() == '.') {
+ Advance();
Advance();
token = Token::ELLIPSIS;
- } else {
- PushBack('.');
}
}
}
@@ -831,19 +762,19 @@ void Scanner::Scan() {
// whitespace.
} while (token == Token::WHITESPACE);
- next_.location.end_pos = source_pos();
+ next().location.end_pos = source_pos();
if (Token::IsContextualKeyword(token)) {
- next_.token = Token::IDENTIFIER;
- next_.contextual_token = token;
+ next().token = Token::IDENTIFIER;
+ next().contextual_token = token;
} else {
- next_.token = token;
- next_.contextual_token = Token::UNINITIALIZED;
+ next().token = token;
+ next().contextual_token = Token::UNINITIALIZED;
}
#ifdef DEBUG
- SanityCheckTokenDesc(current_);
- SanityCheckTokenDesc(next_);
- SanityCheckTokenDesc(next_next_);
+ SanityCheckTokenDesc(current());
+ SanityCheckTokenDesc(next());
+ SanityCheckTokenDesc(next_next());
#endif
}
@@ -864,8 +795,8 @@ void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
break;
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
- DCHECK_NOT_NULL(token.raw_literal_chars);
- DCHECK_NOT_NULL(token.literal_chars);
+ DCHECK(token.raw_literal_chars.is_used());
+ DCHECK(token.literal_chars.is_used());
break;
case Token::ESCAPED_KEYWORD:
case Token::ESCAPED_STRICT_RESERVED_WORD:
@@ -877,13 +808,13 @@ void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
case Token::SMI:
case Token::STRING:
case Token::PRIVATE_NAME:
- DCHECK_NOT_NULL(token.literal_chars);
- DCHECK_NULL(token.raw_literal_chars);
+ DCHECK(token.literal_chars.is_used());
+ DCHECK(!token.raw_literal_chars.is_used());
DCHECK_EQ(token.invalid_template_escape_message, MessageTemplate::kNone);
break;
default:
- DCHECK_NULL(token.literal_chars);
- DCHECK_NULL(token.raw_literal_chars);
+ DCHECK(!token.literal_chars.is_used());
+ DCHECK(!token.raw_literal_chars.is_used());
DCHECK_EQ(token.invalid_template_escape_message, MessageTemplate::kNone);
break;
}
@@ -900,9 +831,9 @@ void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
void Scanner::SeekForward(int pos) {
// After this call, we will have the token at the given position as
// the "next" token. The "current" token will be invalid.
- if (pos == next_.location.beg_pos) return;
+ if (pos == next().location.beg_pos) return;
int current_pos = source_pos();
- DCHECK_EQ(next_.location.end_pos, current_pos);
+ DCHECK_EQ(next().location.end_pos, current_pos);
// Positions inside the lookahead token aren't supported.
DCHECK(pos >= current_pos);
if (pos != current_pos) {
@@ -911,23 +842,21 @@ void Scanner::SeekForward(int pos) {
// This function is only called to seek to the location
// of the end of a function (at the "}" token). It doesn't matter
// whether there was a line terminator in the part we skip.
- has_line_terminator_before_next_ = false;
- has_multiline_comment_before_next_ = false;
+ next().after_line_terminator = false;
}
Scan();
}
-
-template <bool capture_raw, bool in_template_literal>
+template <bool capture_raw>
bool Scanner::ScanEscape() {
uc32 c = c0_;
Advance<capture_raw>();
// Skip escaped newlines.
DCHECK(!unibrow::IsLineTerminator(kEndOfInput));
- if (!in_template_literal && unibrow::IsLineTerminator(c)) {
+ if (!capture_raw && unibrow::IsLineTerminator(c)) {
// Allow escaped CR+LF newlines in multiline string literals.
- if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
+ if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
return true;
}
@@ -961,7 +890,7 @@ bool Scanner::ScanEscape() {
case '5': // fall through
case '6': // fall through
case '7':
- c = ScanOctalEscape<capture_raw>(c, 2, in_template_literal);
+ c = ScanOctalEscape<capture_raw>(c, 2);
break;
}
@@ -971,7 +900,7 @@ bool Scanner::ScanEscape() {
}
template <bool capture_raw>
-uc32 Scanner::ScanOctalEscape(uc32 c, int length, bool in_template_literal) {
+uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
uc32 x = c - '0';
int i = 0;
for (; i < length; i++) {
@@ -989,14 +918,12 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length, bool in_template_literal) {
// occur before the "use strict" directive.
if (c != '0' || i > 0 || c0_ == '8' || c0_ == '9') {
octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
- octal_message_ = in_template_literal
- ? MessageTemplate::kTemplateOctalLiteral
- : MessageTemplate::kStrictOctalEscape;
+ octal_message_ = capture_raw ? MessageTemplate::kTemplateOctalLiteral
+ : MessageTemplate::kStrictOctalEscape;
}
return x;
}
-
Token::Value Scanner::ScanString() {
uc32 quote = c0_;
Advance(); // consume quote
@@ -1014,7 +941,7 @@ Token::Value Scanner::ScanString() {
if (c0_ == '\\') {
Advance();
// TODO(verwaest): Check whether we can remove the additional check.
- if (c0_ == kEndOfInput || !ScanEscape<false, false>()) {
+ if (c0_ == kEndOfInput || !ScanEscape<false>()) {
return Token::ILLEGAL;
}
continue;
@@ -1032,15 +959,14 @@ Token::Value Scanner::ScanPrivateName() {
LiteralScope literal(this);
DCHECK_EQ(c0_, '#');
- AddLiteralCharAdvance();
DCHECK(!unicode_cache_->IsIdentifierStart(kEndOfInput));
- if (!unicode_cache_->IsIdentifierStart(c0_)) {
- PushBack(c0_);
+ if (!unicode_cache_->IsIdentifierStart(Peek())) {
ReportScannerError(source_pos(),
MessageTemplate::kInvalidOrUnexpectedToken);
return Token::ILLEGAL;
}
+ AddLiteralCharAdvance();
Token::Value token = ScanIdentifierOrKeywordInner(&literal);
return token == Token::ILLEGAL ? Token::ILLEGAL : Token::PRIVATE_NAME;
}
@@ -1069,89 +995,87 @@ Token::Value Scanner::ScanTemplateSpan() {
LiteralScope literal(this);
StartRawLiteral();
const bool capture_raw = true;
- const bool in_template_literal = true;
while (true) {
uc32 c = c0_;
- Advance<capture_raw>();
if (c == '`') {
+ Advance(); // Consume '`'
result = Token::TEMPLATE_TAIL;
- ReduceRawLiteralLength(1);
break;
- } else if (c == '$' && c0_ == '{') {
- Advance<capture_raw>(); // Consume '{'
- ReduceRawLiteralLength(2);
+ } else if (c == '$' && Peek() == '{') {
+ Advance(); // Consume '$'
+ Advance(); // Consume '{'
break;
} else if (c == '\\') {
+ Advance(); // Consume '\\'
DCHECK(!unibrow::IsLineTerminator(kEndOfInput));
+ if (capture_raw) AddRawLiteralChar('\\');
if (unibrow::IsLineTerminator(c0_)) {
// The TV of LineContinuation :: \ LineTerminatorSequence is the empty
// code unit sequence.
uc32 lastChar = c0_;
- Advance<capture_raw>();
+ Advance();
if (lastChar == '\r') {
- ReduceRawLiteralLength(1); // Remove \r
- if (c0_ == '\n') {
- Advance<capture_raw>(); // Adds \n
- } else {
- AddRawLiteralChar('\n');
- }
+ // Also skip \n.
+ if (c0_ == '\n') Advance();
+ lastChar = '\n';
}
+ if (capture_raw) AddRawLiteralChar(lastChar);
} else {
- bool success = ScanEscape<capture_raw, in_template_literal>();
+ bool success = ScanEscape<capture_raw>();
USE(success);
DCHECK_EQ(!success, has_error());
// For templates, invalid escape sequence checking is handled in the
// parser.
- scanner_error_state.MoveErrorTo(&next_);
- octal_error_state.MoveErrorTo(&next_);
+ scanner_error_state.MoveErrorTo(next_);
+ octal_error_state.MoveErrorTo(next_);
}
} else if (c < 0) {
// Unterminated template literal
- PushBack(c);
break;
} else {
+ Advance(); // Consume c.
// The TRV of LineTerminatorSequence :: <CR> is the CV 0x000A.
// The TRV of LineTerminatorSequence :: <CR><LF> is the sequence
// consisting of the CV 0x000A.
if (c == '\r') {
- ReduceRawLiteralLength(1); // Remove \r
- if (c0_ == '\n') {
- Advance<capture_raw>(); // Adds \n
- } else {
- AddRawLiteralChar('\n');
- }
+ if (c0_ == '\n') Advance(); // Consume '\n'
c = '\n';
}
+ if (capture_raw) AddRawLiteralChar(c);
AddLiteralChar(c);
}
}
literal.Complete();
- next_.location.end_pos = source_pos();
- next_.token = result;
- next_.contextual_token = Token::UNINITIALIZED;
+ next().location.end_pos = source_pos();
+ next().token = result;
+ next().contextual_token = Token::UNINITIALIZED;
return result;
}
-
Token::Value Scanner::ScanTemplateStart() {
- DCHECK_EQ(next_next_.token, Token::UNINITIALIZED);
+ DCHECK_EQ(next_next().token, Token::UNINITIALIZED);
DCHECK_EQ(c0_, '`');
- next_.location.beg_pos = source_pos();
+ next().location.beg_pos = source_pos();
Advance(); // Consume `
return ScanTemplateSpan();
}
Handle<String> Scanner::SourceUrl(Isolate* isolate) const {
Handle<String> tmp;
- if (source_url_.length() > 0) tmp = source_url_.Internalize(isolate);
+ if (source_url_.length() > 0) {
+ DCHECK(source_url_.is_used());
+ tmp = source_url_.Internalize(isolate);
+ }
return tmp;
}
Handle<String> Scanner::SourceMappingUrl(Isolate* isolate) const {
Handle<String> tmp;
- if (source_mapping_url_.length() > 0)
+ if (source_mapping_url_.length() > 0) {
+ DCHECK(source_mapping_url_.is_used());
tmp = source_mapping_url_.Internalize(isolate);
+ }
return tmp;
}
@@ -1375,10 +1299,10 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
return Token::ILLEGAL;
}
- if (next_.literal_chars->one_byte_literal().length() <= 10 &&
+ if (next().literal_chars.one_byte_literal().length() <= 10 &&
value <= Smi::kMaxValue && c0_ != '.' &&
!unicode_cache_->IsIdentifierStart(c0_)) {
- next_.smi_value_ = static_cast<uint32_t>(value);
+ next().smi_value_ = static_cast<uint32_t>(value);
literal.Complete();
if (kind == DECIMAL_WITH_LEADING_ZERO) {
@@ -1448,7 +1372,6 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
return is_bigint ? Token::BIGINT : Token::NUMBER;
}
-
uc32 Scanner::ScanIdentifierUnicodeEscape() {
Advance();
if (c0_ != 'u') return -1;
@@ -1456,7 +1379,6 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
return ScanUnicodeEscape<false>();
}
-
template <bool capture_raw>
uc32 Scanner::ScanUnicodeEscape() {
// Accept both \uxxxx and \u{xxxxxx}. In the latter case, the number of
@@ -1622,13 +1544,15 @@ Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
bool escaped = false;
if (IsInRange(c0_, 'a', 'z') || c0_ == '_') {
do {
- AddLiteralCharAdvance();
+ AddLiteralChar(static_cast<char>(c0_));
+ Advance();
} while (IsInRange(c0_, 'a', 'z') || c0_ == '_');
if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
// Identifier starting with lowercase or _.
do {
- AddLiteralCharAdvance();
+ AddLiteralChar(static_cast<char>(c0_));
+ Advance();
} while (IsAsciiIdentifier(c0_));
if (c0_ <= kMaxAscii && c0_ != '\\') {
@@ -1637,7 +1561,7 @@ Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
}
} else if (c0_ <= kMaxAscii && c0_ != '\\') {
// Only a-z+ or _: could be a keyword or identifier.
- Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
+ Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
Token::Value token =
KeywordOrIdentifierToken(chars.start(), chars.length());
if (token == Token::IDENTIFIER ||
@@ -1648,7 +1572,8 @@ Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
}
} else if (IsInRange(c0_, 'A', 'Z') || c0_ == '$') {
do {
- AddLiteralCharAdvance();
+ AddLiteralChar(static_cast<char>(c0_));
+ Advance();
} while (IsAsciiIdentifier(c0_));
if (c0_ <= kMaxAscii && c0_ != '\\') {
@@ -1686,8 +1611,8 @@ Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
}
}
- if (next_.literal_chars->is_one_byte()) {
- Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
+ if (next().literal_chars.is_one_byte()) {
+ Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
Token::Value token =
KeywordOrIdentifierToken(chars.start(), chars.length());
/* TODO(adamk): YIELD should be handled specially. */
@@ -1715,17 +1640,17 @@ Token::Value Scanner::ScanIdentifierOrKeywordInner(LiteralScope* literal) {
}
bool Scanner::ScanRegExpPattern() {
- DCHECK(next_next_.token == Token::UNINITIALIZED);
- DCHECK(next_.token == Token::DIV || next_.token == Token::ASSIGN_DIV);
+ DCHECK_EQ(Token::UNINITIALIZED, next_next().token);
+ DCHECK(next().token == Token::DIV || next().token == Token::ASSIGN_DIV);
// Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
bool in_character_class = false;
- bool seen_equal = (next_.token == Token::ASSIGN_DIV);
+ bool seen_equal = (next().token == Token::ASSIGN_DIV);
// Previous token is either '/' or '/=', in the second case, the
// pattern starts at =.
- next_.location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
- next_.location.end_pos = source_pos() - (seen_equal ? 1 : 0);
+ next().location.beg_pos = source_pos() - (seen_equal ? 2 : 1);
+ next().location.end_pos = source_pos() - (seen_equal ? 1 : 0);
// Scan regular expression body: According to ECMA-262, 3rd, 7.8.5,
// the scanner should pass uninterpreted bodies to the RegExp
@@ -1764,14 +1689,14 @@ bool Scanner::ScanRegExpPattern() {
Advance(); // consume '/'
literal.Complete();
- next_.token = Token::REGEXP_LITERAL;
- next_.contextual_token = Token::UNINITIALIZED;
+ next().token = Token::REGEXP_LITERAL;
+ next().contextual_token = Token::UNINITIALIZED;
return true;
}
Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
- DCHECK(next_.token == Token::REGEXP_LITERAL);
+ DCHECK_EQ(Token::REGEXP_LITERAL, next().token);
// Scan regular expression flags.
int flags = 0;
@@ -1806,7 +1731,7 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
flags |= flag;
}
- next_.location.end_pos = source_pos();
+ next().location.end_pos = source_pos();
return Just(RegExp::Flags(flags));
}
@@ -1869,24 +1794,17 @@ void Scanner::SeekNext(size_t position) {
// 1, Reset the current_, next_ and next_next_ tokens
// (next_ + next_next_ will be overwritten by Next(),
// current_ will remain unchanged, so overwrite it fully.)
- current_ = {{0, 0},
- nullptr,
- nullptr,
- 0,
- Token::UNINITIALIZED,
- MessageTemplate::kNone,
- {0, 0},
- Token::UNINITIALIZED};
- next_.token = Token::UNINITIALIZED;
- next_.contextual_token = Token::UNINITIALIZED;
- next_next_.token = Token::UNINITIALIZED;
- next_next_.contextual_token = Token::UNINITIALIZED;
+ for (TokenDesc& token : token_storage_) {
+ token.token = Token::UNINITIALIZED;
+ token.contextual_token = Token::UNINITIALIZED;
+ }
// 2, reset the source to the desired position,
source_->Seek(position);
// 3, re-scan, by scanning the look-ahead char + 1 token (next_).
c0_ = source_->Advance();
- Next();
- DCHECK_EQ(next_.location.beg_pos, static_cast<int>(position));
+ next().after_line_terminator = false;
+ Scan();
+ DCHECK_EQ(next().location.beg_pos, static_cast<int>(position));
}
} // namespace internal
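
The rewritten ScanTemplateSpan above consumes characters explicitly instead of advancing first and trimming the raw literal afterwards: the closing backtick and the "${" marker are found with one-character lookahead, and <CR> / <CR><LF> are normalized to a single '\n'. A condensed sketch of that control flow over a plain string, with hypothetical names and no escape-sequence handling:

#include <iostream>
#include <string>

struct TemplateChunk {
  std::string cooked;   // value with line endings normalized
  bool tail = false;    // true when the chunk ended at '`', false at "${"
};

// Scans one template chunk starting at *pos (just past '`' or '}').
// Stops at '`' or "${" and normalizes \r and \r\n to \n.
TemplateChunk ScanTemplateChunk(const std::string& src, size_t* pos) {
  TemplateChunk chunk;
  while (*pos < src.size()) {
    char c = src[*pos];
    if (c == '`') {                 // end of the template literal
      ++*pos;
      chunk.tail = true;
      break;
    }
    if (c == '$' && *pos + 1 < src.size() && src[*pos + 1] == '{') {
      *pos += 2;                    // consume "${", a substitution follows
      break;
    }
    ++*pos;                         // consume c
    if (c == '\r') {                // <CR> and <CR><LF> both become \n
      if (*pos < src.size() && src[*pos] == '\n') ++*pos;
      c = '\n';
    }
    chunk.cooked.push_back(c);
  }
  return chunk;
}

int main() {
  std::string src = "head\r\n${x}`";
  size_t pos = 0;
  TemplateChunk first = ScanTemplateChunk(src, &pos);
  std::cout << first.tail << " [" << first.cooked << "]\n";  // 0 [head\n]
}
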
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 34da5fafbf..e592debd8e 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -7,6 +7,8 @@
#ifndef V8_PARSING_SCANNER_H_
#define V8_PARSING_SCANNER_H_
+#include <algorithm>
+
#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/char-predicates.h"
@@ -36,25 +38,51 @@ class Utf16CharacterStream {
public:
static const uc32 kEndOfInput = -1;
- virtual ~Utf16CharacterStream() { }
+ virtual ~Utf16CharacterStream() {}
- // Returns and advances past the next UTF-16 code unit in the input
- // stream. If there are no more code units it returns kEndOfInput.
- inline uc32 Advance() {
+ inline uc32 Peek() {
if (V8_LIKELY(buffer_cursor_ < buffer_end_)) {
- return static_cast<uc32>(*(buffer_cursor_++));
+ return static_cast<uc32>(*buffer_cursor_);
} else if (ReadBlockChecked()) {
- return static_cast<uc32>(*(buffer_cursor_++));
+ return static_cast<uc32>(*buffer_cursor_);
} else {
- // Note: currently the following increment is necessary to avoid a
- // parser problem! The scanner treats the final kEndOfInput as
- // a code unit with a position, and does math relative to that
- // position.
- buffer_cursor_++;
return kEndOfInput;
}
}
+ // Returns and advances past the next UTF-16 code unit in the input
+ // stream. If there are no more code units it returns kEndOfInput.
+ inline uc32 Advance() {
+ uc32 result = Peek();
+ buffer_cursor_++;
+ return result;
+ }
+
+ // Returns and advances past the next UTF-16 code unit in the input stream
+ // that meets the checks requirement. If there are no more code units it
+ // returns kEndOfInput.
+ template <typename FunctionType>
+ V8_INLINE uc32 AdvanceUntil(FunctionType check) {
+ while (true) {
+ auto next_cursor_pos =
+ std::find_if(buffer_cursor_, buffer_end_, [&check](uint16_t raw_c0_) {
+ uc32 c0_ = static_cast<uc32>(raw_c0_);
+ return check(c0_);
+ });
+
+ if (next_cursor_pos == buffer_end_) {
+ buffer_cursor_ = buffer_end_;
+ if (!ReadBlockChecked()) {
+ buffer_cursor_++;
+ return kEndOfInput;
+ }
+ } else {
+ buffer_cursor_ = next_cursor_pos + 1;
+ return static_cast<uc32>(*next_cursor_pos);
+ }
+ }
+ }
+
// Go back one by one character in the input stream.
// This undoes the most recent Advance().
inline void Back() {
@@ -68,17 +96,6 @@ class Utf16CharacterStream {
}
}
- // Go back one by two characters in the input stream. (This is the same as
- // calling Back() twice. But Back() may - in some instances - do substantial
- // work. Back2() guarantees this work will be done only once.)
- inline void Back2() {
- if (V8_LIKELY(buffer_cursor_ - 2 >= buffer_start_)) {
- buffer_cursor_ -= 2;
- } else {
- ReadBlockAt(pos() - 2);
- }
- }
-
inline size_t pos() const {
return buffer_pos_ + (buffer_cursor_ - buffer_start_);
}
@@ -157,7 +174,6 @@ class Utf16CharacterStream {
size_t buffer_pos_;
};
-
// ----------------------------------------------------------------------------
// JavaScript Scanner.
@@ -207,23 +223,24 @@ class Scanner {
static const int kNoOctalLocation = -1;
static const uc32 kEndOfInput = Utf16CharacterStream::kEndOfInput;
- explicit Scanner(UnicodeCache* scanner_contants);
+ explicit Scanner(UnicodeCache* scanner_contants, Utf16CharacterStream* source,
+ bool is_module);
- void Initialize(Utf16CharacterStream* source, bool is_module);
+ void Initialize();
// Returns the next token and advances input.
Token::Value Next();
// Returns the token following peek()
Token::Value PeekAhead();
// Returns the current token again.
- Token::Value current_token() { return current_.token; }
+ Token::Value current_token() { return current().token; }
- Token::Value current_contextual_token() { return current_.contextual_token; }
- Token::Value next_contextual_token() { return next_.contextual_token; }
+ Token::Value current_contextual_token() { return current().contextual_token; }
+ Token::Value next_contextual_token() { return next().contextual_token; }
// Returns the location information for the current token
// (the token last returned by Next()).
- Location location() const { return current_.location; }
+ Location location() const { return current().location; }
// This error is specifically an invalid hex or unicode escape sequence.
bool has_error() const { return scanner_error_ != MessageTemplate::kNone; }
@@ -231,26 +248,26 @@ class Scanner {
Location error_location() const { return scanner_error_location_; }
bool has_invalid_template_escape() const {
- return current_.invalid_template_escape_message != MessageTemplate::kNone;
+ return current().invalid_template_escape_message != MessageTemplate::kNone;
}
MessageTemplate::Template invalid_template_escape_message() const {
DCHECK(has_invalid_template_escape());
- return current_.invalid_template_escape_message;
+ return current().invalid_template_escape_message;
}
Location invalid_template_escape_location() const {
DCHECK(has_invalid_template_escape());
- return current_.invalid_template_escape_location;
+ return current().invalid_template_escape_location;
}
// Similar functions for the upcoming token.
// One token look-ahead (past the token returned by Next()).
- Token::Value peek() const { return next_.token; }
+ Token::Value peek() const { return next().token; }
- Location peek_location() const { return next_.location; }
+ Location peek_location() const { return next().location; }
bool literal_contains_escapes() const {
- return LiteralContainsEscapes(current_);
+ return LiteralContainsEscapes(current());
}
const AstRawString* CurrentSymbol(AstValueFactory* ast_value_factory) const;
@@ -264,12 +281,12 @@ class Scanner {
inline bool CurrentMatches(Token::Value token) const {
DCHECK(Token::IsKeyword(token));
- return current_.token == token;
+ return current().token == token;
}
inline bool CurrentMatchesContextual(Token::Value token) const {
DCHECK(Token::IsContextualKeyword(token));
- return current_.contextual_token == token;
+ return current().contextual_token == token;
}
// Match the token against the contextual keyword or literal buffer.
@@ -278,17 +295,17 @@ class Scanner {
// Escaped keywords are not matched as tokens. So if we require escape
// and/or string processing we need to look at the literal content
// (which was escape-processed already).
- // Conveniently, current_.literal_chars == nullptr for all proper keywords,
- // so this second condition should exit early in common cases.
- return (current_.contextual_token == token) ||
- (current_.literal_chars &&
- current_.literal_chars->Equals(Vector<const char>(
+ // Conveniently, !current().literal_chars.is_used() for all proper
+ // keywords, so this second condition should exit early in common cases.
+ return (current().contextual_token == token) ||
+ (current().literal_chars.is_used() &&
+ current().literal_chars.Equals(Vector<const char>(
Token::String(token), Token::StringLength(token))));
}
bool IsUseStrict() const {
- return current_.token == Token::STRING &&
- current_.literal_chars->Equals(
+ return current().token == Token::STRING &&
+ current().literal_chars.Equals(
Vector<const char>("use strict", strlen("use strict")));
}
bool IsGetOrSet(bool* is_get, bool* is_set) const {
@@ -318,7 +335,7 @@ class Scanner {
MessageTemplate::Template octal_message() const { return octal_message_; }
// Returns the value of the last smi that was scanned.
- uint32_t smi_value() const { return current_.smi_value_; }
+ uint32_t smi_value() const { return current().smi_value_; }
// Seek forward to the given position. This operation does not
// work in general, for instance when there are pushed back
@@ -328,15 +345,14 @@ class Scanner {
// Returns true if there was a line terminator before the peek'ed token,
// possibly inside a multi-line comment.
- bool HasAnyLineTerminatorBeforeNext() const {
- return has_line_terminator_before_next_ ||
- has_multiline_comment_before_next_;
+ bool HasLineTerminatorBeforeNext() const {
+ return next().after_line_terminator;
}
- bool HasAnyLineTerminatorAfterNext() {
+ bool HasLineTerminatorAfterNext() {
Token::Value ensure_next_next = PeekAhead();
USE(ensure_next_next);
- return has_line_terminator_after_next_;
+ return next_next().after_line_terminator;
}
// Scans the input as a regular expression pattern, next token must be /(=).
@@ -348,8 +364,8 @@ class Scanner {
// Scans the input as a template literal
Token::Value ScanTemplateStart();
Token::Value ScanTemplateContinuation() {
- DCHECK_EQ(next_.token, Token::RBRACE);
- next_.location.beg_pos = source_pos() - 1; // We already consumed }
+ DCHECK_EQ(next().token, Token::RBRACE);
+ next().location.beg_pos = source_pos() - 1; // We already consumed }
return ScanTemplateSpan();
}
@@ -399,33 +415,40 @@ class Scanner {
// LiteralBuffer - Collector of chars of literals.
class LiteralBuffer {
public:
- LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() {}
+ LiteralBuffer()
+ : position_(0), is_one_byte_(true), is_used_(false), backing_store_() {}
~LiteralBuffer() { backing_store_.Dispose(); }
V8_INLINE void AddChar(char code_unit) {
+ DCHECK(is_used_);
DCHECK(IsValidAscii(code_unit));
AddOneByteChar(static_cast<byte>(code_unit));
}
V8_INLINE void AddChar(uc32 code_unit) {
- if (is_one_byte_ &&
- code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
- AddOneByteChar(static_cast<byte>(code_unit));
- } else {
- AddCharSlow(code_unit);
+ DCHECK(is_used_);
+ if (is_one_byte_) {
+ if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
+ AddOneByteChar(static_cast<byte>(code_unit));
+ return;
+ }
+ ConvertToTwoByte();
}
+ AddTwoByteChar(code_unit);
}
bool is_one_byte() const { return is_one_byte_; }
bool Equals(Vector<const char> keyword) const {
+ DCHECK(is_used_);
return is_one_byte() && keyword.length() == position_ &&
(memcmp(keyword.start(), backing_store_.start(), position_) == 0);
}
Vector<const uint16_t> two_byte_literal() const {
DCHECK(!is_one_byte_);
+ DCHECK(is_used_);
DCHECK_EQ(position_ & 0x1, 0);
return Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(backing_store_.start()),
@@ -434,17 +457,23 @@ class Scanner {
Vector<const uint8_t> one_byte_literal() const {
DCHECK(is_one_byte_);
+ DCHECK(is_used_);
return Vector<const uint8_t>(
reinterpret_cast<const uint8_t*>(backing_store_.start()), position_);
}
int length() const { return is_one_byte_ ? position_ : (position_ >> 1); }
- void ReduceLength(int delta) {
- position_ -= delta * (is_one_byte_ ? kOneByteSize : kUC16Size);
+ void Start() {
+ DCHECK(!is_used_);
+ DCHECK_EQ(0, position_);
+ is_used_ = true;
}
- void Reset() {
+ bool is_used() const { return is_used_; }
+
+ void Drop() {
+ is_used_ = false;
position_ = 0;
is_one_byte_ = true;
}
@@ -472,13 +501,14 @@ class Scanner {
position_ += kOneByteSize;
}
- void AddCharSlow(uc32 code_unit);
+ void AddTwoByteChar(uc32 code_unit);
int NewCapacity(int min_capacity);
void ExpandBuffer();
void ConvertToTwoByte();
- bool is_one_byte_;
int position_;
+ bool is_one_byte_;
+ bool is_used_;
Vector<byte> backing_store_;
DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
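
The AddChar fast path above keeps the buffer in one-byte (Latin-1) mode until a code unit above 0xFF shows up and only then converts the backing store. A self-contained sketch of that widen-on-demand buffer (std::vector instead of V8's Vector, hypothetical class name):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Stores characters as one byte each until a code unit > 0xFF arrives,
// then widens every stored character to two bytes exactly once.
class WidenOnDemandBuffer {
 public:
  void AddChar(uint16_t code_unit) {
    if (one_byte_) {
      if (code_unit <= 0xFF) {          // fast path: stay narrow
        narrow_.push_back(static_cast<uint8_t>(code_unit));
        return;
      }
      ConvertToTwoByte();               // first wide char: widen once
    }
    wide_.push_back(code_unit);
  }
  bool is_one_byte() const { return one_byte_; }
  size_t length() const { return one_byte_ ? narrow_.size() : wide_.size(); }

 private:
  void ConvertToTwoByte() {
    wide_.assign(narrow_.begin(), narrow_.end());  // zero-extend each byte
    narrow_.clear();
    one_byte_ = false;
  }
  bool one_byte_ = true;
  std::vector<uint8_t> narrow_;
  std::vector<uint16_t> wide_;
};

int main() {
  WidenOnDemandBuffer buf;
  for (char c : std::string("abc")) buf.AddChar(static_cast<uint8_t>(c));
  std::cout << buf.is_one_byte() << "\n";                          // 1: still narrow
  buf.AddChar(0x2603);                                             // forces widening
  std::cout << buf.is_one_byte() << " " << buf.length() << "\n";   // 0 4
}
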
@@ -486,14 +516,16 @@ class Scanner {
// The current and look-ahead token.
struct TokenDesc {
- Location location;
- LiteralBuffer* literal_chars;
- LiteralBuffer* raw_literal_chars;
- uint32_t smi_value_;
- Token::Value token;
- MessageTemplate::Template invalid_template_escape_message;
+ Location location = {0, 0};
+ LiteralBuffer literal_chars;
+ LiteralBuffer raw_literal_chars;
+ Token::Value token = Token::UNINITIALIZED;
+ MessageTemplate::Template invalid_template_escape_message =
+ MessageTemplate::kNone;
Location invalid_template_escape_location;
- Token::Value contextual_token;
+ Token::Value contextual_token = Token::UNINITIALIZED;
+ uint32_t smi_value_ = 0;
+ bool after_line_terminator = false;
};
enum NumberKind {
@@ -510,29 +542,18 @@ class Scanner {
// Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
template <bool capture_raw>
- uc32 ScanOctalEscape(uc32 c, int length, bool in_template_literal);
+ uc32 ScanOctalEscape(uc32 c, int length);
// Call this after setting source_ to the input.
void Init() {
// Set c0_ (one character ahead)
STATIC_ASSERT(kCharacterLookaheadBufferSize == 1);
Advance();
- // Initialize current_ to not refer to a literal.
- current_.token = Token::UNINITIALIZED;
- current_.contextual_token = Token::UNINITIALIZED;
- current_.literal_chars = nullptr;
- current_.raw_literal_chars = nullptr;
- current_.invalid_template_escape_message = MessageTemplate::kNone;
- next_.token = Token::UNINITIALIZED;
- next_.contextual_token = Token::UNINITIALIZED;
- next_.literal_chars = nullptr;
- next_.raw_literal_chars = nullptr;
- next_.invalid_template_escape_message = MessageTemplate::kNone;
- next_next_.token = Token::UNINITIALIZED;
- next_next_.contextual_token = Token::UNINITIALIZED;
- next_next_.literal_chars = nullptr;
- next_next_.raw_literal_chars = nullptr;
- next_next_.invalid_template_escape_message = MessageTemplate::kNone;
+
+ current_ = &token_storage_[0];
+ next_ = &token_storage_[1];
+ next_next_ = &token_storage_[2];
+
found_html_comment_ = false;
scanner_error_ = MessageTemplate::kNone;
}
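
Init() above now just points current_/next_/next_next_ at three fixed TokenDesc slots; advancing the scanner later rotates the pointers instead of copying descriptors (and their embedded literal buffers). A small sketch of that pointer-rotation scheme, with a hypothetical Rotate() standing in for the bookkeeping Scanner::Next() does:

#include <iostream>
#include <string>

struct TokenDesc {
  std::string literal;   // stands in for the embedded LiteralBuffer
  int token = 0;         // 0 == uninitialized
};

class TokenRing {
 public:
  TokenRing() : current_(&storage_[0]), next_(&storage_[1]), next_next_(&storage_[2]) {}

  // Advance by one token: the old "next" becomes "current", and the slot
  // that held the old "current" is recycled for the new lookahead.
  void Rotate() {
    TokenDesc* recycled = current_;
    current_ = next_;
    next_ = next_next_;
    next_next_ = recycled;
    next_next_->token = 0;          // mark the recycled slot uninitialized
    next_next_->literal.clear();
  }

  TokenDesc& current() { return *current_; }
  TokenDesc& next() { return *next_; }

 private:
  TokenDesc storage_[3];
  TokenDesc* current_;
  TokenDesc* next_;
  TokenDesc* next_next_;
};

int main() {
  TokenRing ring;
  ring.next().token = 42;
  ring.next().literal = "answer";
  ring.Rotate();
  std::cout << ring.current().token << " " << ring.current().literal << "\n";  // 42 answer
}
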
@@ -554,52 +575,23 @@ class Scanner {
void SeekNext(size_t position);
// Literal buffer support
- inline void StartLiteral() {
- LiteralBuffer* free_buffer =
- (current_.literal_chars == &literal_buffer0_)
- ? &literal_buffer1_
- : (current_.literal_chars == &literal_buffer1_) ? &literal_buffer2_
- : &literal_buffer0_;
- free_buffer->Reset();
- next_.literal_chars = free_buffer;
- }
+ inline void StartLiteral() { next().literal_chars.Start(); }
- inline void StartRawLiteral() {
- LiteralBuffer* free_buffer =
- (current_.raw_literal_chars == &raw_literal_buffer0_)
- ? &raw_literal_buffer1_
- : (current_.raw_literal_chars == &raw_literal_buffer1_)
- ? &raw_literal_buffer2_
- : &raw_literal_buffer0_;
- free_buffer->Reset();
- next_.raw_literal_chars = free_buffer;
- }
+ inline void StartRawLiteral() { next().raw_literal_chars.Start(); }
- V8_INLINE void AddLiteralChar(uc32 c) {
- DCHECK_NOT_NULL(next_.literal_chars);
- next_.literal_chars->AddChar(c);
- }
+ V8_INLINE void AddLiteralChar(uc32 c) { next().literal_chars.AddChar(c); }
- V8_INLINE void AddLiteralChar(char c) {
- DCHECK_NOT_NULL(next_.literal_chars);
- next_.literal_chars->AddChar(c);
- }
+ V8_INLINE void AddLiteralChar(char c) { next().literal_chars.AddChar(c); }
V8_INLINE void AddRawLiteralChar(uc32 c) {
- DCHECK_NOT_NULL(next_.raw_literal_chars);
- next_.raw_literal_chars->AddChar(c);
- }
-
- V8_INLINE void ReduceRawLiteralLength(int delta) {
- DCHECK_NOT_NULL(next_.raw_literal_chars);
- next_.raw_literal_chars->ReduceLength(delta);
+ next().raw_literal_chars.AddChar(c);
}
// Stops scanning of a literal and drop the collected characters,
// e.g., due to an encountered error.
inline void DropLiteral() {
- next_.literal_chars = nullptr;
- next_.raw_literal_chars = nullptr;
+ next().literal_chars.Drop();
+ next().raw_literal_chars.Drop();
}
inline void AddLiteralCharAdvance() {
@@ -616,6 +608,11 @@ class Scanner {
c0_ = source_->Advance();
}
+ template <typename FunctionType>
+ V8_INLINE void AdvanceUntil(FunctionType check) {
+ c0_ = source_->AdvanceUntil(check);
+ }
+
bool CombineSurrogatePair() {
DCHECK(!unibrow::Utf16::IsLeadSurrogate(kEndOfInput));
if (unibrow::Utf16::IsLeadSurrogate(c0_)) {
@@ -631,22 +628,12 @@ class Scanner {
}
void PushBack(uc32 ch) {
- if (c0_ > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- source_->Back2();
- } else {
- source_->Back();
- }
+ DCHECK_LE(c0_, static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode));
+ source_->Back();
c0_ = ch;
}
- // Same as PushBack(ch1); PushBack(ch2).
- // - Potentially more efficient as it uses Back2() on the stream.
- // - Uses char as parameters, since we're only calling it with ASCII chars in
- // practice. This way, we can avoid a few edge cases.
- void PushBack2(char ch1, char ch2) {
- source_->Back2();
- c0_ = ch2;
- }
+ uc32 Peek() const { return source_->Peek(); }
inline Token::Value Select(Token::Value tok) {
Advance();
@@ -676,45 +663,46 @@ class Scanner {
// token as a one-byte literal. E.g. Token::FUNCTION pretends to have a
// literal "function".
Vector<const uint8_t> literal_one_byte_string() const {
- if (current_.literal_chars)
- return current_.literal_chars->one_byte_literal();
- const char* str = Token::String(current_.token);
+ if (current().literal_chars.is_used())
+ return current().literal_chars.one_byte_literal();
+ const char* str = Token::String(current().token);
const uint8_t* str_as_uint8 = reinterpret_cast<const uint8_t*>(str);
return Vector<const uint8_t>(str_as_uint8,
- Token::StringLength(current_.token));
+ Token::StringLength(current().token));
}
Vector<const uint16_t> literal_two_byte_string() const {
- DCHECK_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->two_byte_literal();
+ DCHECK(current().literal_chars.is_used());
+ return current().literal_chars.two_byte_literal();
}
bool is_literal_one_byte() const {
- return !current_.literal_chars || current_.literal_chars->is_one_byte();
+ return !current().literal_chars.is_used() ||
+ current().literal_chars.is_one_byte();
}
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
Vector<const uint8_t> next_literal_one_byte_string() const {
- DCHECK_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->one_byte_literal();
+ DCHECK(next().literal_chars.is_used());
+ return next().literal_chars.one_byte_literal();
}
Vector<const uint16_t> next_literal_two_byte_string() const {
- DCHECK_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->two_byte_literal();
+ DCHECK(next().literal_chars.is_used());
+ return next().literal_chars.two_byte_literal();
}
bool is_next_literal_one_byte() const {
- DCHECK_NOT_NULL(next_.literal_chars);
- return next_.literal_chars->is_one_byte();
+ DCHECK(next().literal_chars.is_used());
+ return next().literal_chars.is_one_byte();
}
Vector<const uint8_t> raw_literal_one_byte_string() const {
- DCHECK_NOT_NULL(current_.raw_literal_chars);
- return current_.raw_literal_chars->one_byte_literal();
+ DCHECK(current().raw_literal_chars.is_used());
+ return current().raw_literal_chars.one_byte_literal();
}
Vector<const uint16_t> raw_literal_two_byte_string() const {
- DCHECK_NOT_NULL(current_.raw_literal_chars);
- return current_.raw_literal_chars->two_byte_literal();
+ DCHECK(current().raw_literal_chars.is_used());
+ return current().raw_literal_chars.two_byte_literal();
}
bool is_raw_literal_one_byte() const {
- DCHECK_NOT_NULL(current_.raw_literal_chars);
- return current_.raw_literal_chars->is_one_byte();
+ DCHECK(current().raw_literal_chars.is_used());
+ return current().raw_literal_chars.is_one_byte();
}
template <bool capture_raw, bool unicode = false>
@@ -728,7 +716,7 @@ class Scanner {
// Scans a single JavaScript token.
void Scan();
- Token::Value SkipWhiteSpace();
+ V8_INLINE Token::Value SkipWhiteSpace();
Token::Value SkipSingleHTMLComment();
Token::Value SkipSingleLineComment();
Token::Value SkipSourceURLComment();
@@ -759,7 +747,7 @@ class Scanner {
// Scans an escape-sequence which is part of a string and adds the
// decoded character to the current literal. Returns true if a pattern
// is scanned.
- template <bool capture_raw, bool in_template_literal>
+ template <bool capture_raw>
bool ScanEscape();
// Decodes a Unicode escape-sequence which is part of an identifier.
@@ -769,8 +757,6 @@ class Scanner {
template <bool capture_raw>
uc32 ScanUnicodeEscape();
- bool is_module_;
-
Token::Value ScanTemplateSpan();
// Return the current source position.
@@ -785,8 +771,8 @@ class Scanner {
// Subtract delimiters.
source_length -= 2;
}
- return token.literal_chars &&
- (token.literal_chars->length() != source_length);
+ return token.literal_chars.is_used() &&
+ (token.literal_chars.length() != source_length);
}
#ifdef DEBUG
@@ -795,26 +781,24 @@ class Scanner {
UnicodeCache* unicode_cache_;
- // Buffers collecting literal strings, numbers, etc.
- LiteralBuffer literal_buffer0_;
- LiteralBuffer literal_buffer1_;
- LiteralBuffer literal_buffer2_;
-
// Values parsed from magic comments.
LiteralBuffer source_url_;
LiteralBuffer source_mapping_url_;
- // Buffer to store raw string values
- LiteralBuffer raw_literal_buffer0_;
- LiteralBuffer raw_literal_buffer1_;
- LiteralBuffer raw_literal_buffer2_;
+ TokenDesc token_storage_[3];
+
+ TokenDesc& next() { return *next_; }
+
+ const TokenDesc& current() const { return *current_; }
+ const TokenDesc& next() const { return *next_; }
+ const TokenDesc& next_next() const { return *next_next_; }
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
- TokenDesc next_next_; // desc for the token after next (after PeakAhead())
+ TokenDesc* current_; // desc for current token (as returned by Next())
+ TokenDesc* next_; // desc for next token (one token look-ahead)
+  TokenDesc* next_next_;  // desc for the token after next (after PeekAhead())
// Input stream. Must be initialized to an Utf16CharacterStream.
- Utf16CharacterStream* source_;
+ Utf16CharacterStream* const source_;
// Last-seen positions of potentially problematic tokens.
Location octal_pos_;
@@ -823,15 +807,6 @@ class Scanner {
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
- // Whether there is a line terminator whitespace character after
- // the current token, and before the next. Does not count newlines
- // inside multiline comments.
- bool has_line_terminator_before_next_;
- // Whether there is a multi-line comment that contains a
- // line-terminator after the current token, and before the next.
- bool has_multiline_comment_before_next_;
- bool has_line_terminator_after_next_;
-
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
@@ -840,6 +815,8 @@ class Scanner {
bool allow_harmony_private_fields_;
bool allow_harmony_numeric_separator_;
+ const bool is_module_;
+
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
};
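
AdvanceUntil above replaces a character-by-character loop with std::find_if over the currently buffered block, refilling the block and retrying when the predicate is not satisfied. A standalone sketch over an in-memory source split into fixed-size blocks (hypothetical names; the real stream refills from a Utf16CharacterStream subclass):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Source delivered in fixed-size blocks, like a streaming character source.
class BlockedSource {
 public:
  BlockedSource(std::string s, size_t block) : src_(std::move(s)), block_(block) {}

  // Advances until check(c) is true and returns that character (consumed),
  // or -1 if the input is exhausted first.
  template <typename Check>
  int AdvanceUntil(Check check) {
    while (true) {
      auto hit = std::find_if(buffer_.begin() + cursor_, buffer_.end(), check);
      if (hit != buffer_.end()) {
        cursor_ = (hit - buffer_.begin()) + 1;  // consume up to and including the hit
        return *hit;
      }
      cursor_ = buffer_.size();
      if (!Refill()) return -1;                 // no more blocks: end of input
    }
  }

 private:
  bool Refill() {
    if (offset_ >= src_.size()) return false;
    size_t n = std::min(block_, src_.size() - offset_);
    buffer_.assign(src_.begin() + offset_, src_.begin() + offset_ + n);
    offset_ += n;
    cursor_ = 0;
    return true;
  }
  std::string src_;
  size_t block_;
  size_t offset_ = 0;
  std::vector<char> buffer_;
  size_t cursor_ = 0;
};

int main() {
  BlockedSource src("   \t  \n  x = 1", 4);  // blocks of 4 characters
  int c = src.AdvanceUntil([](char ch) { return ch != ' ' && ch != '\t' && ch != '\n'; });
  std::cout << static_cast<char>(c) << "\n";  // x
}

Batching the search this way lets the standard library scan a whole buffered block at a time, which is the point of making SkipWhiteSpace and the identifier fast path use it.
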
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 73c2e8dfe2..e3dbaa96c9 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -54,8 +54,8 @@ void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
// Jump table entry
- Address target = Memory::Address_at(pc_);
- Memory::Address_at(pc_) = target + delta;
+ Address target = Memory<Address>(pc_);
+ Memory<Address>(pc_) = target + delta;
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
@@ -69,7 +69,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
@@ -181,8 +181,7 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
- heap->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -220,11 +219,12 @@ Address RelocInfo::target_off_heap_target() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
+ IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
- Memory::Address_at(pc_) = kNullAddress;
- } else if (IsInternalReferenceEncoded(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
+ } else if (IsInternalReferenceEncoded(rmode_) || IsOffHeapTarget(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress,
@@ -272,7 +272,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
if (IsConstantPoolLoadStart(pc, &access))
- return Memory::Address_at(target_constant_pool_address_at(
+ return Memory<Address>(target_constant_pool_address_at(
pc, constant_pool, access, ConstantPoolEntry::INTPTR));
}
@@ -441,7 +441,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
set_target_address_at(pc, kNullAddress, target, SKIP_ICACHE_FLUSH);
} else {
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
}
@@ -453,7 +453,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
if (IsConstantPoolLoadStart(pc, &access)) {
- Memory::Address_at(target_constant_pool_address_at(
+ Memory<Address>(target_constant_pool_address_at(
pc, constant_pool, access, ConstantPoolEntry::INTPTR)) = target;
return;
}
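
The RelocInfo changes above switch from the per-type helper Memory::Address_at(pc) to the templated accessor Memory<Address>(pc), which reinterprets a raw address as a mutable reference of the requested type and so works on both sides of an assignment. A minimal sketch of that kind of accessor (uintptr_t standing in for Address; the same alignment and aliasing caveats apply as for the real helper):

#include <cstdint>
#include <iostream>

using Address = uintptr_t;

// Typed view of a raw address: usable for both reads and in-place patches.
template <typename T>
T& Memory(Address addr) {
  return *reinterpret_cast<T*>(addr);
}

int main() {
  uint64_t slot = 0;
  Address pc = reinterpret_cast<Address>(&slot);

  Memory<uint32_t>(pc) = 0x12345678;       // write through the view
  uint32_t value = Memory<uint32_t>(pc);   // read it back
  Memory<uint32_t>(pc) = value + 4;        // patch in place, e.g. apply a delta

  std::cout << std::hex << Memory<uint32_t>(pc) << "\n";  // 1234567c
}
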
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index c43b955210..24d9d2b8f3 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -2094,8 +2094,8 @@ void Assembler::EmitRelocations() {
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
- intptr_t pos = static_cast<intptr_t>(Memory::Address_at(pc));
- Memory::Address_at(pc) = reinterpret_cast<Address>(buffer_) + pos;
+ intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
+ Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 0fde450f07..b737320cbb 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -298,7 +298,6 @@ GENERAL_REGISTERS(DEFINE_REGISTER)
constexpr Register no_reg = Register::no_reg();
// Aliases
-constexpr Register kLithiumScratch = r11; // lithium scratch.
constexpr Register kConstantPoolRegister = r28; // Constant pool.
constexpr Register kRootRegister = r29; // Roots array pointer.
constexpr Register cp = r30; // JavaScript context pointer.
@@ -597,9 +596,6 @@ class Assembler : public AssemblerBase {
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
- // Size of an instruction.
- static constexpr int kInstrSize = sizeof(Instr);
-
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
// PPC platform, as Code, Embedded Object or External-reference pointers
@@ -663,7 +659,6 @@ class Assembler : public AssemblerBase {
template <class R> \
inline void name(const R rt, const Register ra, const Register rb, \
const RCBit rc = LeaveRC) { \
- DCHECK(ra != r0); \
x_form(instr_name, rt.code(), ra.code(), rb.code(), rc); \
} \
template <class R> \
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index f4c286fdc7..cfa2709fd5 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_PPC
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
@@ -222,9 +222,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
if (tasm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(tasm,
#if V8_TARGET_ARCH_PPC64
- 14 * Assembler::kInstrSize);
+ 14 * kInstrSize);
#else
- 11 * Assembler::kInstrSize);
+ 11 * kInstrSize);
#endif
tasm->mflr(r0);
tasm->Push(r0, ip);
@@ -238,9 +238,9 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != nullptr) {
PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_PPC64
- 14 * Assembler::kInstrSize);
+ 14 * kInstrSize);
#else
- 11 * Assembler::kInstrSize);
+ 11 * kInstrSize);
#endif
ProfileEntryHookStub stub(masm->isolate());
__ mflr(r0);
@@ -255,7 +255,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr, ip" instruction, followed by a call.
const int32_t kReturnAddressDistanceFromFunctionStart =
- Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;
+ Assembler::kCallTargetAddressOffset + 3 * kInstrSize;
// This should contain all kJSCallerSaved registers.
const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index a2a9013b1c..65963b9af6 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -37,7 +37,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
+ !RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 673e5dc9b7..0f2679008c 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -88,22 +88,6 @@ inline Condition NegateCondition(Condition cond) {
}
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cond) {
- switch (cond) {
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cond;
- }
-}
-
// -----------------------------------------------------------------------------
// Instructions encoding.
@@ -2756,10 +2740,13 @@ const Instr rtCallRedirInstr = TWI;
// return ((type == 0) || (type == 1)) && instr->HasS();
// }
//
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+constexpr uint8_t kPcLoadDelta = 8;
+
class Instruction {
public:
- enum { kInstrSize = 4, kInstrSizeLog2 = 2, kPCReadOffset = 8 };
-
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 5564fd9c32..1b8a1139a3 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -1157,7 +1157,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
// The first field will be identified as a jump table entry. We
// emit the rest of the structure as zero, so just skip past them.
Format(instr, "constant");
- return Instruction::kInstrSize;
+ return kInstrSize;
}
uint32_t opcode = instr->OpcodeValue() << 26;
@@ -1466,7 +1466,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
}
- return Instruction::kInstrSize;
+ return kInstrSize;
}
} // namespace internal
} // namespace v8
@@ -1512,13 +1512,6 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
v8::internal::Decoder d(converter_, buffer);
@@ -1529,10 +1522,10 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
// The PPC assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
NameConverter converter;
- Disassembler d(converter);
+ Disassembler d(converter, unimplemented_action);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
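
Across the PPC files above, Instruction::kInstrSize becomes the namespace-level constant kInstrSize, and decode loops simply step the pc by whatever InstructionDecode() returns. A toy version of that fixed-width decode loop (fake instruction bytes, hypothetical decoder, not the V8 disassembler API):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

constexpr int kInstrSize = 4;  // fixed-width ISA: every instruction is 4 bytes

// Pretend decoder: formats one instruction and reports how many bytes it used.
int InstructionDecode(char* out, size_t out_size, const uint8_t* pc) {
  uint32_t bits;
  std::memcpy(&bits, pc, sizeof(bits));
  std::snprintf(out, out_size, "0x%08x", bits);
  return kInstrSize;  // fixed width: always advance by one instruction
}

int main() {
  std::vector<uint8_t> code = {0x01, 0x00, 0x00, 0x38,   // two fake instructions
                               0x02, 0x00, 0x63, 0x38};
  char buffer[32];
  for (const uint8_t* pc = code.data(); pc < code.data() + code.size();) {
    pc += InstructionDecode(buffer, sizeof(buffer), pc);
    std::printf("%s\n", buffer);
  }
}
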
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index c446a74e10..857ab7a883 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -246,30 +246,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // argument count (argc)
- r5, // address of first argument (argv)
- r4 // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 13e04a2c8c..5605907d6f 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -20,6 +20,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/ppc/macro-assembler-ppc.h"
@@ -196,6 +197,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
@@ -212,18 +214,11 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}
-int TurboAssembler::CallSize(Register target) { return 2 * kInstrSize; }
-
void TurboAssembler::Call(Register target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- Label start;
- bind(&start);
-
// branch via link register and set LK bit for return point
mtctr(target);
bctrl();
-
- DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallJSEntry(Register target) {
@@ -231,12 +226,6 @@ void MacroAssembler::CallJSEntry(Register target) {
Call(target);
}
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
- Condition cond) {
- Operand mov_operand = Operand(target, rmode);
- return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
-}
-
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
@@ -248,13 +237,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(cond == al);
-#ifdef DEBUG
- // Check the expected size before generating code to ensure we assume the same
- // constant pool availability (e.g., whether constant pool is full or not).
- int expected_size = CallSize(target, rmode, cond);
- Label start;
- bind(&start);
-#endif
// This can likely be optimized to make use of bc() with 24bit relative
//
// RecordRelocInfo(x.rmode_, x.immediate);
@@ -264,13 +246,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
mov(ip, Operand(target, rmode));
mtctr(ip);
bctrl();
-
- DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
- return CallSize(code.address(), rmode, cond);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
@@ -294,6 +269,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
DCHECK(Builtins::IsBuiltinId(builtin_index));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -933,11 +909,10 @@ void TurboAssembler::LoadPC(Register dst) {
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
- Label current_pc;
- mov_label_addr(dst, &current_pc);
-
- bind(&current_pc);
- subi(dst, dst, Operand(pc_offset()));
+ mflr(r0);
+ LoadPC(dst);
+ subi(dst, dst, Operand(pc_offset() - kInstrSize));
+ mtlr(r0);
}
void TurboAssembler::LoadConstantPoolPointerRegister() {
@@ -1789,6 +1764,18 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ mov(r3, Operand(static_cast<int>(reason)));
+ PrepareCallCFunction(1, 0, r4);
+ Move(ip, ExternalReference::abort_with_reason());
+ // Use Call directly to avoid any unneeded overhead. The function won't
+ // return anyway.
+ Call(ip);
+ return;
+ }
+
LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -2910,8 +2897,10 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
}
void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
- if (dst.ra() != r0) DCHECK(!AreAliased(src, dst.ra(), scratch));
- if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
+ if (dst.ra() != r0 && dst.ra().is_valid())
+ DCHECK(!AreAliased(src, dst.ra(), scratch));
+ if (dst.rb() != r0 && dst.rb().is_valid())
+ DCHECK(!AreAliased(src, dst.rb(), scratch));
DCHECK(!AreAliased(src, scratch));
mr(scratch, src);
LoadP(src, dst, r0);
@@ -3004,57 +2993,6 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
StoreDouble(scratch_1, src, r0);
}
-#ifdef DEBUG
-bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8,
- Register reg9, Register reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-
-bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
- DoubleRegister reg4, DoubleRegister reg5, DoubleRegister reg6,
- DoubleRegister reg7, DoubleRegister reg8, DoubleRegister reg9,
- DoubleRegister reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
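
The per-port AreAliased helpers deleted above detect aliasing by comparing the number of valid registers with the number of distinct bits they set in a RegList: two valid registers with the same code collapse onto one bit. A compact sketch of that counting trick, with plain integers standing in for Register and RegList (the deletion itself relies on a shared implementation elsewhere in the tree):

#include <bitset>
#include <initializer_list>
#include <iostream>

constexpr int kNoReg = -1;  // stands in for an invalid / no_reg register code

// Registers alias iff fewer distinct bits are set than registers are valid.
bool AreAliased(std::initializer_list<int> regs) {
  int valid = 0;
  unsigned long long bits = 0;
  for (int code : regs) {
    if (code == kNoReg) continue;
    ++valid;
    bits |= 1ULL << code;
  }
  return valid != static_cast<int>(std::bitset<64>(bits).count());
}

int main() {
  std::cout << AreAliased({3, 4, kNoReg}) << "\n";  // 0: all distinct
  std::cout << AreAliased({3, 4, 3}) << "\n";       // 1: r3 appears twice
}
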
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index daf1fbdb6a..364b60d037 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -37,6 +37,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = r5;
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r4;
constexpr Register kRuntimeCallArgCountRegister = r3;
+constexpr Register kRuntimeCallArgvRegister = r5;
constexpr Register kWasmInstanceRegister = r10;
// ----------------------------------------------------------------------------
@@ -47,16 +48,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -68,20 +59,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
-
-#ifdef DEBUG
-bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
- Register reg4 = no_reg, Register reg5 = no_reg,
- Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg, Register reg9 = no_reg,
- Register reg10 = no_reg);
-bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
- DoubleRegister reg3 = no_dreg, DoubleRegister reg4 = no_dreg,
- DoubleRegister reg5 = no_dreg, DoubleRegister reg6 = no_dreg,
- DoubleRegister reg7 = no_dreg, DoubleRegister reg8 = no_dreg,
- DoubleRegister reg9 = no_dreg, DoubleRegister reg10 = no_dreg);
-#endif
-
// These exist to provide portability between 32 and 64bit
#if V8_TARGET_ARCH_PPC64
#define LoadPX ldx
@@ -96,8 +73,6 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
-#define Mul mulld
-#define Div divd
#else
#define LoadPX lwzx
#define LoadPUX lwzux
@@ -111,11 +86,9 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
-#define Mul mullw
-#define Div divw
#endif
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -445,12 +418,6 @@ class TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
- // Returns the size of a call in instructions. Note, the value returned is
- // only valid as long as no entries are added to the constant pool between
- // checking the call size and emitting the actual call.
- static int CallSize(Register target);
- int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
-
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
@@ -461,9 +428,6 @@ class TurboAssembler : public TurboAssemblerBase {
CRegister cr = cr7);
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Call(Label* target);
@@ -711,13 +675,6 @@ class MacroAssembler : public TurboAssembler {
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp);
-
void JumpToJSEntry(Register target);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
@@ -768,11 +725,6 @@ class MacroAssembler : public TurboAssembler {
void PushSafepointRegisters();
void PopSafepointRegisters();
- // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
- // from C.
- // Does not handle errors.
- void FlushICache(Register address, size_t size, Register scratch);
-
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 350d4687ce..0fd03df30c 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -73,8 +73,7 @@ void PPCDebugger::Stop(Instruction* instr) {
// use of kStopCodeMask not right on PowerPC
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
- char* msg =
- *reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() + kInstrSize);
// Update this stop description.
if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
sim_->watched_stops_[code].desc = msg;
@@ -85,7 +84,7 @@ void PPCDebugger::Stop(Instruction* instr) {
} else {
PrintF("Simulator hit %s\n", msg);
}
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
+ sim_->set_pc(sim_->get_pc() + kInstrSize + kPointerSize);
Debug();
}
@@ -233,7 +232,7 @@ void PPCDebugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
->InstructionBits() == 0x7D821008) {
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
} else {
sim_->ExecuteInstruction(
reinterpret_cast<Instruction*>(sim_->get_pc()));
@@ -257,7 +256,7 @@ void PPCDebugger::Debug() {
// If at a breakpoint, proceed past it.
if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
->InstructionBits() == 0x7D821008) {
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
} else {
// Execute the one instruction we broke at with breakpoints disabled.
sim_->ExecuteInstruction(
@@ -430,7 +429,7 @@ void PPCDebugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
} else if (argc == 2) {
int regnum = Registers::Number(arg1);
if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
@@ -439,7 +438,7 @@ void PPCDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
+ end = cur + (10 * kInstrSize);
}
} else {
// The argument is the number of instructions.
@@ -447,7 +446,7 @@ void PPCDebugger::Debug() {
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
+ end = cur + (value * kInstrSize);
}
}
} else {
@@ -455,7 +454,7 @@ void PPCDebugger::Debug() {
intptr_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
+ end = cur + (value2 * kInstrSize);
}
}
@@ -498,11 +497,10 @@ void PPCDebugger::Debug() {
PrintF("FPSCR: %08x\n", sim_->fp_condition_reg_);
} else if (strcmp(cmd, "stop") == 0) {
intptr_t value;
- intptr_t stop_pc =
- sim_->get_pc() - (Instruction::kInstrSize + kPointerSize);
+ intptr_t stop_pc = sim_->get_pc() - (kInstrSize + kPointerSize);
Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
Instruction* msg_address =
- reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->isStopInstruction(stop_instr)) {
@@ -725,9 +723,8 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
- CHECK_EQ(0,
- memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset), Instruction::kInstrSize));
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), kInstrSize));
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@@ -1469,7 +1466,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PPCDebugger dbg(this);
dbg.Stop(instr);
} else {
- set_pc(get_pc() + Instruction::kInstrSize + kPointerSize);
+ set_pc(get_pc() + kInstrSize + kPointerSize);
}
} else {
// This is not a valid svc code.
@@ -3922,11 +3919,10 @@ void Simulator::ExecuteInstruction(Instruction* instr) {
ExecuteGeneric(instr);
}
if (!pc_modified_) {
- set_pc(reinterpret_cast<intptr_t>(instr) + Instruction::kInstrSize);
+ set_pc(reinterpret_cast<intptr_t>(instr) + kInstrSize);
}
}
-
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index e5aa7c554e..71e297c4bf 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -4,7 +4,7 @@
#include "src/profiler/heap-profiler.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
#include "src/profiler/allocation-tracker.h"
@@ -222,6 +222,8 @@ void HeapProfiler::ClearHeapObjectMap() {
Heap* HeapProfiler::heap() const { return ids_->heap(); }
+Isolate* HeapProfiler::isolate() const { return heap()->isolate(); }
+
void HeapProfiler::QueryObjects(Handle<Context> context,
debug::QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index fc0b005e1c..8ce379d59d 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -85,7 +85,7 @@ class HeapProfiler : public HeapObjectAllocationTracker {
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
void ClearHeapObjectMap();
- Isolate* isolate() const { return heap()->isolate(); }
+ Isolate* isolate() const;
void QueryObjects(Handle<Context> context,
debug::QueryObjectPredicate* predicate,
diff --git a/deps/v8/src/profiler/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index 83f210e86a..edf6559706 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -7,6 +7,9 @@
#include "src/profiler/heap-snapshot-generator.h"
+#include "src/profiler/heap-profiler.h"
+#include "src/string-hasher-inl.h"
+
namespace v8 {
namespace internal {
@@ -38,6 +41,12 @@ int HeapEntry::set_children_index(int index) {
return next_index;
}
+void HeapEntry::add_child(HeapGraphEdge* edge) {
+ *(children_begin() + children_count_++) = edge;
+}
+
+HeapGraphEdge* HeapEntry::child(int i) { return *(children_begin() + i); }
+
std::deque<HeapGraphEdge*>::iterator HeapEntry::children_begin() {
DCHECK_GE(children_index_, 0);
SLOW_DCHECK(
@@ -51,9 +60,23 @@ std::deque<HeapGraphEdge*>::iterator HeapEntry::children_end() {
return children_begin() + children_count_;
}
-
Isolate* HeapEntry::isolate() const { return snapshot_->profiler()->isolate(); }
+uint32_t HeapSnapshotJSONSerializer::StringHash(const void* string) {
+ const char* s = reinterpret_cast<const char*>(string);
+ int len = static_cast<int>(strlen(s));
+ return StringHasher::HashSequentialString(s, len,
+ v8::internal::kZeroHashSeed);
+}
+
+int HeapSnapshotJSONSerializer::to_node_index(const HeapEntry* e) {
+ return to_node_index(e->index());
+}
+
+int HeapSnapshotJSONSerializer::to_node_index(int entry_index) {
+ return entry_index * kNodeFieldsCount;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index b51ea0de7e..96ac785273 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -6,7 +6,7 @@
#include <utility>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
@@ -16,7 +16,10 @@
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
@@ -246,6 +249,9 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(Root root, SnapshotObjectId id) {
return entry;
}
+void HeapSnapshot::AddLocation(int entry, int scriptId, int line, int col) {
+ locations_.emplace_back(entry, scriptId, line, col);
+}
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
const char* name,
@@ -608,6 +614,33 @@ HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
return AddEntry(reinterpret_cast<HeapObject*>(ptr));
}
+void V8HeapExplorer::ExtractLocation(int entry, HeapObject* object) {
+ if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(object);
+ ExtractLocationForJSFunction(entry, func);
+
+ } else if (object->IsJSGeneratorObject()) {
+ JSGeneratorObject* gen = JSGeneratorObject::cast(object);
+ ExtractLocationForJSFunction(entry, gen->function());
+
+ } else if (object->IsJSObject()) {
+ JSObject* obj = JSObject::cast(object);
+ JSFunction* maybe_constructor = GetConstructor(obj);
+
+ if (maybe_constructor)
+ ExtractLocationForJSFunction(entry, maybe_constructor);
+ }
+}
+
+void V8HeapExplorer::ExtractLocationForJSFunction(int entry, JSFunction* func) {
+ if (!func->shared()->script()->IsScript()) return;
+ Script* script = Script::cast(func->shared()->script());
+ int scriptId = script->id();
+ int start = func->shared()->StartPosition();
+ int line = script->GetLineNumber(start);
+ int col = script->GetColumnNumber(start);
+ snapshot_->AddLocation(entry, scriptId, line, col);
+}
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
if (object->IsJSFunction()) {
@@ -854,6 +887,8 @@ void V8HeapExplorer::ExtractReferences(int entry, HeapObject* obj) {
ExtractJSCollectionReferences(entry, JSMap::cast(obj));
} else if (obj->IsJSPromise()) {
ExtractJSPromiseReferences(entry, JSPromise::cast(obj));
+ } else if (obj->IsJSGeneratorObject()) {
+ ExtractJSGeneratorObjectReferences(entry, JSGeneratorObject::cast(obj));
}
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
@@ -876,8 +911,6 @@ void V8HeapExplorer::ExtractReferences(int entry, HeapObject* obj) {
ExtractCellReferences(entry, Cell::cast(obj));
} else if (obj->IsFeedbackCell()) {
ExtractFeedbackCellReferences(entry, FeedbackCell::cast(obj));
- } else if (obj->IsWeakCell()) {
- ExtractWeakCellReferences(entry, WeakCell::cast(obj));
} else if (obj->IsPropertyCell()) {
ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
} else if (obj->IsAllocationSite()) {
@@ -1119,8 +1152,6 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
Map::kTransitionsOrPrototypeInfoOffset);
} else if (maybe_raw_transitions_or_prototype_info->ToStrongHeapObject(
&raw_transitions_or_prototype_info)) {
- DCHECK(!raw_transitions_or_prototype_info->IsWeakCell());
-
if (raw_transitions_or_prototype_info->IsTransitionArray()) {
TransitionArray* transitions =
TransitionArray::cast(raw_transitions_or_prototype_info);
@@ -1172,9 +1203,6 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
TagObject(map->dependent_code(), "(dependent code)");
SetInternalReference(map, entry, "dependent_code", map->dependent_code(),
Map::kDependentCodeOffset);
- TagObject(map->weak_cell_cache(), "(weak cell)");
- SetInternalReference(map, entry, "weak_cell_cache", map->weak_cell_cache(),
- Map::kWeakCellCacheOffset);
}
@@ -1198,17 +1226,13 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry, "name_or_scope_info",
shared->name_or_scope_info(),
SharedFunctionInfo::kNameOrScopeInfoOffset);
- SetInternalReference(obj, entry,
- "script", shared->script(),
- SharedFunctionInfo::kScriptOffset);
+ SetInternalReference(obj, entry, "script_or_debug_info",
+ shared->script_or_debug_info(),
+ SharedFunctionInfo::kScriptOrDebugInfoOffset);
SetInternalReference(obj, entry,
"function_data", shared->function_data(),
SharedFunctionInfo::kFunctionDataOffset);
SetInternalReference(
- obj, entry, "function_identifier_or_debug_info",
- shared->function_identifier_or_debug_info(),
- SharedFunctionInfo::kFunctionIdentifierOrDebugInfoOffset);
- SetInternalReference(
obj, entry, "raw_outer_scope_info_or_feedback_metadata",
shared->raw_outer_scope_info_or_feedback_metadata(),
SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset);
@@ -1294,12 +1318,6 @@ void V8HeapExplorer::ExtractFeedbackCellReferences(
FeedbackCell::kValueOffset);
}
-void V8HeapExplorer::ExtractWeakCellReferences(int entry, WeakCell* weak_cell) {
- TagObject(weak_cell, "(weak cell)");
- SetWeakReference(weak_cell, entry, "value", weak_cell->value(),
- WeakCell::kValueOffset);
-}
-
void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
PropertyCell* cell) {
SetInternalReference(cell, entry, "value", cell->value(),
@@ -1363,6 +1381,19 @@ void V8HeapExplorer::ExtractJSPromiseReferences(int entry, JSPromise* promise) {
JSPromise::kReactionsOrResultOffset);
}
+void V8HeapExplorer::ExtractJSGeneratorObjectReferences(
+ int entry, JSGeneratorObject* generator) {
+ SetInternalReference(generator, entry, "function", generator->function(),
+ JSGeneratorObject::kFunctionOffset);
+ SetInternalReference(generator, entry, "context", generator->context(),
+ JSGeneratorObject::kContextOffset);
+ SetInternalReference(generator, entry, "receiver", generator->receiver(),
+ JSGeneratorObject::kReceiverOffset);
+ SetInternalReference(generator, entry, "parameters_and_registers",
+ generator->parameters_and_registers(),
+ JSGeneratorObject::kParametersAndRegistersOffset);
+}
+
void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
for (int i = 0, l = array->length(); i < l; ++i) {
DCHECK(!HasWeakHeapObjectTag(array->get(i)));
@@ -1513,6 +1544,17 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
}
}
+JSFunction* V8HeapExplorer::GetConstructor(JSReceiver* receiver) {
+ Isolate* isolate = receiver->GetIsolate();
+ DisallowHeapAllocation no_gc;
+ HandleScope scope(isolate);
+ MaybeHandle<JSFunction> maybe_constructor =
+ JSReceiver::GetConstructor(handle(receiver, isolate));
+
+ if (maybe_constructor.is_null()) return nullptr;
+
+ return *maybe_constructor.ToHandleChecked();
+}
String* V8HeapExplorer::GetConstructorName(JSObject* object) {
Isolate* isolate = object->GetIsolate();
@@ -1602,6 +1644,9 @@ bool V8HeapExplorer::IterateAndExtractReferences(SnapshotFiller* filler) {
DCHECK(!visited_fields_[i]);
}
+ // Extract location for specific object types
+ ExtractLocation(entry, obj);
+
if (!progress_->ProgressReport(false)) interrupted = true;
}
@@ -2630,6 +2675,11 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
if (writer_->aborted()) return;
writer_->AddString("],\n");
+ writer_->AddString("\"locations\":[");
+ SerializeLocations();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+
writer_->AddString("\"strings\":[");
SerializeStrings();
if (writer_->aborted()) return;
@@ -2709,7 +2759,7 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
buffer[buffer_pos++] = ',';
buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
+ buffer_pos = utoa(to_node_index(edge->to()), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
writer_->AddString(buffer.start());
@@ -2734,7 +2784,7 @@ void HeapSnapshotJSONSerializer::SerializeNode(const HeapEntry* entry) {
+ 6 + 1 + 1;
EmbeddedVector<char, kBufferSize> buffer;
int buffer_pos = 0;
- if (entry_index(entry) != 0) {
+ if (to_node_index(entry) != 0) {
buffer[buffer_pos++] = ',';
}
buffer_pos = utoa(entry->type(), buffer, buffer_pos);
@@ -2767,6 +2817,8 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddString("\"meta\":");
// The object describing node serialization layout.
// We use a set of macros to improve readability.
+
+// clang-format off
#define JSON_A(s) "[" s "]"
#define JSON_O(s) "{" s "}"
#define JSON_S(s) "\"" s "\""
@@ -2830,7 +2882,13 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("children")) ","
JSON_S("sample_fields") ":" JSON_A(
JSON_S("timestamp_us") ","
- JSON_S("last_assigned_id"))));
+ JSON_S("last_assigned_id")) ","
+ JSON_S("location_fields") ":" JSON_A(
+ JSON_S("object_index") ","
+ JSON_S("script_id") ","
+ JSON_S("line") ","
+ JSON_S("column"))));
+// clang-format on
#undef JSON_S
#undef JSON_O
#undef JSON_A
@@ -3037,6 +3095,33 @@ void HeapSnapshotJSONSerializer::SerializeStrings() {
}
}
+void HeapSnapshotJSONSerializer::SerializeLocation(
+ const SourceLocation& location) {
+ // The buffer needs space for 4 unsigned ints, 3 commas, \n and \0
+ static const int kBufferSize =
+ MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 4 + 3 + 2;
+ EmbeddedVector<char, kBufferSize> buffer;
+ int buffer_pos = 0;
+ buffer_pos = utoa(to_node_index(location.entry_index), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(location.scriptId, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(location.line, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(location.col, buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+}
+
+void HeapSnapshotJSONSerializer::SerializeLocations() {
+ const std::vector<SourceLocation>& locations = snapshot_->locations();
+ for (size_t i = 0; i < locations.size(); i++) {
+ if (i > 0) writer_->AddCharacter(',');
+ SerializeLocation(locations[i]);
+ if (writer_->aborted()) return;
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 4f4fbee742..f28852fdc2 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -33,6 +33,16 @@ class JSCollection;
class JSWeakCollection;
class SnapshotFiller;
+struct SourceLocation {
+ SourceLocation(int entry_index, int scriptId, int line, int col)
+ : entry_index(entry_index), scriptId(scriptId), line(line), col(col) {}
+
+ const int entry_index;
+ const int scriptId;
+ const int line;
+ const int col;
+};
+
class HeapGraphEdge BASE_EMBEDDED {
public:
enum Type {
@@ -125,10 +135,8 @@ class HeapEntry BASE_EMBEDDED {
V8_INLINE int index() const;
int children_count() const { return children_count_; }
V8_INLINE int set_children_index(int index);
- void add_child(HeapGraphEdge* edge) {
- *(children_begin() + children_count_++) = edge;
- }
- HeapGraphEdge* child(int i) { return *(children_begin() + i); }
+ V8_INLINE void add_child(HeapGraphEdge* edge);
+ V8_INLINE HeapGraphEdge* child(int i);
V8_INLINE Isolate* isolate() const;
void SetIndexedReference(
@@ -175,11 +183,13 @@ class HeapSnapshot {
std::vector<HeapEntry>& entries() { return entries_; }
std::deque<HeapGraphEdge>& edges() { return edges_; }
std::deque<HeapGraphEdge*>& children() { return children_; }
+ const std::vector<SourceLocation>& locations() const { return locations_; }
void RememberLastJSObjectId();
SnapshotObjectId max_snapshot_js_object_id() const {
return max_snapshot_js_object_id_;
}
+ void AddLocation(int entry, int scriptId, int line, int col);
HeapEntry* AddEntry(HeapEntry::Type type,
const char* name,
SnapshotObjectId id,
@@ -205,6 +215,7 @@ class HeapSnapshot {
std::deque<HeapGraphEdge> edges_;
std::deque<HeapGraphEdge*> children_;
std::vector<HeapEntry*> sorted_entries_;
+ std::vector<SourceLocation> locations_;
SnapshotObjectId max_snapshot_js_object_id_;
friend class HeapSnapshotTester;
@@ -353,6 +364,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
const char* name,
size_t size);
+ static JSFunction* GetConstructor(JSReceiver* receiver);
static String* GetConstructorName(JSObject* object);
private:
@@ -365,6 +377,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
const char* GetSystemEntryName(HeapObject* object);
+ void ExtractLocation(int entry, HeapObject* object);
+ void ExtractLocationForJSFunction(int entry, JSFunction* func);
void ExtractReferences(int entry, HeapObject* obj);
void ExtractJSGlobalProxyReferences(int entry, JSGlobalProxy* proxy);
void ExtractJSObjectReferences(int entry, JSObject* js_obj);
@@ -385,13 +399,14 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractCodeReferences(int entry, Code* code);
void ExtractCellReferences(int entry, Cell* cell);
void ExtractFeedbackCellReferences(int entry, FeedbackCell* feedback_cell);
- void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
void ExtractArrayBoilerplateDescriptionReferences(
int entry, ArrayBoilerplateDescription* value);
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractJSPromiseReferences(int entry, JSPromise* promise);
+ void ExtractJSGeneratorObjectReferences(int entry,
+ JSGeneratorObject* generator);
void ExtractFixedArrayReferences(int entry, FixedArray* array);
void ExtractFeedbackVectorReferences(int entry,
FeedbackVector* feedback_vector);
@@ -591,15 +606,11 @@ class HeapSnapshotJSONSerializer {
reinterpret_cast<char*>(key2)) == 0;
}
- V8_INLINE static uint32_t StringHash(const void* string) {
- const char* s = reinterpret_cast<const char*>(string);
- int len = static_cast<int>(strlen(s));
- return StringHasher::HashSequentialString(
- s, len, v8::internal::kZeroHashSeed);
- }
+ V8_INLINE static uint32_t StringHash(const void* string);
int GetStringId(const char* s);
- int entry_index(const HeapEntry* e) { return e->index() * kNodeFieldsCount; }
+ V8_INLINE int to_node_index(const HeapEntry* e);
+ V8_INLINE int to_node_index(int entry_index);
void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
void SerializeEdges();
void SerializeImpl();
@@ -612,6 +623,8 @@ class HeapSnapshotJSONSerializer {
void SerializeSamples();
void SerializeString(const unsigned char* s);
void SerializeStrings();
+ void SerializeLocation(const SourceLocation& location);
+ void SerializeLocations();
static const int kEdgeFieldsCount;
static const int kNodeFieldsCount;
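Taken together, the serializer changes above add a "locations" section to the heap snapshot JSON: SerializeSnapshot declares "location_fields":["object_index","script_id","line","column"] in the meta block, and SerializeLocations emits one row per recorded SourceLocation, where object_index is the entry's offset into the flat "nodes" array (to_node_index, i.e. the entry index times kNodeFieldsCount). As a rough sketch with made-up values, a serialized fragment could look like

    "locations":[0,15,10,4,42,15,27,12]

which would describe two objects: the node at offset 0 in "nodes" recorded against script 15 at line 10, column 4, and the node at offset 42 recorded against the same script at line 27, column 12.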
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 845fe97b64..c0c5242219 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -726,7 +726,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
if (pc_entry) {
int pc_offset =
static_cast<int>(attributed_pc - pc_entry->instruction_start());
- DCHECK_GE(pc_offset, 0);
+ // TODO(petermarshall): pc_offset can still be negative in some cases.
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = pc_entry->line_number();
@@ -758,7 +758,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Find out if the entry has an inlining stack associated.
int pc_offset =
static_cast<int>(stack_pos - entry->instruction_start());
- DCHECK_GE(pc_offset, 0);
+ // TODO(petermarshall): pc_offset can still be negative in some cases.
const std::vector<std::unique_ptr<CodeEntry>>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 4501cd6f79..48c3f73958 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -6,7 +6,7 @@
#include <stdint.h>
#include <memory>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/ieee754.h"
#include "src/base/utils/random-number-generator.h"
#include "src/frames-inl.h"
@@ -99,7 +99,16 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
Sample* sample = new Sample(size, node, loc, this);
samples_.emplace(sample);
sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ // MarkIndependent is marked deprecated but we still rely on it here
+ // temporarily.
sample->global.MarkIndependent();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
}
void SamplingHeapProfiler::OnWeakCallback(
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index ef44749100..e3bd1d9c69 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -174,7 +174,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
// Sample potential return address value for frameless invocation of
// stubs (we'll figure out later, if this value makes sense).
tos = reinterpret_cast<void*>(
- i::Memory::Address_at(reinterpret_cast<i::Address>(regs.sp)));
+ i::Memory<i::Address>(reinterpret_cast<i::Address>(regs.sp)));
} else {
tos = nullptr;
}
@@ -255,9 +255,9 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
// bytecode_array might be garbage, so don't actually dereference it. We
// avoid the frame->GetXXX functions since they call BytecodeArray::cast,
// which has a heap access in its DCHECK.
- i::Object* bytecode_array = i::Memory::Object_at(
+ i::Object* bytecode_array = i::Memory<i::Object*>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeArrayFromFp);
- i::Object* bytecode_offset = i::Memory::Object_at(
+ i::Object* bytecode_offset = i::Memory<i::Object*>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeOffsetFromFp);
// If the bytecode array is a heap object and the bytecode offset is a
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index c7374f20d2..875478139d 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -9,15 +9,6 @@
#include "src/v8.h"
namespace v8 {
-
-std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
- v8::Isolate* isolate) {
- // Dummy profiler that does nothing.
- // Remove it along with the deprecated code.
- // The actual profiler is created by the isolate itself.
- return std::unique_ptr<TracingCpuProfiler>(new TracingCpuProfiler());
-}
-
namespace internal {
TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index ccd1fa42a2..d7da209e2e 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -6,7 +6,6 @@
#define V8_PROFILER_TRACING_CPU_PROFILER_H_
#include "include/v8-platform.h"
-#include "include/v8-profiler.h"
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
@@ -18,8 +17,7 @@ class CpuProfiler;
class Isolate;
class TracingCpuProfilerImpl final
- : public TracingCpuProfiler,
- private v8::TracingController::TraceStateObserver {
+ : private v8::TracingController::TraceStateObserver {
public:
explicit TracingCpuProfilerImpl(Isolate*);
~TracingCpuProfilerImpl();
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 3fdd39287b..902759a168 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -110,9 +110,8 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
return true;
}
-
-void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
- Handle<String> name, Handle<Object> value) {
+void CreateDataProperty(Handle<JSObject> object, Handle<String> name,
+ Handle<Object> value) {
LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
CHECK(result.IsJust() && result.FromJust());
@@ -158,24 +157,24 @@ Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
}
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
if (has_value()) {
- CreateDataProperty(isolate, result, factory->value_string(), value());
+ CreateDataProperty(result, factory->value_string(), value());
}
if (has_writable()) {
- CreateDataProperty(isolate, result, factory->writable_string(),
+ CreateDataProperty(result, factory->writable_string(),
factory->ToBoolean(writable()));
}
if (has_get()) {
- CreateDataProperty(isolate, result, factory->get_string(), get());
+ CreateDataProperty(result, factory->get_string(), get());
}
if (has_set()) {
- CreateDataProperty(isolate, result, factory->set_string(), set());
+ CreateDataProperty(result, factory->set_string(), set());
}
if (has_enumerable()) {
- CreateDataProperty(isolate, result, factory->enumerable_string(),
+ CreateDataProperty(result, factory->enumerable_string(),
factory->ToBoolean(enumerable()));
}
if (has_configurable()) {
- CreateDataProperty(isolate, result, factory->configurable_string(),
+ CreateDataProperty(result, factory->configurable_string(),
factory->ToBoolean(configurable()));
}
return result;
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index dfa5221bc4..8e56dcf47e 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -7,6 +7,7 @@
#include "src/field-type.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/name-inl.h"
#include "src/ostreams.h"
namespace v8 {
@@ -22,12 +23,32 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
- PropertyAttributes attributes,
+Descriptor::Descriptor() : details_(Smi::kZero) {}
+
+Descriptor::Descriptor(Handle<Name> key, MaybeObjectHandle value,
+ PropertyKind kind, PropertyAttributes attributes,
+ PropertyLocation location, PropertyConstness constness,
+ Representation representation, int field_index)
+ : key_(key),
+ value_(value),
+ details_(kind, attributes, location, constness, representation,
+ field_index) {
+ DCHECK(key->IsUniqueName());
+ DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
+}
+
+Descriptor::Descriptor(Handle<Name> key, MaybeObjectHandle value,
+ PropertyDetails details)
+ : key_(key), value_(value), details_(details) {
+ DCHECK(key->IsUniqueName());
+ DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
+}
+
+Descriptor Descriptor::DataField(Isolate* isolate, Handle<Name> key,
+ int field_index, PropertyAttributes attributes,
Representation representation) {
return DataField(key, field_index, attributes, PropertyConstness::kMutable,
- representation,
- MaybeObjectHandle(FieldType::Any(key->GetIsolate())));
+ representation, MaybeObjectHandle(FieldType::Any(isolate)));
}
Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
@@ -41,11 +62,18 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
return Descriptor(key, wrapped_field_type, details);
}
-Descriptor Descriptor::DataConstant(Handle<Name> key, int field_index,
- Handle<Object> value,
+Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
+ PropertyAttributes attributes) {
+ return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
+ kDescriptor, PropertyConstness::kConst,
+ value->OptimalRepresentation(), 0);
+}
+
+Descriptor Descriptor::DataConstant(Isolate* isolate, Handle<Name> key,
+ int field_index, Handle<Object> value,
PropertyAttributes attributes) {
if (FLAG_track_constant_fields) {
- MaybeObjectHandle any_type(FieldType::Any(), key->GetIsolate());
+ MaybeObjectHandle any_type(FieldType::Any(), isolate);
return DataField(key, field_index, attributes, PropertyConstness::kConst,
Representation::Tagged(), any_type);
@@ -56,6 +84,14 @@ Descriptor Descriptor::DataConstant(Handle<Name> key, int field_index,
}
}
+Descriptor Descriptor::AccessorConstant(Handle<Name> key,
+ Handle<Object> foreign,
+ PropertyAttributes attributes) {
+ return Descriptor(key, MaybeObjectHandle(foreign), kAccessor, attributes,
+ kDescriptor, PropertyConstness::kConst,
+ Representation::Tagged(), 0);
+}
+
// Outputs PropertyDetails as a dictionary details.
void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
os << "(";
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 4173491466..7a7d485bc3 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -9,6 +9,7 @@
#include "src/globals.h"
#include "src/handles.h"
+#include "src/maybe-handles.h"
#include "src/objects.h"
#include "src/objects/name.h"
#include "src/property-details.h"
@@ -23,7 +24,7 @@ namespace internal {
// optionally a piece of data.
class Descriptor final BASE_EMBEDDED {
public:
- Descriptor() : details_(Smi::kZero) {}
+ Descriptor();
Handle<Name> GetKey() const { return key_; }
MaybeObjectHandle GetValue() const { return value_; }
@@ -31,8 +32,8 @@ class Descriptor final BASE_EMBEDDED {
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
- static Descriptor DataField(Handle<Name> key, int field_index,
- PropertyAttributes attributes,
+ static Descriptor DataField(Isolate* isolate, Handle<Name> key,
+ int field_index, PropertyAttributes attributes,
Representation representation);
static Descriptor DataField(Handle<Name> key, int field_index,
@@ -42,22 +43,14 @@ class Descriptor final BASE_EMBEDDED {
MaybeObjectHandle wrapped_field_type);
static Descriptor DataConstant(Handle<Name> key, Handle<Object> value,
- PropertyAttributes attributes) {
- return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
- kDescriptor, PropertyConstness::kConst,
- value->OptimalRepresentation(), 0);
- }
-
- static Descriptor DataConstant(Handle<Name> key, int field_index,
- Handle<Object> value,
+ PropertyAttributes attributes);
+
+ static Descriptor DataConstant(Isolate* isolate, Handle<Name> key,
+ int field_index, Handle<Object> value,
PropertyAttributes attributes);
static Descriptor AccessorConstant(Handle<Name> key, Handle<Object> foreign,
- PropertyAttributes attributes) {
- return Descriptor(key, MaybeObjectHandle(foreign), kAccessor, attributes,
- kDescriptor, PropertyConstness::kConst,
- Representation::Tagged(), 0);
- }
+ PropertyAttributes attributes);
private:
Handle<Name> key_;
@@ -65,23 +58,13 @@ class Descriptor final BASE_EMBEDDED {
PropertyDetails details_;
protected:
- Descriptor(Handle<Name> key, MaybeObjectHandle value, PropertyDetails details)
- : key_(key), value_(value), details_(details) {
- DCHECK(key->IsUniqueName());
- DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
- }
+ Descriptor(Handle<Name> key, MaybeObjectHandle value,
+ PropertyDetails details);
Descriptor(Handle<Name> key, MaybeObjectHandle value, PropertyKind kind,
PropertyAttributes attributes, PropertyLocation location,
PropertyConstness constness, Representation representation,
- int field_index)
- : key_(key),
- value_(value),
- details_(kind, attributes, location, constness, representation,
- field_index) {
- DCHECK(key->IsUniqueName());
- DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
- }
+ int field_index);
friend class MapUpdater;
};
diff --git a/deps/v8/src/prototype-inl.h b/deps/v8/src/prototype-inl.h
new file mode 100644
index 0000000000..820d5756f1
--- /dev/null
+++ b/deps/v8/src/prototype-inl.h
@@ -0,0 +1,145 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROTOTYPE_INL_H_
+#define V8_PROTOTYPE_INL_H_
+
+#include "src/prototype.h"
+
+#include "src/handles-inl.h"
+#include "src/objects/map-inl.h"
+
+namespace v8 {
+namespace internal {
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ WhereToStart where_to_start,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ object_(nullptr),
+ handle_(receiver),
+ where_to_end_(where_to_end),
+ is_at_end_(false),
+ seen_proxies_(0) {
+ CHECK(!handle_.is_null());
+ if (where_to_start == kStartAtPrototype) Advance();
+}
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
+ WhereToStart where_to_start,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ object_(receiver),
+ where_to_end_(where_to_end),
+ is_at_end_(false),
+ seen_proxies_(0) {
+ if (where_to_start == kStartAtPrototype) Advance();
+}
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate, Map* receiver_map,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ object_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype()),
+ where_to_end_(where_to_end),
+ is_at_end_(object_->IsNull(isolate_)),
+ seen_proxies_(0) {
+ if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+ DCHECK(object_->IsJSReceiver());
+ Map* map = JSReceiver::cast(object_)->map();
+ is_at_end_ = !map->has_hidden_prototype();
+ }
+}
+
+PrototypeIterator::PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
+ WhereToEnd where_to_end)
+ : isolate_(isolate),
+ object_(nullptr),
+ handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
+ isolate_),
+ where_to_end_(where_to_end),
+ is_at_end_(handle_->IsNull(isolate_)),
+ seen_proxies_(0) {
+ if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+ DCHECK(handle_->IsJSReceiver());
+ Map* map = JSReceiver::cast(*handle_)->map();
+ is_at_end_ = !map->has_hidden_prototype();
+ }
+}
+
+bool PrototypeIterator::HasAccess() const {
+ // We can only perform access check in the handlified version of the
+ // PrototypeIterator.
+ DCHECK(!handle_.is_null());
+ if (handle_->IsAccessCheckNeeded()) {
+ return isolate_->MayAccess(handle(isolate_->context(), isolate_),
+ Handle<JSObject>::cast(handle_));
+ }
+ return true;
+}
+
+void PrototypeIterator::Advance() {
+ if (handle_.is_null() && object_->IsJSProxy()) {
+ is_at_end_ = true;
+ object_ = ReadOnlyRoots(isolate_).null_value();
+ return;
+ } else if (!handle_.is_null() && handle_->IsJSProxy()) {
+ is_at_end_ = true;
+ handle_ = isolate_->factory()->null_value();
+ return;
+ }
+ AdvanceIgnoringProxies();
+}
+
+void PrototypeIterator::AdvanceIgnoringProxies() {
+ Object* object = handle_.is_null() ? object_ : *handle_;
+ Map* map = HeapObject::cast(object)->map();
+
+ Object* prototype = map->prototype();
+ is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN ? !map->has_hidden_prototype()
+ : prototype->IsNull(isolate_);
+
+ if (handle_.is_null()) {
+ object_ = prototype;
+ } else {
+ handle_ = handle(prototype, isolate_);
+ }
+}
+
+V8_WARN_UNUSED_RESULT bool PrototypeIterator::AdvanceFollowingProxies() {
+ DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
+ if (!HasAccess()) {
+ // Abort the lookup if we do not have access to the current object.
+ handle_ = isolate_->factory()->null_value();
+ is_at_end_ = true;
+ return true;
+ }
+ return AdvanceFollowingProxiesIgnoringAccessChecks();
+}
+
+V8_WARN_UNUSED_RESULT bool
+PrototypeIterator::AdvanceFollowingProxiesIgnoringAccessChecks() {
+ if (handle_.is_null() || !handle_->IsJSProxy()) {
+ AdvanceIgnoringProxies();
+ return true;
+ }
+
+ // Due to possible __proto__ recursion, limit the number of Proxies
+ // we visit to an arbitrarily chosen large number.
+ seen_proxies_++;
+ if (seen_proxies_ > JSProxy::kMaxIterationLimit) {
+ isolate_->StackOverflow();
+ return false;
+ }
+ MaybeHandle<Object> proto =
+ JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
+ if (!proto.ToHandle(&handle_)) return false;
+ is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull(isolate_);
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROTOTYPE_INL_H_
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 71ae1ff9f1..d09a6c82a6 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -27,72 +27,23 @@ class PrototypeIterator {
public:
enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
- PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
- WhereToStart where_to_start = kStartAtPrototype,
- WhereToEnd where_to_end = END_AT_NULL)
- : isolate_(isolate),
- object_(nullptr),
- handle_(receiver),
- where_to_end_(where_to_end),
- is_at_end_(false),
- seen_proxies_(0) {
- CHECK(!handle_.is_null());
- if (where_to_start == kStartAtPrototype) Advance();
- }
+ inline PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
+ WhereToStart where_to_start = kStartAtPrototype,
+ WhereToEnd where_to_end = END_AT_NULL);
- PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
- WhereToStart where_to_start = kStartAtPrototype,
- WhereToEnd where_to_end = END_AT_NULL)
- : isolate_(isolate),
- object_(receiver),
- where_to_end_(where_to_end),
- is_at_end_(false),
- seen_proxies_(0) {
- if (where_to_start == kStartAtPrototype) Advance();
- }
+ inline PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
+ WhereToStart where_to_start = kStartAtPrototype,
+ WhereToEnd where_to_end = END_AT_NULL);
- explicit PrototypeIterator(Isolate* isolate, Map* receiver_map,
- WhereToEnd where_to_end = END_AT_NULL)
- : isolate_(isolate),
- object_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype()),
- where_to_end_(where_to_end),
- is_at_end_(object_->IsNull(isolate_)),
- seen_proxies_(0) {
- if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
- DCHECK(object_->IsJSReceiver());
- Map* map = JSReceiver::cast(object_)->map();
- is_at_end_ = !map->has_hidden_prototype();
- }
- }
+ inline explicit PrototypeIterator(Isolate* isolate, Map* receiver_map,
+ WhereToEnd where_to_end = END_AT_NULL);
- explicit PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
- WhereToEnd where_to_end = END_AT_NULL)
- : isolate_(isolate),
- object_(nullptr),
- handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
- isolate_),
- where_to_end_(where_to_end),
- is_at_end_(handle_->IsNull(isolate_)),
- seen_proxies_(0) {
- if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
- DCHECK(handle_->IsJSReceiver());
- Map* map = JSReceiver::cast(*handle_)->map();
- is_at_end_ = !map->has_hidden_prototype();
- }
- }
+ inline explicit PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
+ WhereToEnd where_to_end = END_AT_NULL);
~PrototypeIterator() {}
- bool HasAccess() const {
- // We can only perform access check in the handlified version of the
- // PrototypeIterator.
- DCHECK(!handle_.is_null());
- if (handle_->IsAccessCheckNeeded()) {
- return isolate_->MayAccess(handle(isolate_->context(), isolate_),
- Handle<JSObject>::cast(handle_));
- }
- return true;
- }
+ inline bool HasAccess() const;
template <typename T = Object>
T* GetCurrent() const {
@@ -107,67 +58,15 @@ class PrototypeIterator {
return Handle<T>::cast(iterator.handle_);
}
- void Advance() {
- if (handle_.is_null() && object_->IsJSProxy()) {
- is_at_end_ = true;
- object_ = ReadOnlyRoots(isolate_).null_value();
- return;
- } else if (!handle_.is_null() && handle_->IsJSProxy()) {
- is_at_end_ = true;
- handle_ = isolate_->factory()->null_value();
- return;
- }
- AdvanceIgnoringProxies();
- }
+ inline void Advance();
- void AdvanceIgnoringProxies() {
- Object* object = handle_.is_null() ? object_ : *handle_;
- Map* map = HeapObject::cast(object)->map();
-
- Object* prototype = map->prototype();
- is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN
- ? !map->has_hidden_prototype()
- : prototype->IsNull(isolate_);
-
- if (handle_.is_null()) {
- object_ = prototype;
- } else {
- handle_ = handle(prototype, isolate_);
- }
- }
+ inline void AdvanceIgnoringProxies();
// Returns false iff a call to JSProxy::GetPrototype throws.
- V8_WARN_UNUSED_RESULT bool AdvanceFollowingProxies() {
- DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
- if (!HasAccess()) {
- // Abort the lookup if we do not have access to the current object.
- handle_ = isolate_->factory()->null_value();
- is_at_end_ = true;
- return true;
- }
- return AdvanceFollowingProxiesIgnoringAccessChecks();
- }
+ V8_WARN_UNUSED_RESULT inline bool AdvanceFollowingProxies();
- V8_WARN_UNUSED_RESULT bool AdvanceFollowingProxiesIgnoringAccessChecks() {
- if (handle_.is_null() || !handle_->IsJSProxy()) {
- AdvanceIgnoringProxies();
- return true;
- }
-
- // Due to possible __proto__ recursion limit the number of Proxies
- // we visit to an arbitrarily chosen large number.
- seen_proxies_++;
- if (seen_proxies_ > JSProxy::kMaxIterationLimit) {
- isolate_->StackOverflow();
- return false;
- }
- MaybeHandle<Object> proto =
- JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
- if (!proto.ToHandle(&handle_)) return false;
- is_at_end_ =
- where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull(isolate_);
- return true;
- }
+ V8_WARN_UNUSED_RESULT inline bool
+ AdvanceFollowingProxiesIgnoringAccessChecks();
bool IsAtEnd() const { return is_at_end_; }
Isolate* isolate() const { return isolate_; }
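The two files above move the PrototypeIterator bodies essentially verbatim out of prototype.h into the new prototype-inl.h. As a minimal, hypothetical usage sketch built only from the interface shown in this patch (the helper name PrototypeChainLength is not part of V8), a caller that includes the new inline header could walk a prototype chain like this:

    #include "src/prototype-inl.h"

    namespace v8 {
    namespace internal {

    int PrototypeChainLength(Isolate* isolate, Handle<JSReceiver> receiver) {
      int length = 0;
      // kStartAtPrototype (the default) skips the receiver itself. Plain
      // Advance() stops when it reaches a JSProxy; AdvanceFollowingProxies()
      // walks through proxies (up to JSProxy::kMaxIterationLimit) and returns
      // false if JSProxy::GetPrototype throws.
      for (PrototypeIterator it(isolate, receiver); !it.IsAtEnd();
           it.Advance()) {
        ++length;
        // PrototypeIterator::GetCurrent(it) would yield the current link as a
        // Handle<Object> for the handlified iterator used here.
      }
      return length;
    }

    }  // namespace internal
    }  // namespace v8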
diff --git a/deps/v8/src/regexp/arm/OWNERS b/deps/v8/src/regexp/arm/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/regexp/arm/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index d2a20f3af7..f77d521728 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -1080,7 +1080,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
}
diff --git a/deps/v8/src/regexp/arm64/OWNERS b/deps/v8/src/regexp/arm64/OWNERS
deleted file mode 100644
index 906a5ce641..0000000000
--- a/deps/v8/src/regexp/arm64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-rmcilroy@chromium.org
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index cd84329a78..0d479cacb2 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -585,7 +585,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
ExternalReference word_map =
ExternalReference::re_word_character_map(isolate());
__ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
+ masm_->StaticArray(current_character(), times_1, word_map));
BranchOrBacktrack(zero, on_no_match);
return true;
}
@@ -600,7 +600,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
ExternalReference word_map =
ExternalReference::re_word_character_map(isolate());
__ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
+ masm_->StaticArray(current_character(), times_1, word_map));
BranchOrBacktrack(not_zero, on_no_match);
if (mode_ != LATIN1) {
__ bind(&done);
@@ -681,7 +681,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ mov(ecx, esp);
- __ sub(ecx, Operand::StaticVariable(stack_limit));
+ __ sub(ecx, masm_->StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
@@ -1108,7 +1108,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
}
@@ -1219,7 +1219,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() {
Label no_preempt;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ cmp(esp, masm_->StaticVariable(stack_limit));
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1232,7 +1232,7 @@ void RegExpMacroAssemblerIA32::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+ __ cmp(backtrack_stackpointer(), masm_->StaticVariable(stack_limit));
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/regexp/mips/OWNERS
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index e16fbd6568..36ac93275e 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -1143,7 +1143,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
}
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
\ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
\ No newline at end of file
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 40ac387c4e..17a8ce8752 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -1181,7 +1181,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index fdda46424e..494422074c 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -1143,7 +1143,7 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
}
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 1f89844f10..c787a50297 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -166,8 +166,7 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
return last_index->IsSmi() && Smi::ToInt(last_index) >= 0;
}
-uint64_t RegExpUtils::AdvanceStringIndex(Isolate* isolate,
- Handle<String> string, uint64_t index,
+uint64_t RegExpUtils::AdvanceStringIndex(Handle<String> string, uint64_t index,
bool unicode) {
DCHECK_LE(static_cast<double>(index), kMaxSafeInteger);
const uint64_t string_length = static_cast<uint64_t>(string->length());
@@ -199,7 +198,7 @@ MaybeHandle<Object> RegExpUtils::SetAdvancedStringIndex(
Object::ToLength(isolate, last_index_obj), Object);
const uint64_t last_index = PositiveNumberToUint64(*last_index_obj);
const uint64_t new_last_index =
- AdvanceStringIndex(isolate, string, last_index, unicode);
+ AdvanceStringIndex(string, last_index, unicode);
return SetLastIndex(isolate, regexp, new_last_index);
}
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index 7508403e01..8fc6607d98 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -41,8 +41,8 @@ class RegExpUtils : public AllStatic {
// ES#sec-advancestringindex
// AdvanceStringIndex ( S, index, unicode )
- static uint64_t AdvanceStringIndex(Isolate* isolate, Handle<String> string,
- uint64_t index, bool unicode);
+ static uint64_t AdvanceStringIndex(Handle<String> string, uint64_t index,
+ bool unicode);
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetAdvancedStringIndex(
Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
bool unicode);
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 837d5639cc..3db1ebc421 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -1089,9 +1089,9 @@ template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
DCHECK_EQ(kPointerSize, sizeof(T));
#ifdef V8_TARGET_ARCH_S390X
- return reinterpret_cast<T&>(Memory::uint64_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<uint64_t>(re_frame + frame_offset));
#else
- return reinterpret_cast<T&>(Memory::uint32_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<uint32_t>(re_frame + frame_offset));
#endif
}
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index c032ca1ff8..43f80767ea 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -99,7 +99,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone,
: NativeRegExpMacroAssembler(isolate, zone),
masm_(isolate, nullptr, kRegExpCodeSize, CodeObjectRequired::kYes),
no_root_array_scope_(&masm_),
- code_relative_fixup_positions_(4, zone),
+ code_relative_fixup_positions_(zone),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -1208,7 +1208,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// Helper function for reading a value out of a stack frame.
template <typename T>
static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
}
@@ -1304,8 +1304,7 @@ void RegExpMacroAssemblerX64::Push(Immediate value) {
void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
- for (int i = 0, n = code_relative_fixup_positions_.length(); i < n; i++) {
- int position = code_relative_fixup_positions_[i];
+ for (int position : code_relative_fixup_positions_) {
// The position succeeds a relative label offset from position.
// Patch the relative offset to be relative to the Code object pointer
// instead.
@@ -1317,7 +1316,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
+ Code::kHeaderSize
- kHeapObjectTag);
}
- code_relative_fixup_positions_.Clear();
+ code_relative_fixup_positions_.Rewind(0);
}
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 365bbaa6b2..1cf2f73ac3 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -8,6 +8,7 @@
#include "src/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/x64/assembler-x64.h"
+#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
@@ -215,7 +216,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
void BranchOrBacktrack(Condition condition, Label* to);
void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_.pc_offset(), zone());
+ code_relative_fixup_positions_.push_back(masm_.pc_offset());
}
void FixupCodeRelativePositions();
@@ -254,7 +255,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
MacroAssembler masm_;
NoRootArrayScope no_root_array_scope_;
- ZoneList<int> code_relative_fixup_positions_;
+ ZoneChunkList<int> code_relative_fixup_positions_;
// Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 335de1a053..1c4831ef75 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -166,6 +166,55 @@ static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
PoisoningRegisterConfigurationInitializer>::type
kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+// Allocatable registers with the root register removed.
+// TODO(v8:6666): Once all builtins have been migrated, we could remove this
+// configuration and remove kRootRegister from ALLOCATABLE_GENERAL_REGISTERS
+// instead.
+class ArchPreserveRootIA32RegisterConfiguration : public RegisterConfiguration {
+ public:
+ ArchPreserveRootIA32RegisterConfiguration()
+ : RegisterConfiguration(
+ Register::kNumRegisters, DoubleRegister::kNumRegisters,
+ kMaxAllocatableGeneralRegisterCount - 1,
+ get_num_allocatable_double_registers(),
+ InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
+ kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
+ kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
+ kSimd128RegisterNames) {}
+
+ private:
+ static const int* InitializeGeneralRegisterCodes() {
+ int filtered_index = 0;
+ for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
+ if (kAllocatableGeneralCodes[i] != kRootRegister.code()) {
+ allocatable_general_codes_[filtered_index] =
+ kAllocatableGeneralCodes[i];
+ filtered_index++;
+ }
+ }
+ DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
+ return allocatable_general_codes_;
+ }
+
+ static int
+ allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
+};
+
+int ArchPreserveRootIA32RegisterConfiguration::allocatable_general_codes_
+ [kMaxAllocatableGeneralRegisterCount - 1];
+
+struct PreserveRootIA32RegisterConfigurationInitializer {
+ static void Construct(void* config) {
+ new (config) ArchPreserveRootIA32RegisterConfiguration();
+ }
+};
+
+static base::LazyInstance<ArchPreserveRootIA32RegisterConfiguration,
+ PreserveRootIA32RegisterConfigurationInitializer>::
+ type kPreserveRootIA32RegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture support, which results into generating assembly
// to use less registers. Currently, it's only used by RecordWrite code stub.
@@ -182,8 +231,8 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
allocatable_general_register_codes.get(),
get_allocatable_double_codes(),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
- allocatable_general_register_names.get(), kFloatRegisterNames,
- kDoubleRegisterNames, kSimd128RegisterNames),
+ kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
+ kSimd128RegisterNames),
allocatable_general_register_codes_(
std::move(allocatable_general_register_codes)),
allocatable_general_register_names_(
@@ -218,6 +267,12 @@ const RegisterConfiguration* RegisterConfiguration::Poisoning() {
return &kDefaultPoisoningRegisterConfiguration.Get();
}
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+const RegisterConfiguration* RegisterConfiguration::PreserveRootIA32() {
+ return &kPreserveRootIA32RegisterConfiguration.Get();
+}
+#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
+
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
int num = NumRegs(registers);
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index ad413cc18a..538c3331ec 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -34,6 +34,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
// Register configuration with reserved masking register.
static const RegisterConfiguration* Poisoning();
+ // Register configuration with reserved root register on ia32.
+ static const RegisterConfiguration* PreserveRootIA32();
+
static const RegisterConfiguration* RestrictGeneralRegisters(
RegList registers);
diff --git a/deps/v8/src/reloc-info.cc b/deps/v8/src/reloc-info.cc
new file mode 100644
index 0000000000..ec4a1c679d
--- /dev/null
+++ b/deps/v8/src/reloc-info.cc
@@ -0,0 +1,540 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/reloc-info.h"
+
+#include "src/assembler-arch-inl.h"
+#include "src/code-stubs.h"
+#include "src/deoptimize-reason.h"
+#include "src/deoptimizer.h"
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/code-inl.h"
+#include "src/snapshot/snapshot.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfoWriter and RelocIterator
+//
+// Relocation information is written backwards in memory, from high addresses
+// towards low addresses, byte by byte. Therefore, in the encodings listed
+// below, the first byte listed is at the highest address, and successive
+// bytes in the record are at progressively lower addresses.
+//
+// Encoding
+//
+// The most common modes are given single-byte encodings. Also, it is
+// easy to identify the type of reloc info and skip unwanted modes in
+// an iteration.
+//
+// The encoding relies on the fact that there are fewer than 14
+// different relocation modes using standard non-compact encoding.
+//
+// The first byte of a relocation record has a tag in its low 2 bits.
+// Here are the record schemes, depending on the low tag and optional higher
+// tags.
+//
+// Low tag:
+// 00: embedded_object: [6-bit pc delta] 00
+//
+// 01: code_target: [6-bit pc delta] 01
+//
+// 10: wasm_stub_call: [6-bit pc delta] 10
+//
+// 11: long_record [6 bit reloc mode] 11
+// followed by pc delta
+// followed by optional data depending on type.
+//
+// If a pc delta exceeds 6 bits, it is split into a remainder that fits into
+// 6 bits and a part that does not. The latter is encoded as a long record
+// with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
+// the following record in the usual way. The long pc jump record has variable
+// length:
+// pc-jump: [PC_JUMP] 11
+// [7 bits data] 0
+// ...
+// [7 bits data] 1
+// (Bits 6..31 of pc delta, with leading zeroes
+// dropped, and last non-zero chunk tagged with 1.)
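
As a worked illustration of the scheme above (an editorial sketch, not part of the patch), the snippet below splits a pc delta that does not fit in 6 bits into the 6-bit remainder and the 7-bit chunks of the long pc jump, using the same constant values that are defined next.

#include <cstdint>
#include <cstdio>

void SplitPcDeltaSketch(uint32_t pc_delta) {
  const int kTagBits = 2;
  const int kSmallPCDeltaBits = 8 - kTagBits;  // 6
  const int kChunkBits = 7;
  uint32_t remainder = pc_delta & ((1u << kSmallPCDeltaBits) - 1);
  uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
  std::printf("remainder carried by the next record: %u\n",
              static_cast<unsigned>(remainder));
  // Bits 6..31 are emitted as 7-bit chunks, least significant chunk first;
  // the final (most significant) chunk carries the stop tag in bit 0.
  while (pc_jump > 0) {
    uint32_t chunk = pc_jump & ((1u << kChunkBits) - 1);
    pc_jump >>= kChunkBits;
    std::printf("chunk byte: 0x%02x\n",
                static_cast<unsigned>((chunk << 1) | (pc_jump == 0 ? 1u : 0u)));
  }
}
// SplitPcDeltaSketch(0x12345) prints remainder 5, then bytes 0x1a and 0x13.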
+
+const int kTagBits = 2;
+const int kTagMask = (1 << kTagBits) - 1;
+const int kLongTagBits = 6;
+
+const int kEmbeddedObjectTag = 0;
+const int kCodeTargetTag = 1;
+const int kWasmStubCallTag = 2;
+const int kDefaultTag = 3;
+
+const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
+const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
+const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
+
+const int kChunkBits = 7;
+const int kChunkMask = (1 << kChunkBits) - 1;
+const int kLastChunkTagBits = 1;
+const int kLastChunkTagMask = 1;
+const int kLastChunkTag = 1;
+
+uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
+ // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
+ // Otherwise write a variable length PC jump for the bits that do
+ // not fit in the kSmallPCDeltaBits bits.
+ if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
+ WriteMode(RelocInfo::PC_JUMP);
+ uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
+ DCHECK_GT(pc_jump, 0);
+ // Write kChunkBits size chunks of the pc_jump.
+ for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
+ byte b = pc_jump & kChunkMask;
+ *--pos_ = b << kLastChunkTagBits;
+ }
+ // Tag the last chunk so it can be identified.
+ *pos_ = *pos_ | kLastChunkTag;
+ // Return the remaining kSmallPCDeltaBits of the pc_delta.
+ return pc_delta & kSmallPCDeltaMask;
+}
+
+void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
+ // Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
+ pc_delta = WriteLongPCJump(pc_delta);
+ *--pos_ = pc_delta << kTagBits | tag;
+}
+
+void RelocInfoWriter::WriteShortData(intptr_t data_delta) {
+ *--pos_ = static_cast<byte>(data_delta);
+}
+
+void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
+ STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
+ *--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
+}
+
+void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
+ // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
+ pc_delta = WriteLongPCJump(pc_delta);
+ WriteMode(rmode);
+ *--pos_ = pc_delta;
+}
+
+void RelocInfoWriter::WriteIntData(int number) {
+ for (int i = 0; i < kIntSize; i++) {
+ *--pos_ = static_cast<byte>(number);
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ number = number >> kBitsPerByte;
+ }
+}
+
+void RelocInfoWriter::WriteData(intptr_t data_delta) {
+ for (int i = 0; i < kIntptrSize; i++) {
+ *--pos_ = static_cast<byte>(data_delta);
+ // Signed right shift is arithmetic shift. Tested in test-utils.cc.
+ data_delta = data_delta >> kBitsPerByte;
+ }
+}
+
+void RelocInfoWriter::Write(const RelocInfo* rinfo) {
+ RelocInfo::Mode rmode = rinfo->rmode();
+#ifdef DEBUG
+ byte* begin_pos = pos_;
+#endif
+ DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
+ DCHECK_GE(rinfo->pc() - reinterpret_cast<Address>(last_pc_), 0);
+ // Use unsigned delta-encoding for pc.
+ uint32_t pc_delta =
+ static_cast<uint32_t>(rinfo->pc() - reinterpret_cast<Address>(last_pc_));
+
+ // The two most common modes are given small tags, and usually fit in a byte.
+ if (rmode == RelocInfo::EMBEDDED_OBJECT) {
+ WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
+ } else if (rmode == RelocInfo::CODE_TARGET) {
+ WriteShortTaggedPC(pc_delta, kCodeTargetTag);
+ DCHECK_LE(begin_pos - pos_, RelocInfo::kMaxCallSize);
+ } else if (rmode == RelocInfo::WASM_STUB_CALL) {
+ WriteShortTaggedPC(pc_delta, kWasmStubCallTag);
+ } else {
+ WriteModeAndPC(pc_delta, rmode);
+ if (RelocInfo::IsComment(rmode)) {
+ WriteData(rinfo->data());
+ } else if (RelocInfo::IsDeoptReason(rmode)) {
+ DCHECK_LT(rinfo->data(), 1 << kBitsPerByte);
+ WriteShortData(rinfo->data());
+ } else if (RelocInfo::IsConstPool(rmode) ||
+ RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
+ RelocInfo::IsDeoptPosition(rmode)) {
+ WriteIntData(static_cast<int>(rinfo->data()));
+ }
+ }
+ last_pc_ = reinterpret_cast<byte*>(rinfo->pc());
+#ifdef DEBUG
+ DCHECK_LE(begin_pos - pos_, kMaxSize);
+#endif
+}
+
+inline int RelocIterator::AdvanceGetTag() { return *--pos_ & kTagMask; }
+
+inline RelocInfo::Mode RelocIterator::GetMode() {
+ return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
+ ((1 << kLongTagBits) - 1));
+}
+
+inline void RelocIterator::ReadShortTaggedPC() {
+ rinfo_.pc_ += *pos_ >> kTagBits;
+}
+
+inline void RelocIterator::AdvanceReadPC() { rinfo_.pc_ += *--pos_; }
+
+void RelocIterator::AdvanceReadInt() {
+ int x = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+ }
+ rinfo_.data_ = x;
+}
+
+void RelocIterator::AdvanceReadData() {
+ intptr_t x = 0;
+ for (int i = 0; i < kIntptrSize; i++) {
+ x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
+ }
+ rinfo_.data_ = x;
+}
+
+void RelocIterator::AdvanceReadLongPCJump() {
+ // Read the 32-kSmallPCDeltaBits most significant bits of the
+ // pc jump in kChunkBits bit chunks and shift them into place.
+ // Stop when the last chunk is encountered.
+ uint32_t pc_jump = 0;
+ for (int i = 0; i < kIntSize; i++) {
+ byte pc_jump_part = *--pos_;
+ pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
+ if ((pc_jump_part & kLastChunkTagMask) == 1) break;
+ }
+ // The least significant kSmallPCDeltaBits bits will be added
+ // later.
+ rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
+}
+
+inline void RelocIterator::ReadShortData() {
+ uint8_t unsigned_b = *pos_;
+ rinfo_.data_ = unsigned_b;
+}
+
+void RelocIterator::next() {
+ DCHECK(!done());
+ // Basically, do the opposite of RelocInfoWriter::Write.
+ // Reading of data is as far as possible avoided for unwanted modes,
+ // but we must always update the pc.
+ //
+ // We exit this loop by returning when we find a mode we want.
+ while (pos_ > end_) {
+ int tag = AdvanceGetTag();
+ if (tag == kEmbeddedObjectTag) {
+ ReadShortTaggedPC();
+ if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
+ } else if (tag == kCodeTargetTag) {
+ ReadShortTaggedPC();
+ if (SetMode(RelocInfo::CODE_TARGET)) return;
+ } else if (tag == kWasmStubCallTag) {
+ ReadShortTaggedPC();
+ if (SetMode(RelocInfo::WASM_STUB_CALL)) return;
+ } else {
+ DCHECK_EQ(tag, kDefaultTag);
+ RelocInfo::Mode rmode = GetMode();
+ if (rmode == RelocInfo::PC_JUMP) {
+ AdvanceReadLongPCJump();
+ } else {
+ AdvanceReadPC();
+ if (RelocInfo::IsComment(rmode)) {
+ if (SetMode(rmode)) {
+ AdvanceReadData();
+ return;
+ }
+ Advance(kIntptrSize);
+ } else if (RelocInfo::IsDeoptReason(rmode)) {
+ Advance();
+ if (SetMode(rmode)) {
+ ReadShortData();
+ return;
+ }
+ } else if (RelocInfo::IsConstPool(rmode) ||
+ RelocInfo::IsVeneerPool(rmode) ||
+ RelocInfo::IsDeoptId(rmode) ||
+ RelocInfo::IsDeoptPosition(rmode)) {
+ if (SetMode(rmode)) {
+ AdvanceReadInt();
+ return;
+ }
+ Advance(kIntSize);
+ } else if (SetMode(static_cast<RelocInfo::Mode>(rmode))) {
+ return;
+ }
+ }
+ }
+ }
+ done_ = true;
+}
+
+RelocIterator::RelocIterator(Code* code, int mode_mask)
+ : RelocIterator(code, code->raw_instruction_start(), code->constant_pool(),
+ code->relocation_end(), code->relocation_start(),
+ mode_mask) {}
+
+RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
+ : RelocIterator(nullptr, code_reference.instruction_start(),
+ code_reference.constant_pool(),
+ code_reference.relocation_end(),
+ code_reference.relocation_start(), mode_mask) {}
+
+RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code* code,
+ int mode_mask)
+ : RelocIterator(
+ code, embedded_data->InstructionStartOfBuiltin(code->builtin_index()),
+ code->constant_pool(),
+ code->relocation_start() + code->relocation_size(),
+ code->relocation_start(), mode_mask) {}
+
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
+ : RelocIterator(nullptr, reinterpret_cast<Address>(desc.buffer), 0,
+ desc.buffer + desc.buffer_size,
+ desc.buffer + desc.buffer_size - desc.reloc_size,
+ mode_mask) {}
+
+RelocIterator::RelocIterator(Vector<byte> instructions,
+ Vector<const byte> reloc_info, Address const_pool,
+ int mode_mask)
+ : RelocIterator(nullptr, reinterpret_cast<Address>(instructions.start()),
+ const_pool, reloc_info.start() + reloc_info.size(),
+ reloc_info.start(), mode_mask) {}
+
+RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
+ const byte* pos, const byte* end, int mode_mask)
+ : pos_(pos), end_(end), mode_mask_(mode_mask) {
+ // Relocation info is read backwards.
+ DCHECK_GE(pos_, end_);
+ rinfo_.host_ = host;
+ rinfo_.pc_ = pc;
+ rinfo_.constant_pool_ = constant_pool;
+ if (mode_mask_ == 0) pos_ = end_;
+ next();
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// static
+bool RelocInfo::OffHeapTargetIsCodedSpecially() {
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
+ defined(V8_TARGET_ARCH_X64)
+ return false;
+#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
+ defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
+ defined(V8_TARGET_ARCH_S390)
+ return true;
+#endif
+}
+
+Address RelocInfo::wasm_call_address() const {
+ DCHECK_EQ(rmode_, WASM_CALL);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_wasm_call_address(Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, WASM_CALL);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
+}
+
+Address RelocInfo::wasm_stub_call_address() const {
+ DCHECK_EQ(rmode_, WASM_STUB_CALL);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_wasm_stub_call_address(Address address,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK_EQ(rmode_, WASM_STUB_CALL);
+ Assembler::set_target_address_at(pc_, constant_pool_, address,
+ icache_flush_mode);
+}
+
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
+ IsCodeTargetMode(rmode_)) {
+ Code* target_code = Code::GetCodeFromTargetAddress(target);
+ MarkingBarrierForCode(host(), this, target_code);
+ }
+}
+
+bool RelocInfo::RequiresRelocationAfterCodegen(const CodeDesc& desc) {
+ RelocIterator it(desc, RelocInfo::PostCodegenRelocationMask());
+ return !it.done();
+}
+
+bool RelocInfo::RequiresRelocation(Code* code) {
+ RelocIterator it(code, RelocInfo::kApplyMask);
+ return !it.done();
+}
+
+#ifdef ENABLE_DISASSEMBLER
+const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
+ switch (rmode) {
+ case NONE:
+ return "no reloc";
+ case EMBEDDED_OBJECT:
+ return "embedded object";
+ case CODE_TARGET:
+ return "code target";
+ case RELATIVE_CODE_TARGET:
+ return "relative code target";
+ case RUNTIME_ENTRY:
+ return "runtime entry";
+ case COMMENT:
+ return "comment";
+ case EXTERNAL_REFERENCE:
+ return "external reference";
+ case INTERNAL_REFERENCE:
+ return "internal reference";
+ case INTERNAL_REFERENCE_ENCODED:
+ return "encoded internal reference";
+ case OFF_HEAP_TARGET:
+ return "off heap target";
+ case DEOPT_SCRIPT_OFFSET:
+ return "deopt script offset";
+ case DEOPT_INLINING_ID:
+ return "deopt inlining id";
+ case DEOPT_REASON:
+ return "deopt reason";
+ case DEOPT_ID:
+ return "deopt index";
+ case CONST_POOL:
+ return "constant pool";
+ case VENEER_POOL:
+ return "veneer pool";
+ case WASM_CALL:
+ return "internal wasm call";
+ case WASM_STUB_CALL:
+ return "wasm stub call";
+ case JS_TO_WASM_CALL:
+ return "js to wasm call";
+ case NUMBER_OF_MODES:
+ case PC_JUMP:
+ UNREACHABLE();
+ }
+ return "unknown relocation type";
+}
+
+void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
+ os << reinterpret_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
+ if (IsComment(rmode_)) {
+ os << " (" << reinterpret_cast<char*>(data_) << ")";
+ } else if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
+ os << " (" << data() << ")";
+ } else if (rmode_ == DEOPT_REASON) {
+ os << " ("
+ << DeoptimizeReasonToString(static_cast<DeoptimizeReason>(data_)) << ")";
+ } else if (rmode_ == EMBEDDED_OBJECT) {
+ os << " (" << Brief(target_object()) << ")";
+ } else if (rmode_ == EXTERNAL_REFERENCE) {
+ if (isolate) {
+ ExternalReferenceEncoder ref_encoder(isolate);
+ os << " ("
+ << ref_encoder.NameOfAddress(isolate, target_external_reference())
+ << ") ";
+ }
+ os << " (" << reinterpret_cast<const void*>(target_external_reference())
+ << ")";
+ } else if (IsCodeTargetMode(rmode_)) {
+ const Address code_target = target_address();
+ Code* code = Code::GetCodeFromTargetAddress(code_target);
+ DCHECK(code->IsCode());
+ os << " (" << Code::Kind2String(code->kind());
+ if (Builtins::IsBuiltin(code)) {
+ os << " " << Builtins::name(code->builtin_index());
+ } else if (code->kind() == Code::STUB) {
+ os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
+ }
+ os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
+ } else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
+ // Deoptimization bailouts are stored as runtime entries.
+ DeoptimizeKind type;
+ if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
+ int id = GetDeoptimizationId(isolate, type);
+ os << " (" << Deoptimizer::MessageFor(type) << " deoptimization bailout "
+ << id << ")";
+ }
+ } else if (IsConstPool(rmode_)) {
+ os << " (size " << static_cast<int>(data_) << ")";
+ }
+
+ os << "\n";
+}
+#endif // ENABLE_DISASSEMBLER
+
+#ifdef VERIFY_HEAP
+void RelocInfo::Verify(Isolate* isolate) {
+ switch (rmode_) {
+ case EMBEDDED_OBJECT:
+ Object::VerifyPointer(isolate, target_object());
+ break;
+ case CODE_TARGET:
+ case RELATIVE_CODE_TARGET: {
+ // convert inline target address to code object
+ Address addr = target_address();
+ CHECK_NE(addr, kNullAddress);
+ // Check that we can find the right code object.
+ Code* code = Code::GetCodeFromTargetAddress(addr);
+ Object* found = isolate->FindCodeObject(addr);
+ CHECK(found->IsCode());
+ CHECK(code->address() == HeapObject::cast(found)->address());
+ break;
+ }
+ case INTERNAL_REFERENCE:
+ case INTERNAL_REFERENCE_ENCODED: {
+ Address target = target_internal_reference();
+ Address pc = target_internal_reference_address();
+ Code* code = Code::cast(isolate->FindCodeObject(pc));
+ CHECK(target >= code->InstructionStart());
+ CHECK(target <= code->InstructionEnd());
+ break;
+ }
+ case OFF_HEAP_TARGET: {
+ Address addr = target_off_heap_target();
+ CHECK_NE(addr, kNullAddress);
+ CHECK_NOT_NULL(InstructionStream::TryLookupCode(isolate, addr));
+ break;
+ }
+ case RUNTIME_ENTRY:
+ case COMMENT:
+ case EXTERNAL_REFERENCE:
+ case DEOPT_SCRIPT_OFFSET:
+ case DEOPT_INLINING_ID:
+ case DEOPT_REASON:
+ case DEOPT_ID:
+ case CONST_POOL:
+ case VENEER_POOL:
+ case WASM_CALL:
+ case WASM_STUB_CALL:
+ case JS_TO_WASM_CALL:
+ case NONE:
+ break;
+ case NUMBER_OF_MODES:
+ case PC_JUMP:
+ UNREACHABLE();
+ break;
+ }
+}
+#endif // VERIFY_HEAP
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/reloc-info.h b/deps/v8/src/reloc-info.h
new file mode 100644
index 0000000000..53d52830a0
--- /dev/null
+++ b/deps/v8/src/reloc-info.h
@@ -0,0 +1,455 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_RELOC_INFO_H_
+#define V8_RELOC_INFO_H_
+
+#include "src/globals.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeReference;
+class EmbeddedData;
+
+// Specifies whether to perform icache flush operations on RelocInfo updates.
+// If FLUSH_ICACHE_IF_NEEDED, the icache will always be flushed if an
+// instruction was modified. If SKIP_ICACHE_FLUSH the flush will always be
+// skipped (only use this if you will flush the icache manually before it is
+// executed).
+enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
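
A small hedged sketch of the intended SKIP_ICACHE_FLUSH pattern (not part of the patch): batch several updates and flush the affected range once before it executes. The flush step is left abstract here, since no particular flush helper appears in this diff.

void PatchWasmCallsSketch(RelocInfo* first, RelocInfo* second,
                          Address new_target) {
  // Skip the per-update flush while patching several records...
  first->set_wasm_call_address(new_target, SKIP_ICACHE_FLUSH);
  second->set_wasm_call_address(new_target, SKIP_ICACHE_FLUSH);
  // ...then flush the modified instruction range once, with whatever
  // icache-flush helper the port provides, before the code runs again.
}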
+
+// -----------------------------------------------------------------------------
+// Relocation information
+
+// Relocation information consists of the address (pc) of the datum
+// to which the relocation information applies, the relocation mode
+// (rmode), and an optional data field. The relocation mode may be
+// "descriptive" and not indicate a need for relocation, but simply
+// describe a property of the datum. Such rmodes are useful for GC
+// and nice disassembly output.
+
+class RelocInfo {
+ public:
+ // This string is used to add padding comments to the reloc info in cases
+ // where we are not sure to have enough space for patching in during
+ // lazy deoptimization. This is the case if we have indirect calls for which
+ // we do not normally record relocation info.
+ static const char* const kFillerCommentString;
+
+ // The minimum size of a comment is equal to two bytes for the extra tagged
+ // pc and kPointerSize for the actual pointer to the comment.
+ static const int kMinRelocCommentSize = 2 + kPointerSize;
+
+ // The maximum size for a call instruction including pc-jump.
+ static const int kMaxCallSize = 6;
+
+ // The maximum pc delta that will use the short encoding.
+ static const int kMaxSmallPCDelta;
+
+ enum Mode : int8_t {
+ // Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
+ // and IsShareableRelocMode predicates below).
+
+ CODE_TARGET,
+ RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
+ EMBEDDED_OBJECT, // LAST_GCED_ENUM
+
+ JS_TO_WASM_CALL,
+ WASM_CALL, // FIRST_SHAREABLE_RELOC_MODE
+ WASM_STUB_CALL,
+
+ RUNTIME_ENTRY,
+ COMMENT,
+
+ EXTERNAL_REFERENCE, // The address of an external C++ function.
+ INTERNAL_REFERENCE, // An address inside the same function.
+
+ // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
+ INTERNAL_REFERENCE_ENCODED,
+
+ // An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
+ OFF_HEAP_TARGET,
+
+ // Marks constant and veneer pools. Only used on ARM and ARM64.
+ // They use a custom noncompact encoding.
+ CONST_POOL,
+ VENEER_POOL,
+
+ DEOPT_SCRIPT_OFFSET,
+ DEOPT_INLINING_ID, // Deoptimization source position.
+ DEOPT_REASON, // Deoptimization reason index.
+ DEOPT_ID, // Deoptimization inlining id.
+
+ // This is not an actual reloc mode, but used to encode a long pc jump that
+ // cannot be encoded as part of another record.
+ PC_JUMP,
+
+ // Pseudo-types
+ NUMBER_OF_MODES,
+ NONE, // never recorded value
+
+ LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
+ FIRST_REAL_RELOC_MODE = CODE_TARGET,
+ LAST_REAL_RELOC_MODE = VENEER_POOL,
+ LAST_GCED_ENUM = EMBEDDED_OBJECT,
+ FIRST_SHAREABLE_RELOC_MODE = WASM_CALL,
+ };
+
+ STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
+
+ RelocInfo() = default;
+
+ RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host,
+ Address constant_pool = kNullAddress)
+ : pc_(pc),
+ rmode_(rmode),
+ data_(data),
+ host_(host),
+ constant_pool_(constant_pool) {}
+
+ static constexpr bool IsRealRelocMode(Mode mode) {
+ return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
+ }
+ // Is the relocation mode affected by GC?
+ static constexpr bool IsGCRelocMode(Mode mode) {
+ return mode <= LAST_GCED_ENUM;
+ }
+ static constexpr bool IsShareableRelocMode(Mode mode) {
+ static_assert(RelocInfo::NONE >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE,
+ "Users of this function rely on NONE being a sharable "
+ "relocation mode.");
+ return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
+ }
+ static constexpr bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
+ static constexpr bool IsCodeTargetMode(Mode mode) {
+ return mode <= LAST_CODE_TARGET_MODE;
+ }
+ static constexpr bool IsRelativeCodeTarget(Mode mode) {
+ return mode == RELATIVE_CODE_TARGET;
+ }
+ static constexpr bool IsEmbeddedObject(Mode mode) {
+ return mode == EMBEDDED_OBJECT;
+ }
+ static constexpr bool IsRuntimeEntry(Mode mode) {
+ return mode == RUNTIME_ENTRY;
+ }
+ static constexpr bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
+ static constexpr bool IsWasmStubCall(Mode mode) {
+ return mode == WASM_STUB_CALL;
+ }
+ static constexpr bool IsComment(Mode mode) { return mode == COMMENT; }
+ static constexpr bool IsConstPool(Mode mode) { return mode == CONST_POOL; }
+ static constexpr bool IsVeneerPool(Mode mode) { return mode == VENEER_POOL; }
+ static constexpr bool IsDeoptPosition(Mode mode) {
+ return mode == DEOPT_SCRIPT_OFFSET || mode == DEOPT_INLINING_ID;
+ }
+ static constexpr bool IsDeoptReason(Mode mode) {
+ return mode == DEOPT_REASON;
+ }
+ static constexpr bool IsDeoptId(Mode mode) { return mode == DEOPT_ID; }
+ static constexpr bool IsExternalReference(Mode mode) {
+ return mode == EXTERNAL_REFERENCE;
+ }
+ static constexpr bool IsInternalReference(Mode mode) {
+ return mode == INTERNAL_REFERENCE;
+ }
+ static constexpr bool IsInternalReferenceEncoded(Mode mode) {
+ return mode == INTERNAL_REFERENCE_ENCODED;
+ }
+ static constexpr bool IsOffHeapTarget(Mode mode) {
+ return mode == OFF_HEAP_TARGET;
+ }
+ static constexpr bool IsNone(Mode mode) { return mode == NONE; }
+ static constexpr bool IsWasmReference(Mode mode) {
+ return IsWasmPtrReference(mode);
+ }
+ static constexpr bool IsJsToWasmCall(Mode mode) {
+ return mode == JS_TO_WASM_CALL;
+ }
+ static constexpr bool IsWasmPtrReference(Mode mode) {
+ return mode == WASM_CALL || mode == JS_TO_WASM_CALL;
+ }
+
+ static bool IsOnlyForSerializer(Mode mode) {
+#ifdef V8_TARGET_ARCH_IA32
+ // On ia32, inlined off-heap trampolines must be relocated.
+ DCHECK_NE((kApplyMask & ModeMask(OFF_HEAP_TARGET)), 0);
+ DCHECK_EQ((kApplyMask & ModeMask(EXTERNAL_REFERENCE)), 0);
+ return mode == EXTERNAL_REFERENCE;
+#else
+ DCHECK_EQ((kApplyMask & ModeMask(OFF_HEAP_TARGET)), 0);
+ DCHECK_EQ((kApplyMask & ModeMask(EXTERNAL_REFERENCE)), 0);
+ return mode == EXTERNAL_REFERENCE || mode == OFF_HEAP_TARGET;
+#endif
+ }
+
+ static constexpr int ModeMask(Mode mode) { return 1 << mode; }
+
+ // Accessors
+ Address pc() const { return pc_; }
+ Mode rmode() const { return rmode_; }
+ intptr_t data() const { return data_; }
+ Code* host() const { return host_; }
+ Address constant_pool() const { return constant_pool_; }
+
+ // Apply a relocation by delta bytes. When the code object is moved, PC
+ // relative addresses have to be updated as well as absolute addresses
+ // inside the code (internal references).
+ // Do not forget to flush the icache afterwards!
+ V8_INLINE void apply(intptr_t delta);
+
+ // Is the pointer this relocation info refers to coded like a plain pointer
+ // or is it strange in some way (e.g. relative or patched into a series of
+ // instructions).
+ bool IsCodedSpecially();
+
+  // The static counterpart to IsCodedSpecially, just for off-heap targets. Used
+ // during deserialization, when we don't actually have a RelocInfo handy.
+ static bool OffHeapTargetIsCodedSpecially();
+
+ // If true, the pointer this relocation info refers to is an entry in the
+ // constant pool, otherwise the pointer is embedded in the instruction stream.
+ bool IsInConstantPool();
+
+ // Returns the deoptimization id for the entry associated with the reloc info
+ // where {kind} is the deoptimization kind.
+ // This is only used for printing RUNTIME_ENTRY relocation info.
+ int GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind);
+
+ Address wasm_call_address() const;
+ Address wasm_stub_call_address() const;
+ Address js_to_wasm_address() const;
+
+ uint32_t wasm_call_tag() const;
+
+ void set_wasm_call_address(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_wasm_stub_call_address(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_js_to_wasm_address(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ void set_target_address(
+ Address target,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+  // Reads the target address in the instruction this relocation applies to;
+ // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ V8_INLINE Address target_address();
+ V8_INLINE HeapObject* target_object();
+ V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
+ V8_INLINE void set_target_object(
+ Heap* heap, HeapObject* target,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ V8_INLINE Address target_runtime_entry(Assembler* origin);
+ V8_INLINE void set_target_runtime_entry(
+ Address target,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ V8_INLINE Address target_off_heap_target();
+ V8_INLINE Cell* target_cell();
+ V8_INLINE Handle<Cell> target_cell_handle();
+ V8_INLINE void set_target_cell(
+ Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ V8_INLINE void set_target_external_reference(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ // Returns the address of the constant pool entry where the target address
+ // is held. This should only be called if IsInConstantPool returns true.
+ V8_INLINE Address constant_pool_entry_address();
+
+ // Read the address of the word containing the target_address in an
+ // instruction stream. What this means exactly is architecture-independent.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target. Architecture-independent code shouldn't
+ // dereference the pointer it gets back from this.
+ V8_INLINE Address target_address_address();
+
+ // This indicates how much space a target takes up when deserializing a code
+ // stream. For most architectures this is just the size of a pointer. For
+ // an instruction like movw/movt where the target bits are mixed into the
+ // instruction bits the size of the target will be zero, indicating that the
+ // serializer should not step forwards in memory after a target is resolved
+ // and written. In this case the target_address_address function above
+ // should return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target.
+ V8_INLINE int target_address_size();
+
+ // Read the reference in the instruction this relocation
+ // applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
+ V8_INLINE Address target_external_reference();
+
+ // Read the reference in the instruction this relocation
+ // applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
+ V8_INLINE Address target_internal_reference();
+
+ // Return the reference address this relocation applies to;
+ // can only be called if rmode_ is INTERNAL_REFERENCE.
+ V8_INLINE Address target_internal_reference_address();
+
+ // Wipe out a relocation to a fixed value, used for making snapshots
+ // reproducible.
+ V8_INLINE void WipeOut();
+
+ template <typename ObjectVisitor>
+ inline void Visit(ObjectVisitor* v);
+
+ // Check whether the given code contains relocation information that
+ // either is position-relative or movable by the garbage collector.
+ static bool RequiresRelocationAfterCodegen(const CodeDesc& desc);
+ static bool RequiresRelocation(Code* code);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Printing
+ static const char* RelocModeName(Mode rmode);
+ void Print(Isolate* isolate, std::ostream& os); // NOLINT
+#endif // ENABLE_DISASSEMBLER
+#ifdef VERIFY_HEAP
+ void Verify(Isolate* isolate);
+#endif
+
+ static const int kApplyMask; // Modes affected by apply. Depends on arch.
+
+ // In addition to modes covered by the apply mask (which is applied at GC
+ // time, among others), this covers all modes that are relocated by
+ // Code::CopyFromNoFlush after code generation.
+ static int PostCodegenRelocationMask() {
+ return ModeMask(RelocInfo::CODE_TARGET) |
+ ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ ModeMask(RelocInfo::RELATIVE_CODE_TARGET) | kApplyMask;
+ }
+
+ private:
+ // On ARM/ARM64, note that pc_ is the address of the instruction referencing
+ // the constant pool and not the address of the constant pool entry.
+ Address pc_;
+ Mode rmode_;
+ intptr_t data_ = 0;
+ Code* host_;
+ Address constant_pool_ = kNullAddress;
+ friend class RelocIterator;
+};
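
Editorial illustration (not part of the patch): the mode-range predicates above lean on the enum order, which can be checked at compile time once the class is complete, e.g.:

static_assert(RelocInfo::IsGCRelocMode(RelocInfo::EMBEDDED_OBJECT),
              "EMBEDDED_OBJECT is the last GC-visited mode");
static_assert(!RelocInfo::IsGCRelocMode(RelocInfo::RUNTIME_ENTRY),
              "modes after LAST_GCED_ENUM are not visited by the GC");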
+
+// RelocInfoWriter serializes a stream of relocation info. It writes towards
+// lower addresses.
+class RelocInfoWriter BASE_EMBEDDED {
+ public:
+ RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
+
+ byte* pos() const { return pos_; }
+ byte* last_pc() const { return last_pc_; }
+
+ void Write(const RelocInfo* rinfo);
+
+ // Update the state of the stream after reloc info buffer
+ // and/or code is moved while the stream is active.
+ void Reposition(byte* pos, byte* pc) {
+ pos_ = pos;
+ last_pc_ = pc;
+ }
+
+ // Max size (bytes) of a written RelocInfo. Longest encoding is
+ // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
+ static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;
+
+ private:
+ inline uint32_t WriteLongPCJump(uint32_t pc_delta);
+
+ inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
+ inline void WriteShortData(intptr_t data_delta);
+
+ inline void WriteMode(RelocInfo::Mode rmode);
+ inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
+ inline void WriteIntData(int data_delta);
+ inline void WriteData(intptr_t data_delta);
+
+ byte* pos_;
+ byte* last_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
+};
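
A hedged sketch of how an assembler-like client might drive the writer (illustrative only; apart from Reposition, Write, and pos, the names and buffer handling here are assumptions, not V8 internals).

void EmitOneRecordSketch(byte* buffer, int buffer_size, Address pc,
                         RelocInfo::Mode rmode, intptr_t data) {
  RelocInfoWriter writer;
  // Relocation info grows backwards from the end of the code buffer.
  writer.Reposition(buffer + buffer_size, reinterpret_cast<byte*>(pc));
  RelocInfo rinfo(pc, rmode, data, nullptr);
  writer.Write(&rinfo);
  // writer.pos() now marks the low end of the encoded stream; everything
  // from pos() up to buffer + buffer_size is reloc info.
}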
+
+// A RelocIterator iterates over relocation information.
+// Typical use:
+//
+// for (RelocIterator it(code); !it.done(); it.next()) {
+// // do something with it.rinfo() here
+// }
+//
+// A mask can be specified to skip unwanted modes.
+class RelocIterator : public Malloced {
+ public:
+ // Create a new iterator positioned at
+ // the beginning of the reloc info.
+ // Relocation information with mode k is included in the
+ // iteration iff bit k of mode_mask is set.
+ explicit RelocIterator(Code* code, int mode_mask = -1);
+ explicit RelocIterator(EmbeddedData* embedded_data, Code* code,
+ int mode_mask);
+ explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
+ explicit RelocIterator(const CodeReference code_reference,
+ int mode_mask = -1);
+ explicit RelocIterator(Vector<byte> instructions,
+ Vector<const byte> reloc_info, Address const_pool,
+ int mode_mask = -1);
+ RelocIterator(RelocIterator&&) = default;
+ RelocIterator& operator=(RelocIterator&&) = default;
+
+ // Iteration
+ bool done() const { return done_; }
+ void next();
+
+ // Return pointer valid until next next().
+ RelocInfo* rinfo() {
+ DCHECK(!done());
+ return &rinfo_;
+ }
+
+ private:
+ RelocIterator(Code* host, Address pc, Address constant_pool, const byte* pos,
+ const byte* end, int mode_mask);
+
+ // Advance* moves the position before/after reading.
+ // *Read* reads from current byte(s) into rinfo_.
+ // *Get* just reads and returns info on current byte.
+ void Advance(int bytes = 1) { pos_ -= bytes; }
+ int AdvanceGetTag();
+ RelocInfo::Mode GetMode();
+
+ void AdvanceReadLongPCJump();
+
+ void ReadShortTaggedPC();
+ void ReadShortData();
+
+ void AdvanceReadPC();
+ void AdvanceReadInt();
+ void AdvanceReadData();
+
+ // If the given mode is wanted, set it in rinfo_ and return true.
+ // Else return false. Used for efficiently skipping unwanted modes.
+ bool SetMode(RelocInfo::Mode mode) {
+ return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
+ }
+
+ const byte* pos_;
+ const byte* end_;
+ RelocInfo rinfo_;
+ bool done_ = false;
+ const int mode_mask_;
+
+ DISALLOW_COPY_AND_ASSIGN(RelocIterator);
+};
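
A usage sketch expanding on the comment above (illustrative only): building a mode mask with ModeMask and visiting just the selected record kinds; the loop body is an assumption about what a caller might do.

void VisitCodeTargetsSketch(Code* code) {
  const int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(code, mask); !it.done(); it.next()) {
    RelocInfo* rinfo = it.rinfo();
    if (RelocInfo::IsCodeTarget(rinfo->rmode())) {
      Address target = rinfo->target_address();
      (void)target;  // e.g. hand the target off to a visitor
    }
  }
}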
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_RELOC_INFO_H_
diff --git a/deps/v8/src/roots.h b/deps/v8/src/roots.h
index 20e11317c5..a15cdcea2a 100644
--- a/deps/v8/src/roots.h
+++ b/deps/v8/src/roots.h
@@ -41,7 +41,6 @@ namespace internal {
V(Map, code_map, CodeMap) \
V(Map, function_context_map, FunctionContextMap) \
V(Map, cell_map, CellMap) \
- V(Map, weak_cell_map, WeakCellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, foreign_map, ForeignMap) \
V(Map, heap_number_map, HeapNumberMap) \
@@ -167,6 +166,8 @@ namespace internal {
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(ObjectBoilerplateDescription, empty_object_boilerplate_description, \
EmptyObjectBoilerplateDescription) \
+ V(ArrayBoilerplateDescription, empty_array_boilerplate_description, \
+ EmptyArrayBoilerplateDescription) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
@@ -187,7 +188,6 @@ namespace internal {
V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
- V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
V(WeakArrayList, empty_weak_array_list, EmptyWeakArrayList) \
@@ -232,20 +232,20 @@ namespace internal {
V(NameDictionary, public_symbol_table, PublicSymbolTable) \
V(NameDictionary, api_symbol_table, ApiSymbolTable) \
V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
- V(Object, script_list, ScriptList) \
+ V(WeakArrayList, script_list, ScriptList) \
V(SimpleNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
- V(FixedArray, detached_contexts, DetachedContexts) \
- V(HeapObject, retaining_path_targets, RetainingPathTargets) \
+ V(WeakArrayList, detached_contexts, DetachedContexts) \
+ V(WeakArrayList, retaining_path_targets, RetainingPathTargets) \
V(WeakArrayList, retained_maps, RetainedMaps) \
/* Indirection lists for isolate-independent builtins */ \
V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
/* Feedback vectors that we need for code coverage or type profile */ \
V(Object, feedback_vectors_for_profiling_tools, \
FeedbackVectorsForProfilingTools) \
- V(Object, weak_stack_trace_list, WeakStackTraceList) \
- V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
+ V(WeakArrayList, noscript_shared_function_infos, \
+ NoScriptSharedFunctionInfos) \
V(FixedArray, serialized_objects, SerializedObjects) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
V(TemplateList, message_listeners, MessageListeners) \
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index ae23c99910..31b03f6bb7 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/code-stubs.h"
#include "src/conversions-inl.h"
#include "src/debug/debug.h"
@@ -15,7 +13,9 @@
#include "src/messages.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/prototype.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -61,20 +61,14 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
// For proxies, we do not collect the keys, instead we use all indices in
// the full range of [0, limit).
Handle<FixedArray> keys;
- if (receiver->IsJSProxy()) {
- CHECK(Smi::IsValid(limit));
- keys = isolate->factory()->NewFixedArray(limit);
- for (uint32_t i = 0; i < limit; ++i) {
- keys->set(i, Smi::FromInt(i));
- }
- } else {
+ if (!receiver->IsJSProxy()) {
keys = JSReceiver::GetOwnElementIndices(isolate, receiver,
Handle<JSObject>::cast(receiver));
}
uint32_t num_undefined = 0;
uint32_t current_pos = 0;
- int num_indices = keys->length();
+ int num_indices = keys.is_null() ? limit : keys->length();
// Compact keys with undefined values and move non-undefined
// values to the front.
@@ -86,7 +80,7 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
// Holes and 'undefined' are considered free spots.
// A hole is when HasElement(receiver, key) is false.
for (int i = 0; i < num_indices; ++i) {
- uint32_t key = NumberToUint32(keys->get(i));
+ uint32_t key = keys.is_null() ? i : NumberToUint32(keys->get(i));
// We only care about array indices that are smaller than the limit.
// The keys are sorted, so we can break as soon as we encounter the first.
@@ -143,7 +137,7 @@ Object* RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
// Delete everything after the undefineds, up to the limit.
for (int i = num_indices - 1; i >= 0; --i) {
- uint32_t key = NumberToUint32(keys->get(i));
+ uint32_t key = keys.is_null() ? i : NumberToUint32(keys->get(i));
if (key < current_pos) break;
if (key >= limit) continue;
@@ -605,9 +599,7 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
// We should allocate with an initial map that reflects the allocation site
// advice. Therefore we use AllocateJSObjectFromMap instead of passing
// the constructor.
- if (to_kind != initial_map->elements_kind()) {
- initial_map = Map::AsElementsKind(isolate, initial_map, to_kind);
- }
+ initial_map = Map::AsElementsKind(isolate, initial_map, to_kind);
// If we don't care to track arrays of to_kind ElementsKind, then
// don't emit a memento for them.
@@ -628,7 +620,7 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
if ((old_kind != array->GetElementsKind() || !can_use_type_feedback ||
!can_inline_array_constructor)) {
// The arguments passed in caused a transition. This kind of complexity
- // can't be dealt with in the inlined hydrogen array constructor case.
+ // can't be dealt with in the inlined optimized array constructor case.
// We must mark the allocation site as un-inlinable.
site->SetDoNotInlineCall();
}
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index aa2e260a09..972e48bae6 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions-inl.h"
#include "src/heap/factory.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/runtime/runtime-utils.h"
// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
@@ -108,7 +108,6 @@ ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
-#undef ATOMIC_OPS_INTEGER
#undef ATOMIC_OPS
#undef InterlockedExchange32
@@ -240,14 +239,14 @@ inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
} // anonymous namespace
// Duplicated from objects.h
-// V has parameters (Type, type, TYPE, C type, element_size)
-#define INTEGER_TYPED_ARRAYS(V) \
- V(Uint8, uint8, UINT8, uint8_t, 1) \
- V(Int8, int8, INT8, int8_t, 1) \
- V(Uint16, uint16, UINT16, uint16_t, 2) \
- V(Int16, int16, INT16, int16_t, 2) \
- V(Uint32, uint32, UINT32, uint32_t, 4) \
- V(Int32, int32, INT32, int32_t, 4)
+// V has parameters (Type, type, TYPE, C type)
+#define INTEGER_TYPED_ARRAYS(V) \
+ V(Uint8, uint8, UINT8, uint8_t) \
+ V(Int8, int8, INT8, int8_t) \
+ V(Uint16, uint16, UINT16, uint16_t) \
+ V(Int16, int16, INT16, int16_t) \
+ V(Uint32, uint32, UINT32, uint32_t) \
+ V(Int32, int32, INT32, int32_t)
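
For orientation (an editorial note, not part of the patch): with the element-size column dropped, each TYPED_ARRAY_CASE expansion in the switches below takes four arguments; the Int32 row in Runtime_AtomicsExchange, for example, expands to roughly:

    case kExternalInt32Array:
      return DoExchange<int32_t>(isolate, source, index, value);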
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
HandleScope scope(isolate);
@@ -262,8 +261,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
NumberToSize(sta->byte_offset());
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
return DoExchange<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -290,8 +289,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
NumberToSize(sta->byte_offset());
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -319,8 +318,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
NumberToSize(sta->byte_offset());
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
return DoAdd<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -348,8 +347,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
NumberToSize(sta->byte_offset());
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
return DoSub<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -377,8 +376,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
NumberToSize(sta->byte_offset());
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
return DoAnd<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -406,8 +405,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
NumberToSize(sta->byte_offset());
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
return DoOr<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -435,8 +434,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
NumberToSize(sta->byte_offset());
switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
+ case kExternal##Type##Array: \
return DoXor<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -449,5 +448,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
UNREACHABLE();
}
+#undef INTEGER_TYPED_ARRAYS
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index 280106751c..f718ab7eb4 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/bigint.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 6a83087a53..d4fb0df3c3 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -8,7 +8,7 @@
#include <limits>
#include "src/accessors.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/debug/debug.h"
#include "src/elements.h"
#include "src/isolate-inl.h"
@@ -833,19 +833,5 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
LanguageMode::kSloppy));
}
-
-RUNTIME_FUNCTION(Runtime_GetSuperConstructor) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSFunction, active_function, 0);
- Object* prototype = active_function->map()->prototype();
- if (!prototype->IsConstructor()) {
- HandleScope scope(isolate);
- return ThrowNotSuperConstructor(isolate, handle(prototype, isolate),
- handle(active_function, isolate));
- }
- return prototype;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 30e4341be3..6c64802963 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/conversions-inl.h"
#include "src/heap/factory.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -143,19 +142,5 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
return *weak_collection;
}
-RUNTIME_FUNCTION(Runtime_IsJSWeakMap) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSWeakMap());
-}
-
-RUNTIME_FUNCTION(Runtime_IsJSWeakSet) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSWeakSet());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 2502fba30d..bebd489d70 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/asmjs/asm-js.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
@@ -12,6 +10,9 @@
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
+#include "src/runtime/runtime-utils.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 9711ffad54..c1dc4ec9df 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <vector>
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/compiler.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
@@ -22,7 +20,9 @@
#include "src/isolate-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
+#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index ed1e226060..b43d91540e 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -4,7 +4,7 @@
#include "src/runtime/runtime-utils.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/elements.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 1057dfa177..22f4a4fb48 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include "src/accessors.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/compiler.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index 96f538e4f3..3c9a90fbbd 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -4,11 +4,12 @@
#include "src/runtime/runtime-utils.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/base/platform/time.h"
#include "src/conversions-inl.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
+#include "src/objects/js-array-buffer-inl.h"
// Implement Futex API for SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here:
@@ -17,7 +18,6 @@
namespace v8 {
namespace internal {
-
RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -30,7 +30,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
size_t addr = (index << 2) + NumberToSize(sta->byte_offset());
- return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
+ return FutexEmulation::NumWaitersForTesting(array_buffer, addr);
}
RUNTIME_FUNCTION(Runtime_SetAllowAtomicsWait) {
@@ -41,5 +41,6 @@ RUNTIME_FUNCTION(Runtime_SetAllowAtomicsWait) {
isolate->set_allow_atomics_wait(set);
return ReadOnlyRoots(isolate).undefined_value();
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 02068ec7a9..636aa63879 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 310c20b102..c98b27da27 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <memory>
#include "src/api.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
@@ -16,8 +14,10 @@
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/js-array-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
+#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -120,8 +120,8 @@ namespace {
const char* ElementsKindToType(ElementsKind fixed_elements_kind) {
switch (fixed_elements_kind) {
-#define ELEMENTS_KIND_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
+#define ELEMENTS_KIND_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return #Type "Array";
TYPED_ARRAYS(ELEMENTS_KIND_CASE)
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index bc48bb4ab7..7f07d084a1 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <iomanip>
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/frames-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
@@ -16,6 +14,7 @@
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index 5d39074984..ad75952824 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -6,14 +6,12 @@
#error Internationalization is expected to be enabled.
#endif // V8_INTL_SUPPORT
-#include "src/runtime/runtime-utils.h"
-
#include <cmath>
#include <memory>
+#include "src/api-inl.h"
#include "src/api-natives.h"
-#include "src/api.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/date.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
@@ -22,6 +20,13 @@
#include "src/messages.h"
#include "src/objects/intl-objects-inl.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-collator-inl.h"
+#include "src/objects/js-list-format-inl.h"
+#include "src/objects/js-list-format.h"
+#include "src/objects/js-plural-rules-inl.h"
+#include "src/objects/managed.h"
+#include "src/runtime/runtime-utils.h"
#include "src/utils.h"
#include "unicode/brkiter.h"
@@ -52,130 +57,109 @@
namespace v8 {
namespace internal {
-// ECMA 402 6.2.3
-RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
+// ecma402 #sec-formatlist
+RUNTIME_FUNCTION(Runtime_FormatList) {
HandleScope scope(isolate);
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSListFormat, list_format, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, list, 1);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSListFormat::FormatList(isolate, list_format, list));
+}
- Factory* factory = isolate->factory();
+// ecma402 #sec-formatlisttoparts
+RUNTIME_FUNCTION(Runtime_FormatListToParts) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSListFormat, list_format, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, list, 1);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSListFormat::FormatListToParts(isolate, list_format, list));
+}
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
-
- v8::String::Utf8Value locale_id(v8_isolate,
- v8::Utils::ToLocal(locale_id_str));
-
- // TODO(jshin): uloc_{for,to}TanguageTag can fail even for a structually valid
- // language tag if it's too long (much longer than 100 chars). Even if we
- // allocate a longer buffer, ICU will still fail if it's too long. Either
- // propose to Ecma 402 to put a limit on the locale length or change ICU to
- // handle long locale names better. See
- // https://ssl.icu-project.org/trac/ticket/13417 .
-
- // Return value which denotes invalid language tag.
- const char* const kInvalidTag = "invalid-tag";
-
- UErrorCode error = U_ZERO_ERROR;
- char icu_result[ULOC_FULLNAME_CAPACITY];
- uloc_forLanguageTag(*locale_id, icu_result, ULOC_FULLNAME_CAPACITY, nullptr,
- &error);
- if (U_FAILURE(error) || error == U_STRING_NOT_TERMINATED_WARNING) {
- return *factory->NewStringFromAsciiChecked(kInvalidTag);
+RUNTIME_FUNCTION(Runtime_GetNumberOption) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, options, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, property, 1);
+ CONVERT_SMI_ARG_CHECKED(min, 2);
+ CONVERT_SMI_ARG_CHECKED(max, 3);
+ CONVERT_SMI_ARG_CHECKED(fallback, 4);
+
+ Maybe<int> num =
+ Intl::GetNumberOption(isolate, options, property, min, max, fallback);
+ if (num.IsNothing()) {
+ return ReadOnlyRoots(isolate).exception();
}
+ return Smi::FromInt(num.FromJust());
+}
- char result[ULOC_FULLNAME_CAPACITY];
-
- // Force strict BCP47 rules.
- uloc_toLanguageTag(icu_result, result, ULOC_FULLNAME_CAPACITY, TRUE, &error);
-
- if (U_FAILURE(error) || error == U_STRING_NOT_TERMINATED_WARNING) {
- return *factory->NewStringFromAsciiChecked(kInvalidTag);
+RUNTIME_FUNCTION(Runtime_DefaultNumberOption) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ CONVERT_SMI_ARG_CHECKED(min, 1);
+ CONVERT_SMI_ARG_CHECKED(max, 2);
+ CONVERT_SMI_ARG_CHECKED(fallback, 3);
+ CONVERT_ARG_HANDLE_CHECKED(String, property, 4);
+
+ Maybe<int> num =
+ Intl::DefaultNumberOption(isolate, value, min, max, fallback, property);
+ if (num.IsNothing()) {
+ return ReadOnlyRoots(isolate).exception();
}
-
- return *factory->NewStringFromAsciiChecked(result);
+ return Smi::FromInt(num.FromJust());
}
-RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
+// ECMA 402 6.2.3
+RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
HandleScope scope(isolate);
- Factory* factory = isolate->factory();
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
-
- const icu::Locale* available_locales = nullptr;
- int32_t count = 0;
-
- if (service->IsUtf8EqualTo(CStrVector("collator"))) {
- available_locales = icu::Collator::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("numberformat"))) {
- available_locales = icu::NumberFormat::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("dateformat"))) {
- available_locales = icu::DateFormat::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("breakiterator"))) {
- available_locales = icu::BreakIterator::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("pluralrules"))) {
- // TODO(littledan): For PluralRules, filter out locales that
- // don't support PluralRules.
- // PluralRules is missing an appropriate getAvailableLocales method,
- // so we should filter from all locales, but it's not clear how; see
- // https://ssl.icu-project.org/trac/ticket/12756
- available_locales = icu::Locale::getAvailableLocales(count);
- } else if (service->IsUtf8EqualTo(CStrVector("relativetimeformat"))) {
- // TODO(ftang): for now just use
- // icu::NumberFormat::getAvailableLocales(count) until we migrate to
- // Intl::GetAvailableLocales()
- available_locales = icu::NumberFormat::getAvailableLocales(count);
- } else {
- UNREACHABLE();
- }
+ CONVERT_ARG_HANDLE_CHECKED(Object, locale, 0);
- UErrorCode error = U_ZERO_ERROR;
- char result[ULOC_FULLNAME_CAPACITY];
- Handle<JSObject> locales = factory->NewJSObject(isolate->object_function());
-
- for (int32_t i = 0; i < count; ++i) {
- const char* icu_name = available_locales[i].getName();
-
- error = U_ZERO_ERROR;
- // No need to force strict BCP47 rules.
- uloc_toLanguageTag(icu_name, result, ULOC_FULLNAME_CAPACITY, FALSE, &error);
- if (U_FAILURE(error) || error == U_STRING_NOT_TERMINATED_WARNING) {
- // This shouldn't happen, but let's not break the user.
- continue;
- }
-
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- locales, factory->NewStringFromAsciiChecked(result),
- factory->NewNumber(i), NONE));
+ std::string canonicalized;
+ if (!Intl::CanonicalizeLanguageTag(isolate, locale).To(&canonicalized)) {
+ return ReadOnlyRoots(isolate).exception();
}
+ return *isolate->factory()->NewStringFromAsciiChecked(canonicalized.c_str());
+}
+RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
+ Handle<JSObject> locales;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, locales, Intl::AvailableLocalesOf(isolate, service));
return *locales;
}
RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
HandleScope scope(isolate);
- Factory* factory = isolate->factory();
DCHECK_EQ(0, args.length());
+ return *isolate->factory()->NewStringFromAsciiChecked(
+ Intl::DefaultLocale(isolate).c_str());
+}
- icu::Locale default_locale;
-
- // Translate ICU's fallback locale to a well-known locale.
- if (strcmp(default_locale.getName(), "en_US_POSIX") == 0) {
- return *factory->NewStringFromStaticChars("en-US");
- }
+RUNTIME_FUNCTION(Runtime_IsWellFormedCurrencyCode) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, currency, 0);
+ return *(isolate->factory()->ToBoolean(
+ Intl::IsWellFormedCurrencyCode(isolate, currency)));
+}
- // Set the locale
- char result[ULOC_FULLNAME_CAPACITY];
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(default_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
- FALSE, &status);
- if (U_SUCCESS(status)) {
- return *factory->NewStringFromAsciiChecked(result);
- }
+RUNTIME_FUNCTION(Runtime_DefineWEProperty) {
+ HandleScope scope(isolate);
- return *factory->NewStringFromStaticChars("und");
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Intl::DefineWEProperty(isolate, target, key, value);
+ return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
@@ -232,10 +216,10 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
// Set date time formatter as embedder field of the resulting JS object.
icu::SimpleDateFormat* date_format =
DateFormat::InitializeDateTimeFormat(isolate, locale, options, resolved);
+ CHECK_NOT_NULL(date_format);
- if (!date_format) return isolate->ThrowIllegalOperation();
-
- local_object->SetEmbedderField(0, reinterpret_cast<Smi*>(date_format));
+ local_object->SetEmbedderField(DateFormat::kSimpleDateFormatIndex,
+ reinterpret_cast<Smi*>(date_format));
// Make object handle weak so we can delete the data format once GC kicks in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
@@ -245,33 +229,6 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
return *local_object;
}
-RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(date, 1);
-
- double date_value = DateCache::TimeClip(date->Number());
- if (std::isnan(date_value)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
- }
-
- icu::SimpleDateFormat* date_format =
- DateFormat::UnpackDateFormat(isolate, date_format_holder);
- CHECK_NOT_NULL(date_format);
-
- icu::UnicodeString result;
- date_format->format(date_value, result);
-
- RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
-}
-
RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
HandleScope scope(isolate);
@@ -280,198 +237,103 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_number_format_function(), isolate);
-
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
- JSObject::New(constructor, constructor));
-
- // Set number formatter as embedder field of the resulting JS object.
- icu::DecimalFormat* number_format =
- NumberFormat::InitializeNumberFormat(isolate, locale, options, resolved);
-
- if (!number_format) return isolate->ThrowIllegalOperation();
-
- local_object->SetEmbedderField(NumberFormat::kDecimalFormatIndex,
- reinterpret_cast<Smi*>(number_format));
-
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
- NumberFormat::DeleteNumberFormat,
- WeakCallbackType::kInternalFields);
- return *local_object;
-}
-
-RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
- Handle<Object> number_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_obj,
- Object::ToNumber(isolate, value));
-
- double number = number_obj->Number();
- RETURN_RESULT_OR_FAILURE(isolate, NumberFormat::FormatNumber(
- isolate, number_format_holder, number));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::CreateNumberFormat(isolate, locale, options, resolved));
}
RUNTIME_FUNCTION(Runtime_CurrencyDigits) {
+ HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
-
CONVERT_ARG_HANDLE_CHECKED(String, currency, 0);
-
- v8::String::Value currency_string(v8_isolate, v8::Utils::ToLocal(currency));
-
- DisallowHeapAllocation no_gc;
- UErrorCode status = U_ZERO_ERROR;
- uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
- reinterpret_cast<const UChar*>(*currency_string), &status);
- // For missing currency codes, default to the most common, 2
- if (!U_SUCCESS(status)) fraction_digits = 2;
- return Smi::FromInt(fraction_digits);
+ return *Intl::CurrencyDigits(isolate, currency);
}
-RUNTIME_FUNCTION(Runtime_CreateCollator) {
+RUNTIME_FUNCTION(Runtime_CollatorResolvedOptions) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_collator_function(), isolate);
-
- Handle<JSObject> collator_holder;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, collator_holder,
- JSObject::New(constructor, constructor));
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, collator_obj, 0);
- if (!Collator::InitializeCollator(isolate, collator_holder, locale, options,
- resolved)) {
- return isolate->ThrowIllegalOperation();
+ // 3. If pr does not have an [[InitializedCollator]] internal
+ // slot, throw a TypeError exception.
+ if (!collator_obj->IsJSCollator()) {
+ Handle<String> method_str = isolate->factory()->NewStringFromStaticChars(
+ "Intl.Collator.prototype.resolvedOptions");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ method_str, collator_obj));
}
- return *collator_holder;
+ Handle<JSCollator> collator = Handle<JSCollator>::cast(collator_obj);
+
+ return *JSCollator::ResolvedOptions(isolate, collator);
}
-RUNTIME_FUNCTION(Runtime_InternalCompare) {
+RUNTIME_FUNCTION(Runtime_PluralRulesResolvedOptions) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, plural_rules_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
-
- icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
- CHECK_NOT_NULL(collator);
-
- string1 = String::Flatten(isolate, string1);
- string2 = String::Flatten(isolate, string2);
-
- UCollationResult result;
- UErrorCode status = U_ZERO_ERROR;
- {
- DisallowHeapAllocation no_gc;
- int32_t length1 = string1->length();
- int32_t length2 = string2->length();
- String::FlatContent flat1 = string1->GetFlatContent();
- String::FlatContent flat2 = string2->GetFlatContent();
- std::unique_ptr<uc16[]> sap1;
- std::unique_ptr<uc16[]> sap2;
- icu::UnicodeString string_val1(
- FALSE, GetUCharBufferFromFlat(flat1, &sap1, length1), length1);
- icu::UnicodeString string_val2(
- FALSE, GetUCharBufferFromFlat(flat2, &sap2, length2), length2);
- result = collator->compare(string_val1, string_val2, status);
+ // 3. If pr does not have an [[InitializedPluralRules]] internal
+ // slot, throw a TypeError exception.
+ if (!plural_rules_obj->IsJSPluralRules()) {
+ Handle<String> method_str = isolate->factory()->NewStringFromStaticChars(
+ "Intl.PluralRules.prototype.resolvedOptions");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ method_str, plural_rules_obj));
}
- if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
- return *isolate->factory()->NewNumberFromInt(result);
+ Handle<JSPluralRules> plural_rules =
+ Handle<JSPluralRules>::cast(plural_rules_obj);
+
+ return *JSPluralRules::ResolvedOptions(isolate, plural_rules);
}
-RUNTIME_FUNCTION(Runtime_CreatePluralRules) {
+RUNTIME_FUNCTION(Runtime_ParseExtension) {
+ Factory* factory = isolate->factory();
HandleScope scope(isolate);
-
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
-
- Handle<JSFunction> constructor(
- isolate->native_context()->intl_plural_rules_function(), isolate);
-
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
- JSObject::New(constructor, constructor));
-
- // Set pluralRules as internal field of the resulting JS object.
- icu::PluralRules* plural_rules;
- icu::DecimalFormat* decimal_format;
- bool success = PluralRules::InitializePluralRules(
- isolate, locale, options, resolved, &plural_rules, &decimal_format);
-
- if (!success) return isolate->ThrowIllegalOperation();
-
- local_object->SetEmbedderField(0, reinterpret_cast<Smi*>(plural_rules));
- local_object->SetEmbedderField(1, reinterpret_cast<Smi*>(decimal_format));
-
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
- PluralRules::DeletePluralRules,
- WeakCallbackType::kInternalFields);
- return *local_object;
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, extension, 0);
+ std::map<std::string, std::string> map;
+ Intl::ParseExtension(isolate, std::string(extension->ToCString().get()), map);
+ Handle<JSObject> extension_map =
+ isolate->factory()->NewJSObjectWithNullProto();
+ for (std::map<std::string, std::string>::iterator it = map.begin();
+ it != map.end(); it++) {
+ JSObject::AddProperty(
+ isolate, extension_map,
+ factory->NewStringFromAsciiChecked(it->first.c_str()),
+ factory->NewStringFromAsciiChecked(it->second.c_str()), NONE);
+ }
+ return *extension_map;
}
RUNTIME_FUNCTION(Runtime_PluralRulesSelect) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, plural_rules_holder, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, plural_rules_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
- icu::PluralRules* plural_rules =
- PluralRules::UnpackPluralRules(isolate, plural_rules_holder);
- CHECK_NOT_NULL(plural_rules);
-
- icu::DecimalFormat* number_format =
- PluralRules::UnpackNumberFormat(isolate, plural_rules_holder);
- CHECK_NOT_NULL(number_format);
-
- // Currently, PluralRules doesn't implement all the options for rounding that
- // the Intl spec provides; format and parse the number to round to the
- // appropriate amount, then apply PluralRules.
- //
- // TODO(littledan): If a future ICU version supports an extended API to avoid
- // this step, then switch to that API. Bug thread:
- // http://bugs.icu-project.org/trac/ticket/12763
- icu::UnicodeString rounded_string;
- number_format->format(number->Number(), rounded_string);
-
- icu::Formattable formattable;
- UErrorCode status = U_ZERO_ERROR;
- number_format->parse(rounded_string, formattable, status);
- if (!U_SUCCESS(status)) return isolate->ThrowIllegalOperation();
-
- double rounded = formattable.getDouble(status);
- if (!U_SUCCESS(status)) return isolate->ThrowIllegalOperation();
-
- icu::UnicodeString result = plural_rules->select(rounded);
- return *isolate->factory()
- ->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length()))
- .ToHandleChecked();
+ // 3. If pr does not have an [[InitializedPluralRules]] internal
+ // slot, throw a TypeError exception.
+ if (!plural_rules_obj->IsJSPluralRules()) {
+ Handle<String> method_str = isolate->factory()->NewStringFromStaticChars(
+ "Intl.PluralRules.prototype.select");
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ method_str, plural_rules_obj));
+ }
+
+ Handle<JSPluralRules> plural_rules =
+ Handle<JSPluralRules>::cast(plural_rules_obj);
+
+ // 4. Return ? ResolvePlural(pr, n).
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSPluralRules::ResolvePlural(isolate, plural_rules, number));
}
RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
@@ -493,12 +355,15 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
// Set break iterator as embedder field of the resulting JS object.
icu::BreakIterator* break_iterator = V8BreakIterator::InitializeBreakIterator(
isolate, locale, options, resolved);
+ CHECK_NOT_NULL(break_iterator);
if (!break_iterator) return isolate->ThrowIllegalOperation();
- local_object->SetEmbedderField(0, reinterpret_cast<Smi*>(break_iterator));
+ local_object->SetEmbedderField(V8BreakIterator::kBreakIteratorIndex,
+ reinterpret_cast<Smi*>(break_iterator));
// Make sure that the pointer to adopted text is nullptr.
- local_object->SetEmbedderField(1, static_cast<Smi*>(nullptr));
+ local_object->SetEmbedderField(V8BreakIterator::kUnicodeStringIndex,
+ static_cast<Smi*>(nullptr));
// Make object handle weak so we can delete the break iterator once GC kicks
// in.
@@ -509,36 +374,6 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
return *local_object;
}
-RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
- HandleScope scope(isolate);
-
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
-
- icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- CHECK_NOT_NULL(break_iterator);
-
- icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
- break_iterator_holder->GetEmbedderField(1));
- delete u_text;
-
- int length = text->length();
- text = String::Flatten(isolate, text);
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = text->GetFlatContent();
- std::unique_ptr<uc16[]> sap;
- const UChar* text_value = GetUCharBufferFromFlat(flat, &sap, length);
- u_text = new icu::UnicodeString(text_value, length);
- break_iterator_holder->SetEmbedderField(1, reinterpret_cast<Smi*>(u_text));
-
- break_iterator->setText(*u_text);
-
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
HandleScope scope(isolate);
@@ -547,7 +382,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
CHECK_NOT_NULL(break_iterator);
return *isolate->factory()->NewNumberFromInt(break_iterator->first());
@@ -561,7 +396,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
CHECK_NOT_NULL(break_iterator);
return *isolate->factory()->NewNumberFromInt(break_iterator->next());
@@ -575,7 +410,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
CHECK_NOT_NULL(break_iterator);
return *isolate->factory()->NewNumberFromInt(break_iterator->current());
@@ -589,7 +424,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ V8BreakIterator::UnpackBreakIterator(break_iterator_holder);
CHECK_NOT_NULL(break_iterator);
// TODO(cira): Remove cast once ICU fixes base BreakIterator class.
@@ -612,63 +447,50 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
}
}
-RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
+RUNTIME_FUNCTION(Runtime_ToLocaleDateTime) {
HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- s = String::Flatten(isolate, s);
- return ConvertToLower(s, isolate);
+
+ DCHECK_EQ(6, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(Object, date, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, locales, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, options, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, required, 3);
+ CONVERT_ARG_HANDLE_CHECKED(String, defaults, 4);
+ CONVERT_ARG_HANDLE_CHECKED(String, service, 5);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, DateFormat::ToLocaleDateTime(
+ isolate, date, locales, options, required->ToCString().get(),
+ defaults->ToCString().get(), service->ToCString().get()));
}
-RUNTIME_FUNCTION(Runtime_StringToUpperCaseIntl) {
+RUNTIME_FUNCTION(Runtime_ToDateTimeOptions) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, options, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, required, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, defaults, 2);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, DateFormat::ToDateTimeOptions(isolate, options,
+ required->ToCString().get(),
+ defaults->ToCString().get()));
+}
+
+RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
s = String::Flatten(isolate, s);
- return ConvertToUpper(s, isolate);
+ RETURN_RESULT_OR_FAILURE(isolate, ConvertToLower(s, isolate));
}
-RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
+RUNTIME_FUNCTION(Runtime_StringToUpperCaseIntl) {
HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 3);
+ DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_upper, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, lang_arg, 2);
-
- // Primary language tag can be up to 8 characters long in theory.
- // https://tools.ietf.org/html/bcp47#section-2.2.1
- DCHECK_LE(lang_arg->length(), 8);
- lang_arg = String::Flatten(isolate, lang_arg);
s = String::Flatten(isolate, s);
-
- // All the languages requiring special-handling have two-letter codes.
- // Note that we have to check for '!= 2' here because private-use language
- // tags (x-foo) or grandfathered irregular tags (e.g. i-enochian) would have
- // only 'x' or 'i' when they get here.
- if (V8_UNLIKELY(lang_arg->length() != 2))
- return ConvertCase(s, is_upper, isolate);
-
- char c1, c2;
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent lang = lang_arg->GetFlatContent();
- c1 = lang.Get(0);
- c2 = lang.Get(1);
- }
- // TODO(jshin): Consider adding a fast path for ASCII or Latin-1. The fastpath
- // in the root locale needs to be adjusted for az, lt and tr because even case
- // mapping of ASCII range characters are different in those locales.
- // Greek (el) does not require any adjustment.
- if (V8_UNLIKELY(c1 == 't' && c2 == 'r'))
- return LocaleConvertCase(s, isolate, is_upper, "tr");
- if (V8_UNLIKELY(c1 == 'e' && c2 == 'l'))
- return LocaleConvertCase(s, isolate, is_upper, "el");
- if (V8_UNLIKELY(c1 == 'l' && c2 == 't'))
- return LocaleConvertCase(s, isolate, is_upper, "lt");
- if (V8_UNLIKELY(c1 == 'a' && c2 == 'z'))
- return LocaleConvertCase(s, isolate, is_upper, "az");
-
- return ConvertCase(s, is_upper, isolate);
+ RETURN_RESULT_OR_FAILURE(isolate, ConvertToUpper(s, isolate));
}
RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
@@ -704,5 +526,18 @@ RUNTIME_FUNCTION(Runtime_IntlUnwrapReceiver) {
check_legacy_constructor));
}
+RUNTIME_FUNCTION(Runtime_SupportedLocalesOf) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(args.length(), 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, locales, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, options, 2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Intl::SupportedLocalesOf(isolate, service, locales, options));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 6e17ba85d4..d5111f7efa 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include "src/allocation-site-scopes.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/ast/ast.h"
#include "src/isolate-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -22,7 +21,7 @@ bool IsUninitializedLiteralSite(Object* literal_site) {
return literal_site == Smi::kZero;
}
-bool HasBoilerplate(Isolate* isolate, Handle<Object> literal_site) {
+bool HasBoilerplate(Handle<Object> literal_site) {
return !literal_site->IsSmi();
}
@@ -196,7 +195,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
UNREACHABLE();
break;
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -317,11 +316,11 @@ MaybeHandle<JSObject> DeepCopy(Handle<JSObject> object,
return copy;
}
-struct ObjectBoilerplate {
+struct ObjectLiteralHelper {
static Handle<JSObject> Create(Isolate* isolate,
Handle<HeapObject> description, int flags,
PretenureFlag pretenure_flag) {
- Handle<Context> native_context = isolate->native_context();
+ Handle<NativeContext> native_context = isolate->native_context();
Handle<ObjectBoilerplateDescription> object_boilerplate_description =
Handle<ObjectBoilerplateDescription>::cast(description);
bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
@@ -392,7 +391,7 @@ struct ObjectBoilerplate {
}
};
-struct ArrayBoilerplate {
+struct ArrayLiteralHelper {
static Handle<JSObject> Create(Isolate* isolate,
Handle<HeapObject> description, int flags,
PretenureFlag pretenure_flag) {
@@ -455,20 +454,43 @@ Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
if (description->IsObjectBoilerplateDescription()) {
Handle<ObjectBoilerplateDescription> object_boilerplate_description =
Handle<ObjectBoilerplateDescription>::cast(description);
- return ObjectBoilerplate::Create(isolate, object_boilerplate_description,
- object_boilerplate_description->flags(),
- pretenure_flag);
+ return ObjectLiteralHelper::Create(isolate, object_boilerplate_description,
+ object_boilerplate_description->flags(),
+ pretenure_flag);
} else {
DCHECK(description->IsArrayBoilerplateDescription());
Handle<ArrayBoilerplateDescription> array_boilerplate_description =
Handle<ArrayBoilerplateDescription>::cast(description);
- return ArrayBoilerplate::Create(
+ return ArrayLiteralHelper::Create(
isolate, array_boilerplate_description,
array_boilerplate_description->elements_kind(), pretenure_flag);
}
}
-template <typename Boilerplate>
+inline DeepCopyHints DecodeCopyHints(int flags) {
+ DeepCopyHints copy_hints =
+ (flags & AggregateLiteral::kIsShallow) ? kObjectIsShallow : kNoHints;
+ if (FLAG_track_double_fields && !FLAG_unbox_double_fields) {
+ // Make sure we properly clone mutable heap numbers on 32-bit platforms.
+ copy_hints = kNoHints;
+ }
+ return copy_hints;
+}
+
+template <typename LiteralHelper>
+MaybeHandle<JSObject> CreateLiteralWithoutAllocationSite(
+ Isolate* isolate, Handle<HeapObject> description, int flags) {
+ Handle<JSObject> literal =
+ LiteralHelper::Create(isolate, description, flags, NOT_TENURED);
+ DeepCopyHints copy_hints = DecodeCopyHints(flags);
+ if (copy_hints == kNoHints) {
+ DeprecationUpdateContext update_context(isolate);
+ RETURN_ON_EXCEPTION(isolate, DeepWalk(literal, &update_context), JSObject);
+ }
+ return literal;
+}
+
+template <typename LiteralHelper>
MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
Handle<FeedbackVector> vector,
int literals_index,
@@ -476,41 +498,25 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
CHECK(literals_slot.ToInt() < vector->length());
Handle<Object> literal_site(vector->Get(literals_slot)->ToObject(), isolate);
- DeepCopyHints copy_hints =
- (flags & AggregateLiteral::kIsShallow) ? kObjectIsShallow : kNoHints;
- if (FLAG_track_double_fields && !FLAG_unbox_double_fields) {
- // Make sure we properly clone mutable heap numbers on 32-bit platforms.
- copy_hints = kNoHints;
- }
+ DeepCopyHints copy_hints = DecodeCopyHints(flags);
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
- if (HasBoilerplate(isolate, literal_site)) {
+ if (HasBoilerplate(literal_site)) {
site = Handle<AllocationSite>::cast(literal_site);
boilerplate = Handle<JSObject>(site->boilerplate(), isolate);
} else {
// Eagerly create AllocationSites for literals that contain an Array.
bool needs_initial_allocation_site =
(flags & AggregateLiteral::kNeedsInitialAllocationSite) != 0;
- // TODO(cbruni): Even in the case where we need an initial allocation site
- // we could still create the boilerplate lazily to save memory.
if (!needs_initial_allocation_site &&
IsUninitializedLiteralSite(*literal_site)) {
PreInitializeLiteralSite(vector, literals_slot);
- boilerplate =
- Boilerplate::Create(isolate, description, flags, NOT_TENURED);
- if (copy_hints == kNoHints) {
- DeprecationUpdateContext update_context(isolate);
- RETURN_ON_EXCEPTION(isolate, DeepWalk(boilerplate, &update_context),
- JSObject);
- }
- return boilerplate;
+ return CreateLiteralWithoutAllocationSite<LiteralHelper>(
+ isolate, description, flags);
} else {
- PretenureFlag pretenure_flag =
- Heap::InNewSpace(*vector) ? NOT_TENURED : TENURED;
- boilerplate =
- Boilerplate::Create(isolate, description, flags, pretenure_flag);
+ boilerplate = LiteralHelper::Create(isolate, description, flags, TENURED);
}
// Install AllocationSite objects.
AllocationSiteCreationContext creation_context(isolate);
@@ -544,8 +550,28 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
RETURN_RESULT_OR_FAILURE(
- isolate, CreateLiteral<ObjectBoilerplate>(isolate, vector, literals_index,
- description, flags));
+ isolate, CreateLiteral<ObjectLiteralHelper>(
+ isolate, vector, literals_index, description, flags));
+}
+
+RUNTIME_FUNCTION(Runtime_CreateObjectLiteralWithoutAllocationSite) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, CreateLiteralWithoutAllocationSite<ObjectLiteralHelper>(
+ isolate, description, flags));
+}
+
+RUNTIME_FUNCTION(Runtime_CreateArrayLiteralWithoutAllocationSite) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, description, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, CreateLiteralWithoutAllocationSite<ArrayLiteralHelper>(
+ isolate, description, flags));
}
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
@@ -556,8 +582,8 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
RETURN_RESULT_OR_FAILURE(
- isolate, CreateLiteral<ArrayBoilerplate>(isolate, vector, literals_index,
- elements, flags));
+ isolate, CreateLiteral<ArrayLiteralHelper>(
+ isolate, vector, literals_index, elements, flags));
}
RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
@@ -573,7 +599,7 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(vector->Get(literal_slot)->ToObject(), isolate);
Handle<Object> boilerplate;
- if (!HasBoilerplate(isolate, literal_site)) {
+ if (!HasBoilerplate(literal_site)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, boilerplate,
JSRegExp::New(isolate, pattern, JSRegExp::Flags(flags)));
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 1804f93229..7695c14657 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -5,7 +5,6 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/assembler.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/counters.h"
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 5c59a2f997..91dac4fa1c 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/js-promise.h"
#include "src/objects/module.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 2eac95923f..14b91c8f1b 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/isolate-inl.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -70,12 +69,12 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
return *isolate->factory()->NewNumber(value);
}
-RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
+RUNTIME_FUNCTION(Runtime_NumberToString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
- return *isolate->factory()->NumberToString(number, false);
+ return *isolate->factory()->NumberToString(number);
}
// Compare two Smis x, y as if they were converted to strings and then
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index b3a8b18906..81478b0e1b 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/property-descriptor-object.h"
#include "src/property-descriptor.h"
+#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -405,34 +405,26 @@ RUNTIME_FUNCTION(Runtime_AddDictionaryProperty) {
RUNTIME_FUNCTION(Runtime_ObjectCreate) {
HandleScope scope(isolate);
Handle<Object> prototype = args.at(0);
+ Handle<Object> properties = args.at(1);
+ Handle<JSObject> obj;
+ // 1. If Type(O) is neither Object nor Null, throw a TypeError exception.
if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
}
+ // 2. Let obj be ObjectCreate(O).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, obj, JSObject::ObjectCreate(isolate, prototype));
- // Generate the map with the specified {prototype} based on the Object
- // function's initial map from the current native context.
- // TODO(bmeurer): Use a dedicated cache for Object.create; think about
- // slack tracking for Object.create.
- Handle<Map> map =
- Map::GetObjectCreateMap(isolate, Handle<HeapObject>::cast(prototype));
-
- // Actually allocate the object.
- Handle<JSObject> object;
- if (map->is_dictionary_map()) {
- object = isolate->factory()->NewSlowJSObjectFromMap(map);
- } else {
- object = isolate->factory()->NewJSObjectFromMap(map);
- }
-
- // Define the properties if properties was specified and is not undefined.
- Handle<Object> properties = args.at(1);
+ // 3. If Properties is not undefined, then
if (!properties->IsUndefined(isolate)) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSReceiver::DefineProperties(isolate, object, properties));
+ // a. Return ? ObjectDefineProperties(obj, Properties).
+ // Define the properties if properties was specified and is not undefined.
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSReceiver::DefineProperties(isolate, obj, properties));
}
-
- return *object;
+ // 4. Return obj.
+ return *obj;
}
MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
@@ -1239,27 +1231,6 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
return *value;
}
-// Checks that 22.2.2.1.1 Runtime Semantics: IterableToList produces exactly the
-// same result as doing nothing.
-RUNTIME_FUNCTION(Runtime_IterableToListCanBeElided) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
-
- // If an iterator symbol is added to the Number prototype, we could see a Smi.
- if (obj->IsSmi()) return isolate->heap()->ToBoolean(false);
- if (!HeapObject::cast(*obj)->IsJSObject()) {
- return isolate->heap()->ToBoolean(false);
- }
-
- // While iteration alone may not have observable side-effects, calling
- // toNumber on an object will. Make sure the arg is not an array of objects.
- ElementsKind kind = JSObject::cast(*obj)->GetElementsKind();
- if (!IsFastNumberElementsKind(kind)) return isolate->heap()->ToBoolean(false);
-
- return isolate->heap()->ToBoolean(!obj->IterationHasObservableEffects());
-}
-
RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index ec78904710..eeb92e9a35 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/api.h"
-#include "src/arguments.h"
+#include "src/api-inl.h"
+#include "src/arguments-inl.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/elements.h"
#include "src/objects-inl.h"
#include "src/objects/js-promise-inl.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 8101ea6d29..7eeee631be 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -4,7 +4,7 @@
#include "src/runtime/runtime-utils.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/elements.h"
#include "src/heap/factory.h"
#include "src/isolate-inl.h"
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index f58a3dd6f7..3e77bf1f3b 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -2,19 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <functional>
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/conversions-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/js-array-inl.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
-#include "src/string-builder.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/string-builder-inl.h"
#include "src/string-search.h"
+#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
@@ -65,7 +66,7 @@ int LookupNamedCapture(std::function<bool(String*)> name_matches,
class CompiledReplacement {
public:
explicit CompiledReplacement(Zone* zone)
- : parts_(1, zone), replacement_substrings_(0, zone), zone_(zone) {}
+ : parts_(zone), replacement_substrings_(zone) {}
// Return whether the replacement is simple.
bool Compile(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -77,9 +78,7 @@ class CompiledReplacement {
int32_t* match);
// Number of distinct parts of the replacement pattern.
- int parts() { return parts_.length(); }
-
- Zone* zone() const { return zone_; }
+ int parts() { return static_cast<int>(parts_.size()); }
private:
enum PartType {
@@ -142,10 +141,10 @@ class CompiledReplacement {
};
template <typename Char>
- bool ParseReplacementPattern(ZoneList<ReplacementPart>* parts,
+ bool ParseReplacementPattern(ZoneChunkList<ReplacementPart>* parts,
Vector<Char> characters,
FixedArray* capture_name_map, int capture_count,
- int subject_length, Zone* zone) {
+ int subject_length) {
// Equivalent to String::GetSubstitution, except that this method converts
// the replacement string into an internal representation that avoids
// repeated parsing when used repeatedly.
@@ -163,9 +162,8 @@ class CompiledReplacement {
case '$':
if (i > last) {
// There is a substring before. Include the first "$".
- parts->Add(
- ReplacementPart::ReplacementSubString(last, next_index),
- zone);
+ parts->push_back(
+ ReplacementPart::ReplacementSubString(last, next_index));
last = next_index + 1; // Continue after the second "$".
} else {
// Let the next substring start with the second "$".
@@ -175,25 +173,25 @@ class CompiledReplacement {
break;
case '`':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
+ parts->push_back(ReplacementPart::ReplacementSubString(last, i));
}
- parts->Add(ReplacementPart::SubjectPrefix(), zone);
+ parts->push_back(ReplacementPart::SubjectPrefix());
i = next_index;
last = i + 1;
break;
case '\'':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
+ parts->push_back(ReplacementPart::ReplacementSubString(last, i));
}
- parts->Add(ReplacementPart::SubjectSuffix(subject_length), zone);
+ parts->push_back(ReplacementPart::SubjectSuffix(subject_length));
i = next_index;
last = i + 1;
break;
case '&':
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
+ parts->push_back(ReplacementPart::ReplacementSubString(last, i));
}
- parts->Add(ReplacementPart::SubjectMatch(), zone);
+ parts->push_back(ReplacementPart::SubjectMatch());
i = next_index;
last = i + 1;
break;
@@ -226,11 +224,11 @@ class CompiledReplacement {
}
if (capture_ref > 0) {
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i),
- zone);
+ parts->push_back(
+ ReplacementPart::ReplacementSubString(last, i));
}
DCHECK(capture_ref <= capture_count);
- parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone);
+ parts->push_back(ReplacementPart::SubjectCapture(capture_ref));
last = next_index + 1;
}
i = next_index;
@@ -281,12 +279,12 @@ class CompiledReplacement {
(1 <= capture_index && capture_index <= capture_count));
if (i > last) {
- parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
+ parts->push_back(ReplacementPart::ReplacementSubString(last, i));
}
- parts->Add((capture_index == -1)
- ? ReplacementPart::EmptyReplacement()
- : ReplacementPart::SubjectCapture(capture_index),
- zone);
+ parts->push_back(
+ (capture_index == -1)
+ ? ReplacementPart::EmptyReplacement()
+ : ReplacementPart::SubjectCapture(capture_index));
last = closing_bracket_index + 1;
i = closing_bracket_index;
break;
@@ -302,15 +300,14 @@ class CompiledReplacement {
// Replacement is simple. Do not use Apply to do the replacement.
return true;
} else {
- parts->Add(ReplacementPart::ReplacementSubString(last, length), zone);
+ parts->push_back(ReplacementPart::ReplacementSubString(last, length));
}
}
return false;
}
- ZoneList<ReplacementPart> parts_;
- ZoneList<Handle<String> > replacement_substrings_;
- Zone* zone_;
+ ZoneChunkList<ReplacementPart> parts_;
+ ZoneVector<Handle<String>> replacement_substrings_;
};
bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -334,31 +331,31 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
if (content.IsOneByte()) {
simple = ParseReplacementPattern(&parts_, content.ToOneByteVector(),
capture_name_map, capture_count,
- subject_length, zone());
+ subject_length);
} else {
DCHECK(content.IsTwoByte());
simple = ParseReplacementPattern(&parts_, content.ToUC16Vector(),
capture_name_map, capture_count,
- subject_length, zone());
+ subject_length);
}
if (simple) return true;
}
// Find substrings of replacement string and create them as String objects.
int substring_index = 0;
- for (int i = 0, n = parts_.length(); i < n; i++) {
- int tag = parts_[i].tag;
+ for (ReplacementPart& part : parts_) {
+ int tag = part.tag;
if (tag <= 0) { // A replacement string slice.
int from = -tag;
- int to = parts_[i].data;
- replacement_substrings_.Add(
- isolate->factory()->NewSubString(replacement, from, to), zone());
- parts_[i].tag = REPLACEMENT_SUBSTRING;
- parts_[i].data = substring_index;
+ int to = part.data;
+ replacement_substrings_.push_back(
+ isolate->factory()->NewSubString(replacement, from, to));
+ part.tag = REPLACEMENT_SUBSTRING;
+ part.data = substring_index;
substring_index++;
} else if (tag == REPLACEMENT_STRING) {
- replacement_substrings_.Add(replacement, zone());
- parts_[i].data = substring_index;
+ replacement_substrings_.push_back(replacement);
+ part.data = substring_index;
substring_index++;
}
}
@@ -368,9 +365,8 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
int match_from, int match_to, int32_t* match) {
- DCHECK_LT(0, parts_.length());
- for (int i = 0, n = parts_.length(); i < n; i++) {
- ReplacementPart part = parts_[i];
+ DCHECK_LT(0, parts_.size());
+ for (ReplacementPart& part : parts_) {
switch (part.tag) {
case SUBJECT_PREFIX:
if (match_from > 0) builder->AddSubjectSlice(0, match_from);
@@ -1327,15 +1323,19 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
Object::ToLength(isolate, last_index_obj),
String);
last_index = PositiveNumberToUint32(*last_index_obj);
-
- if (last_index > static_cast<uint32_t>(string->length())) last_index = 0;
}
- Handle<Object> match_indices_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, match_indices_obj,
- RegExpImpl::Exec(isolate, regexp, string, last_index, last_match_info),
- String);
+ Handle<Object> match_indices_obj(ReadOnlyRoots(isolate).null_value(),
+ isolate);
+
+ // A lastIndex exceeding the string length always always returns null
+ // (signalling failure) in RegExpBuiltinExec, thus we can skip the call.
+ if (last_index <= static_cast<uint32_t>(string->length())) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, match_indices_obj,
+ RegExpImpl::Exec(isolate, regexp, string,
+ last_index, last_match_info),
+ String);
+ }
if (match_indices_obj->IsNull(isolate)) {
if (sticky) regexp->set_last_index(Smi::kZero, SKIP_WRITE_BARRIER);
@@ -1658,8 +1658,8 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
factory->undefined_value()));
if (result->IsNull(isolate)) {
- string_index = static_cast<uint32_t>(RegExpUtils::AdvanceStringIndex(
- isolate, string, string_index, unicode));
+ string_index = static_cast<uint32_t>(
+ RegExpUtils::AdvanceStringIndex(string, string_index, unicode));
continue;
}
@@ -1673,8 +1673,8 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
const uint32_t end =
std::min(PositiveNumberToUint32(*last_index_obj), length);
if (end == prev_string_index) {
- string_index = static_cast<uint32_t>(RegExpUtils::AdvanceStringIndex(
- isolate, string, string_index, unicode));
+ string_index = static_cast<uint32_t>(
+ RegExpUtils::AdvanceStringIndex(string, string_index, unicode));
continue;
}
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 93a0629783..4772f400b3 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
#include <memory>
#include "src/accessors.h"
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
@@ -15,6 +13,7 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects/module-inl.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -671,8 +670,8 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
- Handle<Context> native_context(isolate->context(), isolate);
- DCHECK(native_context->IsNativeContext());
+ Handle<NativeContext> native_context(NativeContext::cast(isolate->context()),
+ isolate);
Handle<JSGlobalObject> global_object(native_context->global_object(),
isolate);
Handle<ScriptContextTable> script_context_table(
@@ -722,7 +721,7 @@ RUNTIME_FUNCTION(Runtime_PushModuleContext) {
CONVERT_ARG_HANDLE_CHECKED(Module, module, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
- Handle<Context> outer(isolate->context(), isolate);
+ Handle<NativeContext> outer(NativeContext::cast(isolate->context()), isolate);
Handle<Context> context =
isolate->factory()->NewModuleContext(module, outer, scope_info);
isolate->set_context(*context);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 0c3c82deda..f6537fd073 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/conversions.h"
#include "src/counters.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-utils.h"
-#include "src/string-builder.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/string-builder-inl.h"
#include "src/string-search.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 1472b4e2be..0c9ea75d5d 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
-#include "src/string-builder.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/string-builder-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 03251b67e1..94376e1364 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -7,8 +7,8 @@
#include <memory>
#include <sstream>
-#include "src/api.h"
-#include "src/arguments.h"
+#include "src/api-inl.h"
+#include "src/arguments-inl.h"
#include "src/assembler-inl.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
@@ -658,17 +658,6 @@ RUNTIME_FUNCTION(Runtime_SystemBreak) {
}
-// Sets a v8 flag.
-RUNTIME_FUNCTION(Runtime_SetFlags) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(String, arg, 0);
- std::unique_ptr<char[]> flags =
- arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -847,6 +836,23 @@ RUNTIME_FUNCTION(Runtime_GetWasmRecoveredTrapCount) {
return *isolate->factory()->NewNumberFromSize(trap_count);
}
+namespace {
+bool EnableWasmThreads(v8::Local<v8::Context> context) { return true; }
+
+bool DisableWasmThreads(v8::Local<v8::Context> context) { return false; }
+} // namespace
+
+// This runtime function enables WebAssembly threads through an embedder
+// callback and thereby bypasses the value in FLAG_experimental_wasm_threads.
+RUNTIME_FUNCTION(Runtime_SetWasmThreadsEnabled) {
+ DCHECK_EQ(1, args.length());
+ CONVERT_BOOLEAN_ARG_CHECKED(flag, 0);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8_isolate->SetWasmThreadsEnabledCallback(flag ? EnableWasmThreads
+ : DisableWasmThreads);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
RUNTIME_FUNCTION(Runtime_Has##Name) { \
CONVERT_ARG_CHECKED(JSObject, obj, 0); \
@@ -866,11 +872,10 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-
-#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype, s) \
- RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
+#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype) \
+ RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
}
TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
@@ -900,19 +905,18 @@ RUNTIME_FUNCTION(Runtime_PromiseSpeciesProtector) {
// Take a compiled wasm module and serialize it into an array buffer, which is
// then returned.
RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
- HandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
wasm::NativeModule* native_module = module_obj->native_module();
- size_t compiled_size =
- wasm::GetSerializedNativeModuleSize(isolate, native_module);
+ wasm::WasmSerializer wasm_serializer(isolate, native_module);
+ size_t compiled_size = wasm_serializer.GetSerializedNativeModuleSize();
void* array_data = isolate->array_buffer_allocator()->Allocate(compiled_size);
Handle<JSArrayBuffer> array_buffer = isolate->factory()->NewJSArrayBuffer();
JSArrayBuffer::Setup(array_buffer, isolate, false, array_data, compiled_size);
if (!array_data ||
- !wasm::SerializeNativeModule(
- isolate, native_module,
+ !wasm_serializer.SerializeNativeModule(
{reinterpret_cast<uint8_t*>(array_data), compiled_size})) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -922,31 +926,20 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
// Take an array buffer and attempt to reconstruct a compiled wasm module.
// Return undefined if unsuccessful.
RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
- HandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, wire_bytes, 1);
- uint8_t* mem_start = reinterpret_cast<uint8_t*>(buffer->backing_store());
- size_t mem_size = static_cast<size_t>(buffer->byte_length()->Number());
-
// Note that {wasm::DeserializeNativeModule} will allocate. We assume the
- // JSArrayBuffer doesn't get relocated.
- bool already_external = wire_bytes->is_external();
- if (!already_external) {
- wire_bytes->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(*wire_bytes);
- }
+ // JSArrayBuffer backing store doesn't get relocated.
MaybeHandle<WasmModuleObject> maybe_module_object =
wasm::DeserializeNativeModule(
- isolate, {mem_start, mem_size},
- Vector<const uint8_t>(
- reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
- static_cast<int>(wire_bytes->byte_length()->Number())));
- if (!already_external) {
- wire_bytes->set_is_external(false);
- isolate->heap()->RegisterNewArrayBuffer(*wire_bytes);
- }
+ isolate,
+ {reinterpret_cast<uint8_t*>(buffer->backing_store()),
+ static_cast<size_t>(buffer->byte_length()->Number())},
+ {reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
+ static_cast<size_t>(wire_bytes->byte_length()->Number())});
Handle<WasmModuleObject> module_object;
if (!maybe_module_object.ToHandle(&module_object)) {
return ReadOnlyRoots(isolate).undefined_value();
@@ -1005,7 +998,7 @@ RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
}
RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
- HandleScope hs(isolate);
+ HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Smi, info_addr, 0);
@@ -1025,16 +1018,29 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
// TODO(titzer): eliminate dependency on WasmModule definition here.
int func_start =
frame->wasm_instance()->module()->functions[func_index].code.offset();
- wasm::ExecutionEngine eng = frame->wasm_code()->is_liftoff()
- ? wasm::ExecutionEngine::kLiftoff
- : wasm::ExecutionEngine::kTurbofan;
- wasm::TraceMemoryOperation(eng, info, func_index, pos - func_start,
+ wasm::ExecutionTier tier = frame->wasm_code()->is_liftoff()
+ ? wasm::ExecutionTier::kBaseline
+ : wasm::ExecutionTier::kOptimized;
+ wasm::TraceMemoryOperation(tier, info, func_index, pos - func_start,
mem_start);
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
+ CONVERT_SMI_ARG_CHECKED(function_index, 1);
+ if (!isolate->wasm_engine()->CompileFunction(
+ isolate, instance->module_object()->native_module(), function_index,
+ wasm::ExecutionTier::kOptimized)) {
+ return ReadOnlyRoots(isolate).exception();
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
- HandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index d68bb06e82..c101219d2c 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
+#include "src/arguments-inl.h"
#include "src/elements.h"
#include "src/heap/factory.h"
+#include "src/heap/heap-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -123,7 +124,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
Handle<FixedTypedArrayBase> elements(
FixedTypedArrayBase::cast(array->elements()), isolate);
switch (array->type()) {
-#define TYPED_ARRAY_SORT(Type, type, TYPE, ctype, size) \
+#define TYPED_ARRAY_SORT(Type, type, TYPE, ctype) \
case kExternal##Type##Array: { \
ctype* data = static_cast<ctype*>(elements->DataPtr()); \
if (kExternal##Type##Array == kExternalFloat64Array || \
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 4a80ff5d40..e58934ba33 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -122,8 +122,8 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
}
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
// For x32 a 128-bit struct return is done as rax and rdx from the ObjectPair
-// are used in the full codegen and Crankshaft compiler. An alternative is
-// using uint64_t and modifying full codegen and Crankshaft compiler.
+// are used in generated code. An alternative is using uint64_t and modifying
+// generated code.
struct ObjectPair {
Object* x;
uint32_t x_upper;
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 55c549a6cf..5a6c782292 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -2,10 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/assembler.h"
+#include "src/arguments-inl.h"
#include "src/compiler/wasm-compiler.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
@@ -13,6 +10,7 @@
#include "src/heap/factory.h"
#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/runtime/runtime-utils.h"
#include "src/trap-handler/trap-handler.h"
#include "src/v8memory.h"
#include "src/wasm/module-compiler.h"
@@ -195,7 +193,7 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionGetElement) {
CONVERT_SMI_ARG_CHECKED(index, 0);
CHECK_LT(index, Smi::ToInt(values->length()));
auto* vals =
- reinterpret_cast<uint16_t*>(values->GetBuffer()->allocation_base());
+ reinterpret_cast<uint16_t*>(values->GetBuffer()->backing_store());
return Smi::FromInt(vals[index]);
}
}
@@ -265,8 +263,13 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
frame_pointer = it.frame()->fp();
}
- bool success = instance->debug_info()->RunInterpreter(frame_pointer,
- func_index, arg_buffer);
+ // Run the function in the interpreter. Note that neither the {WasmDebugInfo}
+ // nor the {InterpreterHandle} have to exist, because interpretation might
+ // have been triggered by another Isolate sharing the same WasmEngine.
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ bool success = WasmDebugInfo::RunInterpreter(
+ isolate, debug_info, frame_pointer, func_index, arg_buffer);
if (!success) {
DCHECK(isolate->has_pending_exception());
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 64f487398f..ec35131c90 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -4,13 +4,13 @@
#include "src/runtime/runtime.h"
-#include "src/assembler.h"
#include "src/base/hashmap.h"
#include "src/contexts.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/reloc-info.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index accb97d0e6..5a6364f644 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -79,7 +79,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(DefineClass, -1 /* >= 3 */, 1) \
- F(GetSuperConstructor, 1, 1) \
F(HomeObjectSymbol, 0, 1) \
F(LoadFromSuper, 3, 1) \
F(LoadKeyedFromSuper, 3, 1) \
@@ -97,8 +96,6 @@ namespace internal {
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(GetWeakMapEntries, 2, 1) \
F(GetWeakSetValues, 2, 1) \
- F(IsJSWeakMap, 1, 1) \
- F(IsJSWeakSet, 1, 1) \
F(MapGrow, 1, 1) \
F(MapIteratorClone, 1, 1) \
F(MapShrink, 1, 1) \
@@ -203,33 +200,39 @@ namespace internal {
#ifdef V8_INTL_SUPPORT
#define FOR_EACH_INTRINSIC_INTL(F) \
F(AvailableLocalesOf, 1, 1) \
- F(BreakIteratorAdoptText, 2, 1) \
F(BreakIteratorBreakType, 1, 1) \
F(BreakIteratorCurrent, 1, 1) \
F(BreakIteratorFirst, 1, 1) \
F(BreakIteratorNext, 1, 1) \
F(CanonicalizeLanguageTag, 1, 1) \
+ F(CollatorResolvedOptions, 1, 1) \
F(CreateBreakIterator, 3, 1) \
- F(CreateCollator, 3, 1) \
F(CreateDateTimeFormat, 3, 1) \
F(CreateNumberFormat, 3, 1) \
- F(CreatePluralRules, 3, 1) \
F(CurrencyDigits, 1, 1) \
F(DateCacheVersion, 0, 1) \
+ F(DefaultNumberOption, 5, 1) \
+ F(DefineWEProperty, 3, 1) \
+ F(FormatList, 2, 1) \
+ F(FormatListToParts, 2, 1) \
F(GetDefaultICULocale, 0, 1) \
- F(InternalCompare, 3, 1) \
- F(InternalDateFormat, 2, 1) \
- F(InternalNumberFormat, 2, 1) \
+ F(GetNumberOption, 5, 1) \
F(IntlUnwrapReceiver, 5, 1) \
F(IsInitializedIntlObjectOfType, 2, 1) \
+ F(IsWellFormedCurrencyCode, 1, 1) \
F(MarkAsInitializedIntlObjectOfType, 2, 1) \
+ F(ParseExtension, 1, 1) \
+ F(PluralRulesResolvedOptions, 1, 1) \
F(PluralRulesSelect, 2, 1) \
- F(StringLocaleConvertCase, 3, 1) \
+ F(ToDateTimeOptions, 3, 1) \
+ F(ToLocaleDateTime, 6, 1) \
F(StringToLowerCaseIntl, 1, 1) \
- F(StringToUpperCaseIntl, 1, 1)
+ F(StringToUpperCaseIntl, 1, 1) \
+ F(SupportedLocalesOf, 3, 1) \
+// End of macro.
#else
#define FOR_EACH_INTRINSIC_INTL(F)
-#endif
+#endif // V8_INTL_SUPPORT
#define FOR_EACH_INTRINSIC_INTERNAL(F) \
F(AllocateInNewSpace, 1, 1) \
@@ -277,9 +280,11 @@ namespace internal {
F(Typeof, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1)
-#define FOR_EACH_INTRINSIC_LITERALS(F) \
- F(CreateArrayLiteral, 4, 1) \
- F(CreateObjectLiteral, 4, 1) \
+#define FOR_EACH_INTRINSIC_LITERALS(F) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralWithoutAllocationSite, 2, 1) \
+ F(CreateObjectLiteral, 4, 1) \
+ F(CreateObjectLiteralWithoutAllocationSite, 2, 1) \
F(CreateRegExpLiteral, 4, 1)
#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
@@ -295,7 +300,7 @@ namespace internal {
F(IsSmi, 1, 1) \
F(IsValidSmi, 1, 1) \
F(MaxSmi, 0, 1) \
- F(NumberToStringSkipCache, 1, 1) \
+ F(NumberToString, 1, 1) \
F(SmiLexicographicCompare, 2, 1) \
F(StringParseFloat, 1, 1) \
F(StringParseInt, 2, 1) \
@@ -330,7 +335,6 @@ namespace internal {
F(HasProperty, 2, 1) \
F(InternalSetPrototype, 2, 1) \
F(IsJSReceiver, 1, 1) \
- F(IterableToListCanBeElided, 1, 1) \
F(KeyedGetProperty, 2, 1) \
F(NewObject, 2, 1) \
F(ObjectCreate, 2, 1) \
@@ -515,6 +519,7 @@ namespace internal {
F(InNewSpace, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(WasmTierUpFunction, 2, 1) \
F(IsLiftoffFunction, 1, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
@@ -527,7 +532,6 @@ namespace internal {
F(RunningInSimulator, 0, 1) \
F(SerializeWasmModule, 1, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(SetFlags, 1, 1) \
F(SetForceSlowPath, 1, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
@@ -541,7 +545,8 @@ namespace internal {
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
F(WasmTraceMemory, 1, 1) \
- F(WasmMemoryHasFullGuardRegion, 1, 1)
+ F(WasmMemoryHasFullGuardRegion, 1, 1) \
+ F(SetWasmThreadsEnabled, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferNeuter, 1, 1) \
@@ -578,6 +583,8 @@ namespace internal {
F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedStoreIC_Miss, 5, 1) \
F(KeyedStoreIC_Slow, 5, 1) \
+ F(LoadAccessorProperty, 4, 1) \
+ F(LoadCallbackProperty, 4, 1) \
F(LoadElementWithInterceptor, 2, 1) \
F(LoadGlobalIC_Miss, 3, 1) \
F(LoadGlobalIC_Slow, 3, 1) \
@@ -588,7 +595,9 @@ namespace internal {
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
F(StoreInArrayLiteralIC_Slow, 5, 1) \
- F(StorePropertyWithInterceptor, 5, 1)
+ F(StorePropertyWithInterceptor, 5, 1) \
+ F(CloneObjectIC_Miss, 4, 1) \
+ F(CloneObjectIC_Slow, 2, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
FOR_EACH_INTRINSIC_ARRAY(F) \
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index b9a7a1f8eb..e2bf452b62 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -54,8 +54,8 @@ void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
// Jump table entry
- Address target = Memory::Address_at(pc_);
- Memory::Address_at(pc_) = target + delta;
+ Address target = Memory<Address>(pc_);
+ Memory<Address>(pc_) = target + delta;
} else if (IsCodeTarget(rmode_)) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc_));
@@ -78,7 +78,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
@@ -164,8 +164,7 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
- heap->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -202,11 +201,12 @@ void RelocInfo::set_target_runtime_entry(Address target,
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
+ IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
- Memory::Address_at(pc_) = kNullAddress;
- } else if (IsInternalReferenceEncoded(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
+ } else if (IsInternalReferenceEncoded(rmode_) || IsOffHeapTarget(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress,
@@ -293,7 +293,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
set_target_address_at(pc, kNullAddress, target, SKIP_ICACHE_FLUSH);
} else {
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
}
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 4b34bcb070..6f1cbcd2f9 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -826,8 +826,8 @@ void Assembler::EmitRelocations() {
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
- Address pos = Memory::Address_at(pc);
- Memory::Address_at(pc) = reinterpret_cast<Address>(buffer_) + pos;
+ Address pos = Memory<Address>(pc);
+ Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
Address pos = target_address_at(pc, 0);
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index f73897d13e..9e0a9ab32f 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -272,7 +272,6 @@ GENERAL_REGISTERS(DEFINE_REGISTER)
constexpr Register no_reg = Register::no_reg();
// Register aliases
-constexpr Register kLithiumScratch = r1; // lithium scratch.
constexpr Register kRootRegister = r10; // Roots array pointer.
constexpr Register cp = r13; // JavaScript context pointer.
@@ -472,7 +471,7 @@ class DeferredRelocInfo {
intptr_t data_;
};
-class Assembler : public AssemblerBase {
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index a9c1ec17e8..c06a3f636a 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_S390
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index badecb461e..cae0cf60e6 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -34,7 +34,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
+ !RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index e02a3ab215..8dd330b8f8 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -141,27 +141,6 @@ inline Condition NegateCondition(Condition cond) {
return al;
}
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cond) {
- switch (cond) {
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- case eq:
- return eq;
- case ne:
- return ne;
- default:
- DCHECK(false);
- return cond;
- }
-}
-
// -----------------------------------------------------------------------------
// Instructions encoding.
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index 5acd0e7755..f62df67738 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -960,11 +960,6 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-Disassembler::~Disassembler() {}
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
v8::internal::Decoder d(converter_, buffer);
@@ -974,9 +969,10 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
// The S390 assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
NameConverter converter;
- Disassembler d(converter);
+ Disassembler d(converter, unimplemented_action);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index c5fbebc5a0..4b30bc0547 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -244,30 +244,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r2, // argument count (argc)
- r4, // address of first argument (argv)
- r3 // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 584ba9a0db..3ddcd9fd9b 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -20,6 +20,7 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/s390/macro-assembler-s390.h"
@@ -191,6 +192,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do
@@ -204,16 +206,9 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
jump(code, rmode, cond);
}
-int TurboAssembler::CallSize(Register target) { return 2; } // BASR
-
void TurboAssembler::Call(Register target) {
- Label start;
- bind(&start);
-
// Branch to target via indirect branch
basr(r14, target);
-
- DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::CallJSEntry(Register target) {
@@ -221,18 +216,6 @@ void MacroAssembler::CallJSEntry(Register target) {
Call(target);
}
-int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
- Condition cond) {
- // S390 Assembler::move sequence is IILF / IIHF
- int size;
-#if V8_TARGET_ARCH_S390X
- size = 14; // IILF + IIHF + BASR
-#else
- size = 8; // IILF + BASR
-#endif
- return size;
-}
-
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
@@ -250,23 +233,8 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(cond == al);
-#ifdef DEBUG
- // Check the expected size before generating code to ensure we assume the same
- // constant pool availability (e.g., whether constant pool is full or not).
- int expected_size = CallSize(target, rmode, cond);
- Label start;
- bind(&start);
-#endif
-
mov(ip, Operand(target, rmode));
basr(r14, ip);
-
- DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
-}
-
-int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
- return 6; // BRASL
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
@@ -286,6 +254,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
DCHECK(Builtins::IsBuiltinId(builtin_index));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -827,6 +796,9 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
UNIMPLEMENTED();
break;
}
+#ifdef V8_TARGET_ARCH_S390X
+ lghi(dst, Operand::Zero());
+#endif
cfdbr(m, dst, double_input);
}
@@ -851,6 +823,9 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
UNIMPLEMENTED();
break;
}
+#ifdef V8_TARGET_ARCH_S390X
+ lghi(result, Operand::Zero());
+#endif
cfebr(m, result, double_input);
}
@@ -875,6 +850,9 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
UNIMPLEMENTED();
break;
}
+#ifdef V8_TARGET_ARCH_S390X
+ lghi(result, Operand::Zero());
+#endif
clfebr(m, Condition(0), result, double_input);
}
@@ -947,6 +925,9 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
UNIMPLEMENTED();
break;
}
+#ifdef V8_TARGET_ARCH_S390X
+ lghi(dst, Operand::Zero());
+#endif
clfdbr(m, Condition(0), dst, double_input);
}
@@ -1727,6 +1708,18 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ lgfi(r2, Operand(static_cast<int>(reason)));
+ PrepareCallCFunction(1, 0, r3);
+ Move(r3, ExternalReference::abort_with_reason());
+ // Use Call directly to avoid any unneeded overhead. The function won't
+ // return anyway.
+ Call(r3);
+ return;
+ }
+
LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
@@ -1889,7 +1882,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
} else {
stack_space += stack_passed_arguments;
}
- lay(sp, MemOperand(sp, -(stack_space)*kPointerSize));
+ lay(sp, MemOperand(sp, (-stack_space) * kPointerSize));
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
@@ -4392,56 +4385,6 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
StoreDouble(scratch_1, src);
}
-#ifdef DEBUG
-bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8,
- Register reg9, Register reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
- DoubleRegister reg4, DoubleRegister reg5, DoubleRegister reg6,
- DoubleRegister reg7, DoubleRegister reg8, DoubleRegister reg9,
- DoubleRegister reg10) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
- reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
- reg10.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- if (reg9.is_valid()) regs |= reg9.bit();
- if (reg10.is_valid()) regs |= reg10.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index a82f7999f1..54aecf5896 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -36,6 +36,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = r4;
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r3;
constexpr Register kRuntimeCallArgCountRegister = r2;
+constexpr Register kRuntimeCallArgvRegister = r4;
constexpr Register kWasmInstanceRegister = r6;
// ----------------------------------------------------------------------------
@@ -56,14 +57,6 @@ inline MemOperand RootMemOperand(Heap::RootListIndex index) {
return MemOperand(kRootRegister, index << kPointerSizeLog2);
}
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@@ -74,22 +67,8 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
-#ifdef DEBUG
-bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
- Register reg4 = no_reg, Register reg5 = no_reg,
- Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg, Register reg9 = no_reg,
- Register reg10 = no_reg);
-bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
- DoubleRegister reg3 = no_dreg, DoubleRegister reg4 = no_dreg,
- DoubleRegister reg5 = no_dreg, DoubleRegister reg6 = no_dreg,
- DoubleRegister reg7 = no_dreg, DoubleRegister reg8 = no_dreg,
- DoubleRegister reg9 = no_dreg, DoubleRegister reg10 = no_dreg);
-#endif
-
// These exist to provide portability between 32 and 64bit
#if V8_TARGET_ARCH_S390X
-#define Div divd
// The length of the arithmetic operation is the length
// of the register.
@@ -172,7 +151,7 @@ bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
#endif
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -185,10 +164,6 @@ class TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
- // Returns the size of a call in instructions.
- static int CallSize(Register target);
- int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
-
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
@@ -203,9 +178,6 @@ class TurboAssembler : public TurboAssemblerBase {
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
Condition cond = al);
void Ret() { b(r14); }
@@ -681,11 +653,6 @@ class TurboAssembler : public TurboAssemblerBase {
AddP(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
}
- // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
- // from C.
- // Does not handle errors.
- void FlushICache(Register address, size_t size, Register scratch);
-
// If the value is a NaN, canonicalize the value else, do nothing.
void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
void CanonicalizeNaN(const DoubleRegister value) {
@@ -1301,13 +1268,6 @@ class MacroAssembler : public TurboAssembler {
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr, Register scratch,
- SaveFPRegsMode save_fp);
-
void CallJSEntry(Register target);
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc
index e5062a7e2c..5765038084 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/safepoint-table.cc
@@ -41,8 +41,8 @@ SafepointTable::SafepointTable(Address instruction_start,
stack_slots_(stack_slots),
has_deopt_(has_deopt) {
Address header = instruction_start_ + safepoint_table_offset;
- length_ = Memory::uint32_at(header + kLengthOffset);
- entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
+ length_ = Memory<uint32_t>(header + kLengthOffset);
+ entry_size_ = Memory<uint32_t>(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
entries_ = pc_and_deoptimization_indexes_ + (length_ * kFixedEntrySize);
DCHECK_GT(entry_size_, 0);
@@ -121,9 +121,8 @@ void SafepointTable::PrintBits(std::ostream& os, // NOLINT
}
}
-
-void Safepoint::DefinePointerRegister(Register reg, Zone* zone) {
- registers_->Add(reg.code(), zone);
+void Safepoint::DefinePointerRegister(Register reg) {
+ registers_->push_back(reg.code());
}
@@ -133,20 +132,19 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
int arguments,
Safepoint::DeoptMode deopt_mode) {
DCHECK_GE(arguments, 0);
- deoptimization_info_.Add(
- DeoptimizationInfo(zone_, assembler->pc_offset(), arguments, kind),
- zone_);
+ deoptimization_info_.push_back(
+ DeoptimizationInfo(zone_, assembler->pc_offset(), arguments, kind));
if (deopt_mode == Safepoint::kNoLazyDeopt) {
- last_lazy_safepoint_ = deoptimization_info_.length();
+ last_lazy_safepoint_ = deoptimization_info_.size();
}
- DeoptimizationInfo& new_info = deoptimization_info_.last();
+ DeoptimizationInfo& new_info = deoptimization_info_.back();
return Safepoint(new_info.indexes, new_info.registers);
}
-
void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
- while (last_lazy_safepoint_ < deoptimization_info_.length()) {
- deoptimization_info_[last_lazy_safepoint_++].deopt_index = index;
+ for (auto it = deoptimization_info_.Find(last_lazy_safepoint_);
+ it != deoptimization_info_.end(); it++, last_lazy_safepoint_++) {
+ it->deopt_index = index;
}
}
@@ -157,17 +155,15 @@ unsigned SafepointTableBuilder::GetCodeOffset() const {
int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
int start) {
- int index = -1;
- for (int i = start; i < deoptimization_info_.length(); i++) {
- if (static_cast<int>(deoptimization_info_[i].pc) == pc) {
- index = i;
- break;
+ int index = start;
+ for (auto it = deoptimization_info_.Find(start);
+ it != deoptimization_info_.end(); it++, index++) {
+ if (static_cast<int>(it->pc) == pc) {
+ it->trampoline = trampoline;
+ return index;
}
}
- CHECK_GE(index, 0);
- DCHECK(index < deoptimization_info_.length());
- deoptimization_info_[index].trampoline = trampoline;
- return index;
+ UNREACHABLE();
}
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
@@ -186,25 +182,23 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
// Emit the table header.
- int length = deoptimization_info_.length();
+ int length = static_cast<int>(deoptimization_info_.size());
assembler->dd(length);
assembler->dd(bytes_per_entry);
// Emit sorted table of pc offsets together with deoptimization indexes.
- for (int i = 0; i < length; i++) {
- const DeoptimizationInfo& info = deoptimization_info_[i];
+ for (const DeoptimizationInfo& info : deoptimization_info_) {
assembler->dd(info.pc);
assembler->dd(EncodeExceptPC(info));
assembler->dd(info.trampoline);
}
// Emit table of bitmaps.
- ZoneList<uint8_t> bits(bytes_per_entry, zone_);
- for (int i = 0; i < length; i++) {
- ZoneList<int>* indexes = deoptimization_info_[i].indexes;
- ZoneList<int>* registers = deoptimization_info_[i].registers;
- bits.Clear();
- bits.AddBlock(0, bytes_per_entry, zone_);
+ ZoneVector<uint8_t> bits(bytes_per_entry, 0, zone_);
+ for (const DeoptimizationInfo& info : deoptimization_info_) {
+ ZoneChunkList<int>* indexes = info.indexes;
+ ZoneChunkList<int>* registers = info.registers;
+ std::fill(bits.begin(), bits.end(), 0);
// Run through the registers (if any).
DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
@@ -214,8 +208,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
bits[j] = SafepointTable::kNoRegisters;
}
} else {
- for (int j = 0; j < registers->length(); j++) {
- int index = registers->at(j);
+ for (int index : *registers) {
DCHECK(index >= 0 && index < kNumSafepointRegisters);
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
@@ -224,8 +217,8 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
}
// Run through the indexes and build a bitmap.
- for (int j = 0; j < indexes->length(); j++) {
- int index = bits_per_entry - 1 - indexes->at(j);
+ for (int idx : *indexes) {
+ int index = bits_per_entry - 1 - idx;
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
bits[byte_index] |= (1U << bit_index);
@@ -251,19 +244,19 @@ void SafepointTableBuilder::RemoveDuplicates() {
// kMaxUInt32. This especially compacts the table for wasm code without tagged
// pointers and without deoptimization info.
- int length = deoptimization_info_.length();
- if (length < 2) return;
+ if (deoptimization_info_.size() < 2) return;
- // Check that all entries (1, length] are identical to entry 0.
- const DeoptimizationInfo& first_info = deoptimization_info_[0];
- for (int i = 1; i < length; ++i) {
- if (!IsIdenticalExceptForPc(first_info, deoptimization_info_[i])) return;
+ // Check that all entries (1, size] are identical to entry 0.
+ const DeoptimizationInfo& first_info = deoptimization_info_.front();
+ for (auto it = deoptimization_info_.Find(1); it != deoptimization_info_.end();
+ it++) {
+ if (!IsIdenticalExceptForPc(first_info, *it)) return;
}
// If we get here, all entries were identical. Rewind the list to just one
// entry, and set the pc to kMaxUInt32.
deoptimization_info_.Rewind(1);
- deoptimization_info_[0].pc = kMaxUInt32;
+ deoptimization_info_.front().pc = kMaxUInt32;
}
bool SafepointTableBuilder::IsIdenticalExceptForPc(
@@ -273,20 +266,21 @@ bool SafepointTableBuilder::IsIdenticalExceptForPc(
if (info1.deopt_index != info2.deopt_index) return false;
- ZoneList<int>* indexes1 = info1.indexes;
- ZoneList<int>* indexes2 = info2.indexes;
- if (indexes1->length() != indexes2->length()) return false;
- for (int i = 0; i < indexes1->length(); ++i) {
- if (indexes1->at(i) != indexes2->at(i)) return false;
+ ZoneChunkList<int>* indexes1 = info1.indexes;
+ ZoneChunkList<int>* indexes2 = info2.indexes;
+ if (indexes1->size() != indexes2->size()) return false;
+ if (!std::equal(indexes1->begin(), indexes1->end(), indexes2->begin())) {
+ return false;
}
- ZoneList<int>* registers1 = info1.registers;
- ZoneList<int>* registers2 = info2.registers;
+ ZoneChunkList<int>* registers1 = info1.registers;
+ ZoneChunkList<int>* registers2 = info2.registers;
if (registers1) {
if (!registers2) return false;
- if (registers1->length() != registers2->length()) return false;
- for (int i = 0; i < registers1->length(); ++i) {
- if (registers1->at(i) != registers2->at(i)) return false;
+ if (registers1->size() != registers2->size()) return false;
+ if (!std::equal(registers1->begin(), registers1->end(),
+ registers2->begin())) {
+ return false;
}
} else if (registers2) {
return false;
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index 5c6b413fa1..e85a27fcb3 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -9,6 +9,7 @@
#include "src/assert-scope.h"
#include "src/utils.h"
#include "src/v8memory.h"
+#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -102,22 +103,22 @@ class SafepointTable BASE_EMBEDDED {
unsigned GetPcOffset(unsigned index) const {
DCHECK(index < length_);
- return Memory::uint32_at(GetPcOffsetLocation(index));
+ return Memory<uint32_t>(GetPcOffsetLocation(index));
}
int GetTrampolinePcOffset(unsigned index) const {
DCHECK(index < length_);
- return Memory::int_at(GetTrampolineLocation(index));
+ return Memory<int>(GetTrampolineLocation(index));
}
unsigned find_return_pc(unsigned pc_offset);
SafepointEntry GetEntry(unsigned index) const {
DCHECK(index < length_);
- unsigned info = Memory::uint32_at(GetInfoLocation(index));
- uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
+ unsigned info = Memory<uint32_t>(GetInfoLocation(index));
+ uint8_t* bits = &Memory<uint8_t>(entries_ + (index * entry_size_));
int trampoline_pc =
- has_deopt_ ? Memory::int_at(GetTrampolineLocation(index)) : -1;
+ has_deopt_ ? Memory<int>(GetTrampolineLocation(index)) : -1;
return SafepointEntry(info, bits, trampoline_pc);
}
@@ -188,14 +189,14 @@ class Safepoint BASE_EMBEDDED {
static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
- void DefinePointerSlot(int index, Zone* zone) { indexes_->Add(index, zone); }
- void DefinePointerRegister(Register reg, Zone* zone);
+ void DefinePointerSlot(int index) { indexes_->push_back(index); }
+ void DefinePointerRegister(Register reg);
private:
- Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers)
+ Safepoint(ZoneChunkList<int>* indexes, ZoneChunkList<int>* registers)
: indexes_(indexes), registers_(registers) {}
- ZoneList<int>* const indexes_;
- ZoneList<int>* const registers_;
+ ZoneChunkList<int>* const indexes_;
+ ZoneChunkList<int>* const registers_;
friend class SafepointTableBuilder;
};
@@ -204,10 +205,10 @@ class Safepoint BASE_EMBEDDED {
class SafepointTableBuilder BASE_EMBEDDED {
public:
explicit SafepointTableBuilder(Zone* zone)
- : deoptimization_info_(32, zone),
+ : deoptimization_info_(zone),
emitted_(false),
last_lazy_safepoint_(0),
- zone_(zone) { }
+ zone_(zone) {}
// Get the offset of the emitted safepoint table in the code.
unsigned GetCodeOffset() const;
@@ -222,7 +223,7 @@ class SafepointTableBuilder BASE_EMBEDDED {
// outstanding safepoints.
void RecordLazyDeoptimizationIndex(int index);
void BumpLastLazySafepointIndex() {
- last_lazy_safepoint_ = deoptimization_info_.length();
+ last_lazy_safepoint_ = deoptimization_info_.size();
}
// Emit the safepoint table after the body. The number of bits per
@@ -241,8 +242,8 @@ class SafepointTableBuilder BASE_EMBEDDED {
unsigned arguments;
bool has_doubles;
int trampoline;
- ZoneList<int>* indexes;
- ZoneList<int>* registers;
+ ZoneChunkList<int>* indexes;
+ ZoneChunkList<int>* registers;
unsigned deopt_index;
DeoptimizationInfo(Zone* zone, unsigned pc, unsigned arguments,
Safepoint::Kind kind)
@@ -250,9 +251,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
arguments(arguments),
has_doubles(kind & Safepoint::kWithDoubles),
trampoline(-1),
- indexes(new (zone) ZoneList<int>(8, zone)),
+ indexes(new (zone) ZoneChunkList<int>(
+ zone, ZoneChunkList<int>::StartMode::kSmall)),
registers(kind & Safepoint::kWithRegisters
- ? new (zone) ZoneList<int>(4, zone)
+ ? new (zone) ZoneChunkList<int>(
+ zone, ZoneChunkList<int>::StartMode::kSmall)
: nullptr),
deopt_index(Safepoint::kNoDeoptimizationIndex) {}
};
@@ -264,11 +267,11 @@ class SafepointTableBuilder BASE_EMBEDDED {
// If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
void RemoveDuplicates();
- ZoneList<DeoptimizationInfo> deoptimization_info_;
+ ZoneChunkList<DeoptimizationInfo> deoptimization_info_;
unsigned offset_;
bool emitted_;
- int last_lazy_safepoint_;
+ size_t last_lazy_safepoint_;
Zone* zone_;
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/simulator-base.h
index afd01fb18d..1bada2b812 100644
--- a/deps/v8/src/simulator-base.h
+++ b/deps/v8/src/simulator-base.h
@@ -7,7 +7,6 @@
#include <type_traits>
-#include "src/assembler.h"
#include "src/globals.h"
#include "src/isolate.h"
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index e91799cdad..5db7cae94b 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -57,7 +57,6 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
// TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
// context independent.
if (script->ContainsAsmModule()) return nullptr;
- if (isolate->debug()->is_active()) return nullptr;
isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
@@ -206,8 +205,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
debug_bytecode_array = debug_info->DebugBytecodeArray();
sfi->SetDebugBytecodeArray(debug_info->OriginalBytecodeArray());
}
- sfi->set_function_identifier_or_debug_info(
- debug_info->function_identifier());
+ sfi->set_script_or_debug_info(debug_info->script());
}
DCHECK(!sfi->HasDebugInfo());
@@ -219,7 +217,7 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Restore debug info
if (debug_info != nullptr) {
- sfi->set_function_identifier_or_debug_info(debug_info);
+ sfi->set_script_or_debug_info(debug_info);
if (debug_bytecode_array != nullptr) {
sfi->SetDebugBytecodeArray(debug_bytecode_array);
}
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index c4878811ec..d1f19ef081 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -5,7 +5,6 @@
#ifndef V8_SNAPSHOT_CODE_SERIALIZER_H_
#define V8_SNAPSHOT_CODE_SERIALIZER_H_
-#include "src/parsing/preparse-data.h"
#include "src/snapshot/serializer.h"
namespace v8 {
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index dcb37ce63c..3ed360e14a 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -5,9 +5,12 @@
#include "src/snapshot/deserializer.h"
#include "src/assembler-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/isolate.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/string.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
@@ -207,15 +210,17 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
ExternalOneByteString* string = ExternalOneByteString::cast(obj);
DCHECK(string->is_short());
- string->set_resource(
- NativesExternalStringResource::DecodeForDeserialization(
- string->resource()));
+ string->SetResource(
+ isolate_, NativesExternalStringResource::DecodeForDeserialization(
+ string->resource()));
} else {
ExternalString* string = ExternalString::cast(obj);
uint32_t index = string->resource_as_uint32();
Address address =
static_cast<Address>(isolate_->api_external_references()[index]);
string->set_address_as_resource(address);
+ isolate_->heap()->UpdateExternalString(string, 0,
+ string->ExternalPayloadSize());
}
isolate_->heap()->RegisterExternalString(String::cast(obj));
} else if (obj->IsJSTypedArray()) {
@@ -319,8 +324,7 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
return obj;
}
-// This routine writes the new object into the pointer provided and then
-// returns true if the new object was in young space and false otherwise.
+// This routine writes the new object into the pointer provided.
// The reason for this strange interface is that otherwise the object is
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
@@ -506,14 +510,13 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
// object.
case kExternalReference + kPlain + kStartOfObject:
current = reinterpret_cast<MaybeObject**>(ReadExternalReferenceCase(
- kPlain, isolate, reinterpret_cast<void**>(current),
- current_object_address));
+ kPlain, reinterpret_cast<void**>(current), current_object_address));
break;
// Find an external reference and write a pointer to it in the current
// code object.
case kExternalReference + kFromCode + kStartOfObject:
current = reinterpret_cast<MaybeObject**>(ReadExternalReferenceCase(
- kFromCode, isolate, reinterpret_cast<void**>(current),
+ kFromCode, reinterpret_cast<void**>(current),
current_object_address));
break;
@@ -711,10 +714,9 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
UnalignedCopy(current, &hot_maybe_object);
if (write_barrier_needed && Heap::InNewSpace(hot_object)) {
Address current_address = reinterpret_cast<Address>(current);
- isolate->heap()->RecordWrite(
- HeapObject::FromAddress(current_object_address),
- reinterpret_cast<MaybeObject**>(current_address),
- hot_maybe_object);
+ GenerationalBarrier(HeapObject::FromAddress(current_object_address),
+ reinterpret_cast<MaybeObject**>(current_address),
+ hot_maybe_object);
}
current++;
break;
@@ -761,8 +763,7 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
template <class AllocatorT>
void** Deserializer<AllocatorT>::ReadExternalReferenceCase(
- HowToCode how, Isolate* isolate, void** current,
- Address current_object_address) {
+ HowToCode how, void** current, Address current_object_address) {
int skip = source_.GetInt();
current = reinterpret_cast<void**>(reinterpret_cast<Address>(current) + skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
@@ -873,10 +874,9 @@ MaybeObject** Deserializer<AllocatorT>::ReadDataCase(
if (emit_write_barrier && write_barrier_needed) {
Address current_address = reinterpret_cast<Address>(current);
SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
- isolate->heap()->RecordWrite(
- HeapObject::FromAddress(current_object_address),
- reinterpret_cast<MaybeObject**>(current_address),
- *reinterpret_cast<MaybeObject**>(current_address));
+ GenerationalBarrier(HeapObject::FromAddress(current_object_address),
+ reinterpret_cast<MaybeObject**>(current_address),
+ *reinterpret_cast<MaybeObject**>(current_address));
}
if (!current_was_incremented) {
current++;
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index d3b57b2137..f13bc03fd4 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -124,8 +124,7 @@ class Deserializer : public SerializerDeserializer {
// A helper function for ReadData for reading external references.
// Returns the new value of {current}.
- inline void** ReadExternalReferenceCase(HowToCode how, Isolate* isolate,
- void** current,
+ inline void** ReadExternalReferenceCase(HowToCode how, void** current,
Address current_object_address);
void ReadObject(int space_number, MaybeObject** write_back,
diff --git a/deps/v8/src/snapshot/macros.h b/deps/v8/src/snapshot/macros.h
index 5ea6917c16..8551281614 100644
--- a/deps/v8/src/snapshot/macros.h
+++ b/deps/v8/src/snapshot/macros.h
@@ -31,7 +31,11 @@
#else // !MACOSX && !WIN && !AIX
#define V8_ASM_MANGLE_LABEL ""
#define V8_ASM_RODATA_SECTION ".section .rodata\n"
+#if defined(OS_CHROMEOS) // ChromeOS
+#define V8_ASM_TEXT_SECTION ".section .text.hot.embedded\n"
+#else
#define V8_ASM_TEXT_SECTION ".section .text\n"
+#endif
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
#define V8_ASM_DECLARE(NAME) ".global " V8_ASM_MANGLE_LABEL NAME "\n"
#else
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 9516108749..a2303613d6 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -153,6 +153,9 @@ class SnapshotWriter {
static void WriteEmbeddedFileData(FILE* fp, const i::EmbeddedData* blob,
const char* embedded_variant) {
+ fprintf(fp, "V8_EMBEDDED_TEXT_HEADER(v8_%s_embedded_blob_)\n",
+ embedded_variant);
+#ifdef V8_OS_MACOSX
// Note: On some platforms (observed on mac64), inserting labels into the
// .byte stream causes the compiler to reorder symbols, invalidating stored
// offsets.
@@ -161,16 +164,47 @@ class SnapshotWriter {
// there since the chrome build process on mac verifies the order of symbols
// present in the binary.
// For now, the straight-forward solution seems to be to just emit a pure
- // .byte stream.
- fprintf(fp, "V8_EMBEDDED_TEXT_HEADER(v8_%s_embedded_blob_)\n",
- embedded_variant);
+ // .byte stream on OSX.
WriteBinaryContentsAsByteDirective(fp, blob->data(), blob->size());
+#else
+ WriteBinaryContentsAsByteDirective(fp, blob->data(),
+ i::EmbeddedData::RawDataOffset());
+ WriteBuiltins(fp, blob, embedded_variant);
+#endif
fprintf(fp, "extern \"C\" const uint8_t v8_%s_embedded_blob_[];\n",
embedded_variant);
fprintf(fp, "static const uint32_t v8_embedded_blob_size_ = %d;\n\n",
blob->size());
}
+ static void WriteBuiltins(FILE* fp, const i::EmbeddedData* blob,
+ const char* embedded_variant) {
+ const bool is_default_variant =
+ std::strcmp(embedded_variant, "Default") == 0;
+ for (int i = 0; i < i::Builtins::builtin_count; i++) {
+ if (!blob->ContainsBuiltin(i)) continue;
+
+ // Labels created here will show up in backtraces. We check in
+ // Isolate::SetEmbeddedBlob that the blob layout remains unchanged, i.e.
+ // that labels do not insert bytes into the middle of the blob byte
+ // stream.
+ if (is_default_variant) {
+ // Create nicer symbol names for the default mode.
+ fprintf(fp, "__asm__(V8_ASM_LABEL(\"Builtins_%s\"));\n",
+ i::Builtins::name(i));
+ } else {
+ fprintf(fp, "__asm__(V8_ASM_LABEL(\"%s_Builtins_%s\"));\n",
+ embedded_variant, i::Builtins::name(i));
+ }
+
+ WriteBinaryContentsAsByteDirective(
+ fp,
+ reinterpret_cast<const uint8_t*>(blob->InstructionStartOfBuiltin(i)),
+ blob->PaddedInstructionSizeOfBuiltin(i));
+ }
+ fprintf(fp, "\n");
+ }
+
static void WriteBinaryContentsAsByteDirective(FILE* fp, const uint8_t* data,
uint32_t size) {
static const int kTextWidth = 80;
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 2fb86867d0..aabc5bf1e0 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -59,7 +59,7 @@ void ObjectDeserializer::
DCHECK(deserializing_user_code());
for (Code* code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
- isolate()->heap()->RecordWritesIntoCode(code);
+ WriteBarrierForCode(code);
Assembler::FlushICache(code->raw_instruction_start(),
code->raw_instruction_size());
}
@@ -85,8 +85,9 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
ScriptEvent(Logger::ScriptEventType::kDeserialize, script->id()));
LOG(isolate(), ScriptDetails(*script));
// Add script to list.
- Handle<Object> list =
- FixedArrayOfWeakCells::Add(isolate(), factory->script_list(), script);
+ Handle<WeakArrayList> list = factory->script_list();
+ list = WeakArrayList::AddToEnd(isolate(), list,
+ MaybeObjectHandle::Weak(script));
heap->SetRootScriptList(*list);
}
}
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 626106a353..a772594636 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -4,7 +4,7 @@
#include "src/snapshot/partial-deserializer.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/snapshot.h"
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 5161629fa4..d127aa5f0a 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -5,7 +5,7 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index f4db5513bb..34a6b64676 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -10,7 +10,7 @@
#include "src/external-reference-table.h"
#include "src/globals.h"
#include "src/snapshot/references.h"
-#include "src/utils.h"
+#include "src/v8memory.h"
#include "src/visitors.h"
namespace v8 {
@@ -300,8 +300,10 @@ class SerializedData {
SerializedData(byte* data, int size)
: data_(data), size_(size), owns_data_(false) {}
SerializedData() : data_(nullptr), size_(0), owns_data_(false) {}
- SerializedData(SerializedData&& other)
- : data_(other.data_), size_(other.size_), owns_data_(other.owns_data_) {
+ SerializedData(SerializedData&& other) V8_NOEXCEPT
+ : data_(other.data_),
+ size_(other.size_),
+ owns_data_(other.owns_data_) {
// Ensure |other| will not attempt to destroy our data in destructor.
other.owns_data_ = false;
}
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 67644b83f3..56d87b8916 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -5,8 +5,11 @@
#include "src/snapshot/serializer.h"
#include "src/assembler-inl.h"
+#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/code.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/map.h"
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/natives.h"
@@ -23,21 +26,19 @@ Serializer<AllocatorT>::Serializer(Isolate* isolate)
allocator_(this) {
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
- instance_type_count_ = NewArray<int>(kInstanceTypes);
- instance_type_size_ = NewArray<size_t>(kInstanceTypes);
- read_only_instance_type_count_ = NewArray<int>(kInstanceTypes);
- read_only_instance_type_size_ = NewArray<size_t>(kInstanceTypes);
- for (int i = 0; i < kInstanceTypes; i++) {
- instance_type_count_[i] = 0;
- instance_type_size_[i] = 0;
- read_only_instance_type_count_[i] = 0;
- read_only_instance_type_size_[i] = 0;
+ for (int space = 0; space < LAST_SPACE; ++space) {
+ instance_type_count_[space] = NewArray<int>(kInstanceTypes);
+ instance_type_size_[space] = NewArray<size_t>(kInstanceTypes);
+ for (int i = 0; i < kInstanceTypes; i++) {
+ instance_type_count_[space][i] = 0;
+ instance_type_size_[space][i] = 0;
+ }
}
} else {
- instance_type_count_ = nullptr;
- instance_type_size_ = nullptr;
- read_only_instance_type_count_ = nullptr;
- read_only_instance_type_size_ = nullptr;
+ for (int space = 0; space < LAST_SPACE; ++space) {
+ instance_type_count_[space] = nullptr;
+ instance_type_size_[space] = nullptr;
+ }
}
#endif // OBJECT_PRINT
}
@@ -46,11 +47,11 @@ template <class AllocatorT>
Serializer<AllocatorT>::~Serializer() {
if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
- if (instance_type_count_ != nullptr) {
- DeleteArray(instance_type_count_);
- DeleteArray(instance_type_size_);
- DeleteArray(read_only_instance_type_count_);
- DeleteArray(read_only_instance_type_size_);
+ for (int space = 0; space < LAST_SPACE; ++space) {
+ if (instance_type_count_[space] != nullptr) {
+ DeleteArray(instance_type_count_[space]);
+ DeleteArray(instance_type_size_[space]);
+ }
}
#endif // OBJECT_PRINT
}
@@ -60,13 +61,8 @@ template <class AllocatorT>
void Serializer<AllocatorT>::CountInstanceType(Map* map, int size,
AllocationSpace space) {
int instance_type = map->instance_type();
- if (space != RO_SPACE) {
- instance_type_count_[instance_type]++;
- instance_type_size_[instance_type] += size;
- } else {
- read_only_instance_type_count_[instance_type]++;
- read_only_instance_type_size_[instance_type] += size;
- }
+ instance_type_count_[space][instance_type]++;
+ instance_type_size_[space][instance_type] += size;
}
#endif // OBJECT_PRINT
@@ -79,28 +75,18 @@ void Serializer<AllocatorT>::OutputStatistics(const char* name) {
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name) \
- if (instance_type_count_[Name]) { \
- PrintF("%10d %10" PRIuS " %s\n", instance_type_count_[Name], \
- instance_type_size_[Name], #Name); \
+#define PRINT_INSTANCE_TYPE(Name) \
+ for (int space = 0; space < LAST_SPACE; ++space) { \
+ if (instance_type_count_[space][Name]) { \
+ PrintF("%10d %10" PRIuS " %-10s %s\n", \
+ instance_type_count_[space][Name], \
+ instance_type_size_[space][Name], \
+ AllocationSpaceName(static_cast<AllocationSpace>(space)), #Name); \
+ } \
}
INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
- size_t read_only_total = 0;
-#define UPDATE_TOTAL(Name) \
- read_only_total += read_only_instance_type_size_[Name];
- INSTANCE_TYPE_LIST(UPDATE_TOTAL)
-#undef UPDATE_TOTAL
- if (read_only_total > 0) {
- PrintF("\n Read Only Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name) \
- if (read_only_instance_type_count_[Name]) { \
- PrintF("%10d %10" PRIuS " %s\n", read_only_instance_type_count_[Name], \
- read_only_instance_type_size_[Name], #Name); \
- }
- INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
-#undef PRINT_INSTANCE_TYPE
- }
+
PrintF("\n");
#endif // OBJECT_PRINT
}
@@ -130,11 +116,16 @@ void Serializer<AllocatorT>::VisitRootPointers(Root root,
if (root == Root::kBuiltins || root == Root::kDispatchTable) return;
for (Object** current = start; current < end; current++) {
- if ((*current)->IsSmi()) {
- PutSmi(Smi::cast(*current));
- } else {
- SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
- }
+ SerializeRootObject(*current);
+ }
+}
+
+template <class AllocatorT>
+void Serializer<AllocatorT>::SerializeRootObject(Object* object) {
+ if (object->IsSmi()) {
+ PutSmi(Smi::cast(object));
+ } else {
+ SerializeObject(HeapObject::cast(object), kPlain, kStartOfObject, 0);
}
}
@@ -260,7 +251,7 @@ void Serializer<AllocatorT>::PutRoot(
// Assert that the first 32 root array items are a conscious choice. They are
// chosen so that the most common ones can be encoded more efficiently.
- STATIC_ASSERT(Heap::kEmptyDescriptorArrayRootIndex ==
+ STATIC_ASSERT(Heap::kArgumentsMarkerRootIndex ==
kNumberOfRootArrayConstants - 1);
if (how_to_code == kPlain && where_to_point == kStartOfObject &&
@@ -1007,9 +998,10 @@ void Serializer<AllocatorT>::ObjectSerializer::OutputCode(int size) {
int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
rinfo->WipeOut();
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 658d37f286..9427cb6c78 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -170,6 +170,7 @@ class Serializer : public SerializerDeserializer {
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override;
+ void SerializeRootObject(Object* object);
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
@@ -253,10 +254,8 @@ class Serializer : public SerializerDeserializer {
#ifdef OBJECT_PRINT
static const int kInstanceTypes = LAST_TYPE + 1;
- int* instance_type_count_;
- size_t* instance_type_size_;
- int* read_only_instance_type_count_;
- size_t* read_only_instance_type_size_;
+ int* instance_type_count_[LAST_SPACE];
+ size_t* instance_type_size_[LAST_SPACE];
#endif // OBJECT_PRINT
#ifdef DEBUG
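
For reference, a minimal standalone sketch of the per-space statistics layout the serializer.cc/serializer.h hunks above switch to (one counter pair per allocation space and instance type, instead of separate flat arrays for regular and read-only objects). kNumSpaces and kInstanceTypes below are placeholder values, not V8's real LAST_SPACE / LAST_TYPE + 1:

// Illustrative sketch only, not part of the patch.
#include <cstddef>
#include <cstdio>

namespace {

constexpr int kNumSpaces = 4;      // placeholder for LAST_SPACE
constexpr int kInstanceTypes = 8;  // placeholder for LAST_TYPE + 1

int* instance_type_count[kNumSpaces];
size_t* instance_type_size[kNumSpaces];

void Init() {
  for (int space = 0; space < kNumSpaces; ++space) {
    instance_type_count[space] = new int[kInstanceTypes]();   // zero-initialized
    instance_type_size[space] = new size_t[kInstanceTypes]();
  }
}

// Same shape as Serializer::CountInstanceType after the change:
// the space is now part of the index rather than an if/else branch.
void Count(int space, int instance_type, size_t size) {
  instance_type_count[space][instance_type]++;
  instance_type_size[space][instance_type] += size;
}

void Output() {
  for (int space = 0; space < kNumSpaces; ++space) {
    for (int t = 0; t < kInstanceTypes; ++t) {
      if (instance_type_count[space][t]) {
        std::printf("%10d %10zu space=%d type=%d\n",
                    instance_type_count[space][t],
                    instance_type_size[space][t], space, t);
      }
    }
  }
}

}  // namespace

int main() {
  Init();
  Count(0, 3, 64);   // an object of type 3 in space 0
  Count(2, 3, 128);  // the same type in another space stays separate
  Output();
}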
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 5da7bb0f49..31f378792b 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -6,7 +6,6 @@
#include "src/snapshot/snapshot.h"
-#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/base/platform/platform.h"
#include "src/callable.h"
@@ -308,6 +307,10 @@ bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
case Builtins::TFJ:
case Builtins::TFS:
break;
+
+ // Bytecode handlers will only ever be used by the interpreter and so there
+ // will never be a need to use trampolines with them.
+ case Builtins::BCH:
case Builtins::API:
case Builtins::ASM:
// TODO(jgruber): Extend checks to remaining kinds.
@@ -347,6 +350,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
// On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for isolate
// independent builtins in the snapshot. This fixes up the relative jumps
// to the right offsets in the snapshot.
+ // See also: Code::IsIsolateIndependent.
while (!on_heap_it.done()) {
DCHECK(!off_heap_it.done());
@@ -355,8 +359,10 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
CHECK(Builtins::IsIsolateIndependentBuiltin(target));
+ // Do not emit write-barrier for off-heap writes.
off_heap_it.rinfo()->set_target_address(
- blob->InstructionStartOfBuiltin(target->builtin_index()));
+ blob->InstructionStartOfBuiltin(target->builtin_index()),
+ SKIP_WRITE_BARRIER);
on_heap_it.next();
off_heap_it.next();
@@ -378,8 +384,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
// Store instruction stream lengths and offsets.
- std::vector<uint32_t> lengths(kTableSize);
- std::vector<uint32_t> offsets(kTableSize);
+ std::vector<struct Metadata> metadata(kTableSize);
bool saw_unsafe_builtin = false;
uint32_t raw_data_size = 0;
@@ -395,6 +400,16 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
saw_unsafe_builtin = true;
fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
}
+ if (Builtins::IsWasmRuntimeStub(i) &&
+ RelocInfo::RequiresRelocation(code)) {
+ // Wasm additionally requires that its runtime stubs must be
+ // individually PIC (i.e. we must be able to copy each stub outside the
+ // embedded area without relocations). In particular, that means
+ // pc-relative calls to other builtins are disallowed.
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s is a wasm runtime stub but needs relocation.\n",
+ Builtins::name(i));
+ }
if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
saw_unsafe_builtin = true;
fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
@@ -404,14 +419,13 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
uint32_t length = static_cast<uint32_t>(code->raw_instruction_size());
DCHECK_EQ(0, raw_data_size % kCodeAlignment);
- offsets[i] = raw_data_size;
- lengths[i] = length;
+ metadata[i].instructions_offset = raw_data_size;
+ metadata[i].instructions_length = length;
// Align the start of each instruction stream.
- raw_data_size += RoundUp<kCodeAlignment>(length);
+ raw_data_size += PadAndAlign(length);
} else {
- offsets[i] = raw_data_size;
- lengths[i] = 0;
+ metadata[i].instructions_offset = raw_data_size;
}
}
CHECK_WITH_MSG(
@@ -421,22 +435,23 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
"If in doubt, ask jgruber@");
const uint32_t blob_size = RawDataOffset() + raw_data_size;
- uint8_t* blob = new uint8_t[blob_size];
- std::memset(blob, 0, blob_size);
+ uint8_t* const blob = new uint8_t[blob_size];
+ uint8_t* const raw_data_start = blob + RawDataOffset();
- // Write the offsets and length tables.
- DCHECK_EQ(OffsetsSize(), sizeof(offsets[0]) * offsets.size());
- std::memcpy(blob + OffsetsOffset(), offsets.data(), OffsetsSize());
+ // Initially zap the entire blob, effectively padding the alignment area
+ // between two builtins with int3's (on x64/ia32).
+ ZapCode(reinterpret_cast<Address>(blob), blob_size);
- DCHECK_EQ(LengthsSize(), sizeof(lengths[0]) * lengths.size());
- std::memcpy(blob + LengthsOffset(), lengths.data(), LengthsSize());
+ // Write the metadata tables.
+ DCHECK_EQ(MetadataSize(), sizeof(metadata[0]) * metadata.size());
+ std::memcpy(blob + MetadataOffset(), metadata.data(), MetadataSize());
// Write the raw data section.
for (int i = 0; i < Builtins::builtin_count; i++) {
if (!Builtins::IsIsolateIndependent(i)) continue;
Code* code = builtins->builtin(i);
- uint32_t offset = offsets[i];
- uint8_t* dst = blob + RawDataOffset() + offset;
+ uint32_t offset = metadata[i].instructions_offset;
+ uint8_t* dst = raw_data_start + offset;
DCHECK_LE(RawDataOffset() + offset + code->raw_instruction_size(),
blob_size);
std::memcpy(dst, reinterpret_cast<uint8_t*>(code->raw_instruction_start()),
@@ -471,8 +486,8 @@ EmbeddedData EmbeddedData::FromBlob() {
Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
DCHECK(Builtins::IsBuiltinId(i));
- const uint32_t* offsets = Offsets();
- const uint8_t* result = RawData() + offsets[i];
+ const struct Metadata* metadata = Metadata();
+ const uint8_t* result = RawData() + metadata[i].instructions_offset;
DCHECK_LE(result, data_ + size_);
DCHECK_IMPLIES(result == data_ + size_, InstructionSizeOfBuiltin(i) == 0);
return reinterpret_cast<Address>(result);
@@ -480,8 +495,8 @@ Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
DCHECK(Builtins::IsBuiltinId(i));
- const uint32_t* lengths = Lengths();
- return lengths[i];
+ const struct Metadata* metadata = Metadata();
+ return metadata[i].instructions_length;
}
size_t EmbeddedData::CreateHash() const {
@@ -520,8 +535,7 @@ void EmbeddedData::PrintStatistics() const {
const int k90th = embedded_count * 0.90;
const int k99th = embedded_count * 0.99;
- const int metadata_size =
- static_cast<int>(HashSize() + OffsetsSize() + LengthsSize());
+ const int metadata_size = static_cast<int>(HashSize() + MetadataSize());
PrintF("EmbeddedData:\n");
PrintF(" Total size: %d\n",
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 26f1cdb44b..b973ebb356 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -96,7 +96,7 @@ class EmbeddedData final {
// Padded with kCodeAlignment.
uint32_t PaddedInstructionSizeOfBuiltin(int i) const {
- return RoundUp<kCodeAlignment>(InstructionSizeOfBuiltin(i));
+ return PadAndAlign(InstructionSizeOfBuiltin(i));
}
size_t CreateHash() const;
@@ -104,41 +104,49 @@ class EmbeddedData final {
return *reinterpret_cast<const size_t*>(data_ + HashOffset());
}
+ struct Metadata {
+ // Blob layout information.
+ uint32_t instructions_offset;
+ uint32_t instructions_length;
+ };
+ STATIC_ASSERT(offsetof(Metadata, instructions_offset) == 0);
+ STATIC_ASSERT(offsetof(Metadata, instructions_length) == kUInt32Size);
+ STATIC_ASSERT(sizeof(Metadata) == kUInt32Size + kUInt32Size);
+
// The layout of the blob is as follows:
//
- // [0] hash of the remaining blob
- // [1] offset of instruction stream 0
- // ... offsets
- // [N + 1] length of instruction stream 0
- // ... lengths
- // ... instruction streams
+ // [0] hash of the remaining blob
+ // [1] metadata of instruction stream 0
+ // ... metadata
+ // ... instruction streams
static constexpr uint32_t kTableSize = Builtins::builtin_count;
static constexpr uint32_t HashOffset() { return 0; }
static constexpr uint32_t HashSize() { return kSizetSize; }
- static constexpr uint32_t OffsetsOffset() {
+ static constexpr uint32_t MetadataOffset() {
return HashOffset() + HashSize();
}
- static constexpr uint32_t OffsetsSize() { return kUInt32Size * kTableSize; }
- static constexpr uint32_t LengthsOffset() {
- return OffsetsOffset() + OffsetsSize();
+ static constexpr uint32_t MetadataSize() {
+ return sizeof(struct Metadata) * kTableSize;
}
- static constexpr uint32_t LengthsSize() { return kUInt32Size * kTableSize; }
static constexpr uint32_t RawDataOffset() {
- return RoundUp<kCodeAlignment>(LengthsOffset() + LengthsSize());
+ return PadAndAlign(MetadataOffset() + MetadataSize());
}
private:
EmbeddedData(const uint8_t* data, uint32_t size) : data_(data), size_(size) {}
- const uint32_t* Offsets() const {
- return reinterpret_cast<const uint32_t*>(data_ + OffsetsOffset());
- }
- const uint32_t* Lengths() const {
- return reinterpret_cast<const uint32_t*>(data_ + LengthsOffset());
+ const Metadata* Metadata() const {
+ return reinterpret_cast<const struct Metadata*>(data_ + MetadataOffset());
}
const uint8_t* RawData() const { return data_ + RawDataOffset(); }
+ static constexpr int PadAndAlign(int size) {
+ // Ensure we have at least one byte trailing the actual builtin
+ // instructions which we can later fill with int3.
+ return RoundUp<kCodeAlignment>(size + 1);
+ }
+
void PrintStatistics() const;
const uint8_t* data_;
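
For reference, a minimal standalone sketch of the EmbeddedData blob layout defined above: a hash, then one Metadata entry (offset, length) per builtin, then the padded instruction streams, where PadAndAlign rounds size + 1 up to kCodeAlignment so at least one trailing byte remains for int3 padding. kCodeAlignment and the builtin sizes below are placeholder values, not V8's:

// Illustrative sketch only, not part of the patch.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uint32_t kCodeAlignment = 32;  // placeholder value

struct Metadata {
  uint32_t instructions_offset;
  uint32_t instructions_length;
};

// Round up to kCodeAlignment, reserving at least one trailing padding byte
// (the patch fills that padding with int3 on x64/ia32 via ZapCode).
constexpr uint32_t PadAndAlign(uint32_t size) {
  return ((size + 1) + kCodeAlignment - 1) / kCodeAlignment * kCodeAlignment;
}

constexpr uint32_t HashOffset() { return 0; }
constexpr uint32_t HashSize() { return sizeof(size_t); }
constexpr uint32_t MetadataOffset() { return HashOffset() + HashSize(); }

int main() {
  // Three fake "builtins" with the given instruction sizes (0 = not embedded).
  std::vector<uint32_t> sizes = {100, 0, 7};
  std::vector<Metadata> metadata(sizes.size());

  uint32_t raw_data_size = 0;
  for (size_t i = 0; i < sizes.size(); ++i) {
    metadata[i].instructions_offset = raw_data_size;
    metadata[i].instructions_length = sizes[i];
    if (sizes[i] != 0) raw_data_size += PadAndAlign(sizes[i]);
  }

  const uint32_t metadata_size =
      static_cast<uint32_t>(sizeof(Metadata) * metadata.size());
  const uint32_t raw_data_offset = PadAndAlign(MetadataOffset() + metadata_size);

  std::printf("metadata table at %u, raw data at %u, total %u bytes\n",
              MetadataOffset(), raw_data_offset, raw_data_offset + raw_data_size);
  for (size_t i = 0; i < metadata.size(); ++i) {
    std::printf("builtin %zu: offset %u, length %u\n", i,
                metadata[i].instructions_offset,
                metadata[i].instructions_length);
  }
}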
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 34c23a6077..9ad6cda5d1 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -70,8 +70,9 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
} else if (obj->IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (!shared->IsSubjectToDebugging() && shared->HasInferredName()) {
- shared->set_inferred_name(ReadOnlyRoots(isolate()).empty_string());
+ if (!shared->IsSubjectToDebugging() && shared->HasUncompiledData()) {
+ shared->uncompiled_data()->set_inferred_name(
+ ReadOnlyRoots(isolate()).empty_string());
}
}
@@ -131,24 +132,13 @@ void StartupSerializer::VisitRootPointers(Root root, const char* description,
Object** start, Object** end) {
if (start == isolate()->heap()->roots_array_start()) {
// Serializing the root list needs special handling:
- // - The first pass over the root list only serializes immortal immovables.
- // - The second pass over the root list serializes the rest.
// - Only root list elements that have been fully serialized can be
- // referenced via as root by using kRootArray bytecodes.
- int skip = 0;
+ // referenced using kRootArray bytecodes.
for (Object** current = start; current < end; current++) {
+ SerializeRootObject(*current);
int root_index = static_cast<int>(current - start);
- if ((*current)->IsSmi()) {
- FlushSkip(skip);
- PutSmi(Smi::cast(*current));
- } else {
- SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject,
- skip);
- }
root_has_been_serialized_.set(root_index);
- skip = 0;
}
- FlushSkip(skip);
} else {
Serializer::VisitRootPointers(root, description, start, end);
}
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 190cc59529..cf334d10b2 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -18,10 +18,10 @@ class StartupSerializer : public Serializer<> {
~StartupSerializer() override;
// Serialize the current state of the heap. The order is:
- // 1) Immortal immovable roots
- // 2) Remaining strong references.
- // 3) Partial snapshot cache.
- // 4) Weak references (e.g. the string table).
+ // 1) Strong roots
+ // 2) Builtins and bytecode handlers
+ // 3) Partial snapshot cache
+ // 4) Weak references (e.g. the string table)
void SerializeStrongReferences();
void SerializeWeakReferencesAndDeferred();
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder-inl.h
index aa11161620..dccdb3d01a 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder-inl.h
@@ -2,14 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRING_BUILDER_H_
-#define V8_STRING_BUILDER_H_
+#ifndef V8_STRING_BUILDER_INL_H_
+#define V8_STRING_BUILDER_INL_H_
#include "src/assert-scope.h"
-#include "src/handles.h"
+#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/objects/fixed-array.h"
+#include "src/objects/string-inl.h"
#include "src/utils.h"
namespace v8 {
@@ -24,161 +26,33 @@ typedef BitField<int, kStringBuilderConcatHelperLengthBits,
kStringBuilderConcatHelperPositionBits>
StringBuilderSubstringPosition;
-
template <typename sinkchar>
-static inline void StringBuilderConcatHelper(String* special, sinkchar* sink,
- FixedArray* fixed_array,
- int array_length) {
- DisallowHeapAllocation no_gc;
- int position = 0;
- for (int i = 0; i < array_length; i++) {
- Object* element = fixed_array->get(i);
- if (element->IsSmi()) {
- // Smi encoding of position and length.
- int encoded_slice = Smi::ToInt(element);
- int pos;
- int len;
- if (encoded_slice > 0) {
- // Position and length encoded in one smi.
- pos = StringBuilderSubstringPosition::decode(encoded_slice);
- len = StringBuilderSubstringLength::decode(encoded_slice);
- } else {
- // Position and length encoded in two smis.
- Object* obj = fixed_array->get(++i);
- DCHECK(obj->IsSmi());
- pos = Smi::ToInt(obj);
- len = -encoded_slice;
- }
- String::WriteToFlat(special, sink + position, pos, pos + len);
- position += len;
- } else {
- String* string = String::cast(element);
- int element_length = string->length();
- String::WriteToFlat(string, sink + position, 0, element_length);
- position += element_length;
- }
- }
-}
-
+void StringBuilderConcatHelper(String* special, sinkchar* sink,
+ FixedArray* fixed_array, int array_length);
// Returns the result length of the concatenation.
// On illegal argument, -1 is returned.
-static inline int StringBuilderConcatLength(int special_length,
- FixedArray* fixed_array,
- int array_length, bool* one_byte) {
- DisallowHeapAllocation no_gc;
- int position = 0;
- for (int i = 0; i < array_length; i++) {
- int increment = 0;
- Object* elt = fixed_array->get(i);
- if (elt->IsSmi()) {
- // Smi encoding of position and length.
- int smi_value = Smi::ToInt(elt);
- int pos;
- int len;
- if (smi_value > 0) {
- // Position and length encoded in one smi.
- pos = StringBuilderSubstringPosition::decode(smi_value);
- len = StringBuilderSubstringLength::decode(smi_value);
- } else {
- // Position and length encoded in two smis.
- len = -smi_value;
- // Get the position and check that it is a positive smi.
- i++;
- if (i >= array_length) return -1;
- Object* next_smi = fixed_array->get(i);
- if (!next_smi->IsSmi()) return -1;
- pos = Smi::ToInt(next_smi);
- if (pos < 0) return -1;
- }
- DCHECK_GE(pos, 0);
- DCHECK_GE(len, 0);
- if (pos > special_length || len > special_length - pos) return -1;
- increment = len;
- } else if (elt->IsString()) {
- String* element = String::cast(elt);
- int element_length = element->length();
- increment = element_length;
- if (*one_byte && !element->HasOnlyOneByteChars()) {
- *one_byte = false;
- }
- } else {
- return -1;
- }
- if (increment > String::kMaxLength - position) {
- return kMaxInt; // Provoke throw on allocation.
- }
- position += increment;
- }
- return position;
-}
-
+int StringBuilderConcatLength(int special_length, FixedArray* fixed_array,
+ int array_length, bool* one_byte);
class FixedArrayBuilder {
public:
- explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
- : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
- length_(0),
- has_non_smi_elements_(false) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- DCHECK_GT(initial_capacity, 0);
- }
+ explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity);
+ explicit FixedArrayBuilder(Handle<FixedArray> backing_store);
- explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
- : array_(backing_store), length_(0), has_non_smi_elements_(false) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- DCHECK_GT(backing_store->length(), 0);
- }
+ bool HasCapacity(int elements);
+ void EnsureCapacity(Isolate* isolate, int elements);
- bool HasCapacity(int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- return (length >= required_length);
- }
-
- void EnsureCapacity(Isolate* isolate, int elements) {
- int length = array_->length();
- int required_length = length_ + elements;
- if (length < required_length) {
- int new_length = length;
- do {
- new_length *= 2;
- } while (new_length < required_length);
- Handle<FixedArray> extended_array =
- isolate->factory()->NewFixedArrayWithHoles(new_length);
- array_->CopyTo(0, *extended_array, 0, length_);
- array_ = extended_array;
- }
- }
-
- void Add(Object* value) {
- DCHECK(!value->IsSmi());
- DCHECK(length_ < capacity());
- array_->set(length_, value);
- length_++;
- has_non_smi_elements_ = true;
- }
-
- void Add(Smi* value) {
- DCHECK(value->IsSmi());
- DCHECK(length_ < capacity());
- array_->set(length_, value);
- length_++;
- }
+ void Add(Object* value);
+ void Add(Smi* value);
Handle<FixedArray> array() { return array_; }
int length() { return length_; }
- int capacity() { return array_->length(); }
+ int capacity();
- Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
- JSArray::SetContent(target_array, array_);
- target_array->set_length(Smi::FromInt(length_));
- return target_array;
- }
+ Handle<JSArray> ToJSArray(Handle<JSArray> target_array);
private:
Handle<FixedArray> array_;
@@ -186,20 +60,10 @@ class FixedArrayBuilder {
bool has_non_smi_elements_;
};
-
class ReplacementStringBuilder {
public:
ReplacementStringBuilder(Heap* heap, Handle<String> subject,
- int estimated_part_count)
- : heap_(heap),
- array_builder_(heap->isolate(), estimated_part_count),
- subject_(subject),
- character_count_(0),
- is_one_byte_(subject->IsOneByteRepresentation()) {
- // Require a non-zero initial size. Ensures that doubling the size to
- // extend the array will work.
- DCHECK_GT(estimated_part_count, 0);
- }
+ int estimated_part_count);
static inline void AddSubjectSlice(FixedArrayBuilder* builder, int from,
int to) {
@@ -218,30 +82,17 @@ class ReplacementStringBuilder {
}
}
- void EnsureCapacity(int elements) {
- array_builder_.EnsureCapacity(heap_->isolate(), elements);
- }
+ void EnsureCapacity(int elements);
void AddSubjectSlice(int from, int to) {
AddSubjectSlice(&array_builder_, from, to);
IncrementCharacterCount(to - from);
}
-
- void AddString(Handle<String> string) {
- int length = string->length();
- DCHECK_GT(length, 0);
- AddElement(*string);
- if (!string->IsOneByteRepresentation()) {
- is_one_byte_ = false;
- }
- IncrementCharacterCount(length);
- }
-
+ void AddString(Handle<String> string);
MaybeHandle<String> ToString();
-
void IncrementCharacterCount(int by) {
if (character_count_ > String::kMaxLength - by) {
STATIC_ASSERT(String::kMaxLength < kMaxInt);
@@ -252,11 +103,7 @@ class ReplacementStringBuilder {
}
private:
- void AddElement(Object* element) {
- DCHECK(element->IsSmi() || element->IsString());
- DCHECK(array_builder_.capacity() > array_builder_.length());
- array_builder_.Add(element);
- }
+ void AddElement(Object* element);
Heap* heap_;
FixedArrayBuilder array_builder_;
@@ -265,7 +112,6 @@ class ReplacementStringBuilder {
bool is_one_byte_;
};
-
class IncrementalStringBuilder {
public:
explicit IncrementalStringBuilder(Isolate* isolate);
@@ -323,9 +169,7 @@ class IncrementalStringBuilder {
V8_INLINE bool HasOverflowed() const { return overflowed_; }
- V8_INLINE int Length() const {
- return accumulator_->length() + current_index_;
- }
+ int Length() const;
// Change encoding to two-byte.
void ChangeEncoding() {
@@ -442,7 +286,6 @@ class IncrementalStringBuilder {
Handle<String> current_part_;
};
-
template <typename SrcChar, typename DestChar>
void IncrementalStringBuilder::Append(SrcChar c) {
DCHECK_EQ(encoding_ == String::ONE_BYTE_ENCODING, sizeof(DestChar) == 1);
@@ -460,4 +303,4 @@ void IncrementalStringBuilder::Append(SrcChar c) {
} // namespace internal
} // namespace v8
-#endif // V8_STRING_BUILDER_H_
+#endif // V8_STRING_BUILDER_INL_H_
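
For reference, a minimal standalone sketch of the one-smi/two-smi slice encoding that StringBuilderConcatHelper and StringBuilderConcatLength decode: a positive word packs position and length together, a negative word stores -length with the position in the following word. The 11-bit length field below is a placeholder split, not V8's StringBuilderSubstringLength/Position layout:

// Illustrative sketch only, not part of the patch.
#include <cassert>
#include <cstdio>
#include <vector>

constexpr int kLengthBits = 11;                  // placeholder split
constexpr int kLengthMask = (1 << kLengthBits) - 1;

// One-word form: positive value, length in the low bits, position above.
int EncodeOneWord(int pos, int len) {
  assert(len > 0 && len <= kLengthMask);
  return (pos << kLengthBits) | len;
}

// Two-word form: a negative first word holds -length, the next word holds pos.
void EncodeTwoWords(std::vector<int>* out, int pos, int len) {
  out->push_back(-len);
  out->push_back(pos);
}

int main() {
  std::vector<int> slices;
  slices.push_back(EncodeOneWord(/*pos=*/42, /*len=*/7));
  EncodeTwoWords(&slices, /*pos=*/100000, /*len=*/5000);  // too long for one word

  for (size_t i = 0; i < slices.size(); ++i) {
    int encoded = slices[i];
    int pos, len;
    if (encoded > 0) {
      pos = encoded >> kLengthBits;
      len = encoded & kLengthMask;
    } else {
      len = -encoded;
      pos = slices[++i];  // position lives in the next word
    }
    std::printf("slice at %d, length %d\n", pos, len);
  }
}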
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index bee5db9508..66ccb77184 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -2,14 +2,193 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/string-builder.h"
+#include "src/string-builder-inl.h"
#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/js-array-inl.h"
namespace v8 {
namespace internal {
+template <typename sinkchar>
+void StringBuilderConcatHelper(String* special, sinkchar* sink,
+ FixedArray* fixed_array, int array_length) {
+ DisallowHeapAllocation no_gc;
+ int position = 0;
+ for (int i = 0; i < array_length; i++) {
+ Object* element = fixed_array->get(i);
+ if (element->IsSmi()) {
+ // Smi encoding of position and length.
+ int encoded_slice = Smi::ToInt(element);
+ int pos;
+ int len;
+ if (encoded_slice > 0) {
+ // Position and length encoded in one smi.
+ pos = StringBuilderSubstringPosition::decode(encoded_slice);
+ len = StringBuilderSubstringLength::decode(encoded_slice);
+ } else {
+ // Position and length encoded in two smis.
+ Object* obj = fixed_array->get(++i);
+ DCHECK(obj->IsSmi());
+ pos = Smi::ToInt(obj);
+ len = -encoded_slice;
+ }
+ String::WriteToFlat(special, sink + position, pos, pos + len);
+ position += len;
+ } else {
+ String* string = String::cast(element);
+ int element_length = string->length();
+ String::WriteToFlat(string, sink + position, 0, element_length);
+ position += element_length;
+ }
+ }
+}
+
+template void StringBuilderConcatHelper<uint8_t>(String* special, uint8_t* sink,
+ FixedArray* fixed_array,
+ int array_length);
+
+template void StringBuilderConcatHelper<uc16>(String* special, uc16* sink,
+ FixedArray* fixed_array,
+ int array_length);
+
+int StringBuilderConcatLength(int special_length, FixedArray* fixed_array,
+ int array_length, bool* one_byte) {
+ DisallowHeapAllocation no_gc;
+ int position = 0;
+ for (int i = 0; i < array_length; i++) {
+ int increment = 0;
+ Object* elt = fixed_array->get(i);
+ if (elt->IsSmi()) {
+ // Smi encoding of position and length.
+ int smi_value = Smi::ToInt(elt);
+ int pos;
+ int len;
+ if (smi_value > 0) {
+ // Position and length encoded in one smi.
+ pos = StringBuilderSubstringPosition::decode(smi_value);
+ len = StringBuilderSubstringLength::decode(smi_value);
+ } else {
+ // Position and length encoded in two smis.
+ len = -smi_value;
+ // Get the position and check that it is a positive smi.
+ i++;
+ if (i >= array_length) return -1;
+ Object* next_smi = fixed_array->get(i);
+ if (!next_smi->IsSmi()) return -1;
+ pos = Smi::ToInt(next_smi);
+ if (pos < 0) return -1;
+ }
+ DCHECK_GE(pos, 0);
+ DCHECK_GE(len, 0);
+ if (pos > special_length || len > special_length - pos) return -1;
+ increment = len;
+ } else if (elt->IsString()) {
+ String* element = String::cast(elt);
+ int element_length = element->length();
+ increment = element_length;
+ if (*one_byte && !element->HasOnlyOneByteChars()) {
+ *one_byte = false;
+ }
+ } else {
+ return -1;
+ }
+ if (increment > String::kMaxLength - position) {
+ return kMaxInt; // Provoke throw on allocation.
+ }
+ position += increment;
+ }
+ return position;
+}
+
+FixedArrayBuilder::FixedArrayBuilder(Isolate* isolate, int initial_capacity)
+ : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
+ length_(0),
+ has_non_smi_elements_(false) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ DCHECK_GT(initial_capacity, 0);
+}
+
+FixedArrayBuilder::FixedArrayBuilder(Handle<FixedArray> backing_store)
+ : array_(backing_store), length_(0), has_non_smi_elements_(false) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ DCHECK_GT(backing_store->length(), 0);
+}
+
+bool FixedArrayBuilder::HasCapacity(int elements) {
+ int length = array_->length();
+ int required_length = length_ + elements;
+ return (length >= required_length);
+}
+
+void FixedArrayBuilder::EnsureCapacity(Isolate* isolate, int elements) {
+ int length = array_->length();
+ int required_length = length_ + elements;
+ if (length < required_length) {
+ int new_length = length;
+ do {
+ new_length *= 2;
+ } while (new_length < required_length);
+ Handle<FixedArray> extended_array =
+ isolate->factory()->NewFixedArrayWithHoles(new_length);
+ array_->CopyTo(0, *extended_array, 0, length_);
+ array_ = extended_array;
+ }
+}
+
+void FixedArrayBuilder::Add(Object* value) {
+ DCHECK(!value->IsSmi());
+ DCHECK(length_ < capacity());
+ array_->set(length_, value);
+ length_++;
+ has_non_smi_elements_ = true;
+}
+
+void FixedArrayBuilder::Add(Smi* value) {
+ DCHECK(value->IsSmi());
+ DCHECK(length_ < capacity());
+ array_->set(length_, value);
+ length_++;
+}
+
+int FixedArrayBuilder::capacity() { return array_->length(); }
+
+Handle<JSArray> FixedArrayBuilder::ToJSArray(Handle<JSArray> target_array) {
+ JSArray::SetContent(target_array, array_);
+ target_array->set_length(Smi::FromInt(length_));
+ return target_array;
+}
+
+ReplacementStringBuilder::ReplacementStringBuilder(Heap* heap,
+ Handle<String> subject,
+ int estimated_part_count)
+ : heap_(heap),
+ array_builder_(heap->isolate(), estimated_part_count),
+ subject_(subject),
+ character_count_(0),
+ is_one_byte_(subject->IsOneByteRepresentation()) {
+ // Require a non-zero initial size. Ensures that doubling the size to
+ // extend the array will work.
+ DCHECK_GT(estimated_part_count, 0);
+}
+
+void ReplacementStringBuilder::EnsureCapacity(int elements) {
+ array_builder_.EnsureCapacity(heap_->isolate(), elements);
+}
+
+void ReplacementStringBuilder::AddString(Handle<String> string) {
+ int length = string->length();
+ DCHECK_GT(length, 0);
+ AddElement(*string);
+ if (!string->IsOneByteRepresentation()) {
+ is_one_byte_ = false;
+ }
+ IncrementCharacterCount(length);
+}
+
MaybeHandle<String> ReplacementStringBuilder::ToString() {
Isolate* isolate = heap_->isolate();
if (array_builder_.length() == 0) {
@@ -44,6 +223,11 @@ MaybeHandle<String> ReplacementStringBuilder::ToString() {
return joined_string;
}
+void ReplacementStringBuilder::AddElement(Object* element) {
+ DCHECK(element->IsSmi() || element->IsString());
+ DCHECK(array_builder_.capacity() > array_builder_.length());
+ array_builder_.Add(element);
+}
IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
: isolate_(isolate),
@@ -58,6 +242,9 @@ IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
factory()->NewRawOneByteString(part_length_).ToHandleChecked();
}
+int IncrementalStringBuilder::Length() const {
+ return accumulator_->length() + current_index_;
+}
void IncrementalStringBuilder::Accumulate(Handle<String> new_part) {
Handle<String> new_accumulator;
diff --git a/deps/v8/src/string-hasher-inl.h b/deps/v8/src/string-hasher-inl.h
index caf0e082ba..35948c385b 100644
--- a/deps/v8/src/string-hasher-inl.h
+++ b/deps/v8/src/string-hasher-inl.h
@@ -5,9 +5,11 @@
#ifndef V8_STRING_HASHER_INL_H_
#define V8_STRING_HASHER_INL_H_
+#include "src/string-hasher.h"
+
#include "src/char-predicates-inl.h"
#include "src/objects.h"
-#include "src/string-hasher.h"
+#include "src/objects/string-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 80e8b2837b..3ab70c4ffa 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -9,6 +9,7 @@
#include "src/handles-inl.h"
#include "src/log.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/prototype.h"
namespace v8 {
diff --git a/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h b/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h
index 5668e5ad9e..c91afa7c5d 100644
--- a/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h
+++ b/deps/v8/src/third_party/utf8-decoder/utf8-decoder.h
@@ -67,7 +67,6 @@ static inline void Decode(uint8_t byte, State* state, uint32_t* buffer) {
0, 0, 36, 36, 0, 0, 0, 0, 0, 0, 0, 0, // 4-byte mid/high = 96
};
- DCHECK_NE(*state, State::kReject);
uint8_t type = transitions[byte];
*state = static_cast<State>(states[*state + type]);
*buffer = (*buffer << 6) | (byte & (0x7F >> (type >> 1)));
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 53fc289351..5c49855359 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -184,7 +184,7 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
Local<String>::Cast(script->GetScriptName());
temp_file_name =
new char[script_name->Utf8Length(event->isolate) + 1];
- script_name->WriteUtf8(temp_file_name);
+ script_name->WriteUtf8(event->isolate, temp_file_name);
jmethod.source_file_name = temp_file_name;
}
diff --git a/deps/v8/src/torque/Torque.g4 b/deps/v8/src/torque/Torque.g4
deleted file mode 100644
index 1497aaf9c1..0000000000
--- a/deps/v8/src/torque/Torque.g4
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-grammar Torque;
-
-options {
- language=Cpp;
-}
-
-// parser rules start with lowercase letters, lexer rules with uppercase
-MACRO: 'macro';
-BUILTIN: 'builtin';
-RUNTIME: 'runtime';
-MODULE: 'module';
-JAVASCRIPT: 'javascript';
-DEFERRED: 'deferred';
-IF: 'if';
-FOR: 'for';
-WHILE: 'while';
-RETURN: 'return';
-CONSTEXPR: 'constexpr';
-CONTINUE: 'continue';
-BREAK: 'break';
-GOTO: 'goto';
-OTHERWISE: 'otherwise';
-TRY: 'try';
-LABEL: 'label';
-LABELS: 'labels';
-TAIL: 'tail';
-ISNT: 'isnt';
-IS: 'is';
-LET: 'let';
-CONST: 'const';
-EXTERN: 'extern';
-ASSERT_TOKEN: 'assert';
-CHECK_TOKEN: 'check';
-UNREACHABLE_TOKEN: 'unreachable';
-DEBUG_TOKEN: 'debug';
-
-ASSIGNMENT: '=';
-ASSIGNMENT_OPERATOR
- : '*='
- | '/='
- | '%='
- | '+='
- | '-='
- | '<<='
- | '>>='
- | '>>>='
- | '&='
- | '^='
- | '|='
- ;
-
-EQUAL: '==';
-PLUS: '+';
-MINUS: '-';
-MULTIPLY: '*';
-DIVIDE: '/';
-MODULO: '%';
-BIT_OR: '|';
-BIT_AND: '&';
-BIT_NOT: '~';
-MAX: 'max';
-MIN: 'min';
-NOT_EQUAL: '!=';
-LESS_THAN: '<';
-LESS_THAN_EQUAL: '<=';
-GREATER_THAN: '>';
-GREATER_THAN_EQUAL: '>=';
-SHIFT_LEFT: '<<';
-SHIFT_RIGHT: '>>';
-SHIFT_RIGHT_ARITHMETIC: '>>>';
-VARARGS: '...';
-
-EQUALITY_OPERATOR: EQUAL | NOT_EQUAL;
-
-INCREMENT: '++';
-DECREMENT: '--';
-NOT: '!';
-
-STRING_LITERAL : ('"' ( ESCAPE | ~('"' | '\\' | '\n' | '\r') ) * '"')
- | ('\'' ( ESCAPE | ~('\'' | '\\' | '\n' | '\r') ) * '\'');
-fragment ESCAPE : '\\' ( '\'' | '\\' | '"' | 'n' | 'r' );
-
-IDENTIFIER : [A-Za-z][0-9A-Za-z_]* ;
-
-WS : [ \t\r\n\f]+ -> channel(HIDDEN);
-
-BLOCK_COMMENT
- : '/*' .*? ('*/' | EOF) -> channel(HIDDEN)
- ;
-
-LINE_COMMENT
- : '//' ~[\r\n]* -> channel(HIDDEN)
- ;
-
-fragment DECIMAL_DIGIT
- : [0-9]
- ;
-
-fragment DECIMAL_INTEGER_LITERAL
- : '0'
- | [1-9] DECIMAL_DIGIT*
- ;
-
-fragment EXPONENT_PART
- : [eE] [+-]? DECIMAL_DIGIT+
- ;
-
-DECIMAL_LITERAL
- : MINUS? DECIMAL_INTEGER_LITERAL '.' DECIMAL_DIGIT* EXPONENT_PART?
- | MINUS? '.' DECIMAL_DIGIT+ EXPONENT_PART?
- | MINUS? DECIMAL_INTEGER_LITERAL EXPONENT_PART?
- | MINUS? '0x' [0-9a-fA-F]+
- ;
-
-type : CONSTEXPR? IDENTIFIER
- | BUILTIN '(' typeList ')' '=>' type
- | type BIT_OR type
- | '(' type ')'
- ;
-
-typeList : (type (',' type)*)?;
-genericSpecializationTypeList: '<' typeList '>';
-
-optionalGenericTypeList: ('<' IDENTIFIER ':' 'type' (',' IDENTIFIER ':' 'type')* '>')?;
-
-typeListMaybeVarArgs: '(' type? (',' type)* (',' VARARGS)? ')'
- | '(' VARARGS ')';
-
-labelParameter: IDENTIFIER ( '(' typeList ')' )?;
-
-optionalType: (':' type)?;
-optionalLabelList: (LABELS labelParameter (',' labelParameter)*)?;
-optionalOtherwise: (OTHERWISE IDENTIFIER (',' IDENTIFIER)*)?;
-
-parameter: IDENTIFIER ':' type?;
-parameterList
- : '(' parameter? (',' parameter)* ')'
- | '(' parameter ',' parameter ',' VARARGS IDENTIFIER ')';
-labelDeclaration: IDENTIFIER parameterList?;
-
-expression
- : conditionalExpression;
-
-conditionalExpression
- : logicalORExpression
- | conditionalExpression '?' logicalORExpression ':' logicalORExpression;
-
-logicalORExpression
- : logicalANDExpression
- | logicalORExpression '||' logicalANDExpression;
-
-logicalANDExpression
- : bitwiseExpression
- | logicalANDExpression '&&' bitwiseExpression;
-
-bitwiseExpression
- : equalityExpression
- | bitwiseExpression op=(BIT_AND | BIT_OR) equalityExpression;
-
-equalityExpression
- : relationalExpression
- | equalityExpression
- op=(EQUAL | NOT_EQUAL)
- relationalExpression;
-
-relationalExpression
- : shiftExpression
- | relationalExpression
- op=(LESS_THAN | LESS_THAN_EQUAL | GREATER_THAN | GREATER_THAN_EQUAL)
- shiftExpression;
-
-shiftExpression
- : additiveExpression
- | shiftExpression op=(SHIFT_RIGHT | SHIFT_LEFT | SHIFT_RIGHT_ARITHMETIC) additiveExpression;
-
-additiveExpression
- : multiplicativeExpression
- | additiveExpression op=(PLUS | MINUS) multiplicativeExpression;
-
-multiplicativeExpression
- : unaryExpression
- | multiplicativeExpression op=(MULTIPLY | DIVIDE | MODULO) unaryExpression;
-
-unaryExpression
- : assignmentExpression
- | op=(PLUS | MINUS | BIT_NOT | NOT) unaryExpression;
-
-locationExpression
- : IDENTIFIER
- | locationExpression '.' IDENTIFIER
- | primaryExpression '.' IDENTIFIER
- | locationExpression '[' expression ']'
- | primaryExpression '[' expression ']';
-
-incrementDecrement
- : INCREMENT locationExpression
- | DECREMENT locationExpression
- | locationExpression op=INCREMENT
- | locationExpression op=DECREMENT
- ;
-
-assignment
- : incrementDecrement
- | locationExpression ((ASSIGNMENT | ASSIGNMENT_OPERATOR) expression)?;
-
-assignmentExpression
- : functionPointerExpression
- | assignment;
-
-structExpression
- : IDENTIFIER '{' (expression (',' expression)*)? '}';
-
-functionPointerExpression
- : primaryExpression
- | IDENTIFIER genericSpecializationTypeList?
- ;
-
-primaryExpression
- : helperCall
- | structExpression
- | DECIMAL_LITERAL
- | STRING_LITERAL
- | ('(' expression ')')
- ;
-
-forInitialization : variableDeclarationWithInitialization?;
-forLoop: FOR '(' forInitialization ';' expression ';' assignment ')' statementBlock;
-
-rangeSpecifier: '[' begin=expression? ':' end=expression? ']';
-forOfRange: rangeSpecifier?;
-forOfLoop: FOR '(' variableDeclaration 'of' expression forOfRange ')' statementBlock;
-
-argument: expression;
-argumentList: '(' argument? (',' argument)* ')';
-
-helperCall: (MIN | MAX | IDENTIFIER) genericSpecializationTypeList? argumentList optionalOtherwise;
-
-labelReference: IDENTIFIER;
-variableDeclaration: (LET | CONST) IDENTIFIER ':' type;
-variableDeclarationWithInitialization: variableDeclaration (ASSIGNMENT expression)?;
-helperCallStatement: (TAIL)? helperCall;
-expressionStatement: assignment;
-ifStatement: IF CONSTEXPR? '(' expression ')' statementBlock ('else' statementBlock)?;
-whileLoop: WHILE '(' expression ')' statementBlock;
-returnStatement: RETURN expression?;
-breakStatement: BREAK;
-continueStatement: CONTINUE;
-gotoStatement: GOTO labelReference argumentList?;
-handlerWithStatement: LABEL labelDeclaration statementBlock;
-tryLabelStatement: TRY statementBlock handlerWithStatement+;
-
-diagnosticStatement: ((ASSERT_TOKEN | CHECK_TOKEN) '(' expression ')') | UNREACHABLE_TOKEN | DEBUG_TOKEN;
-
-statement : variableDeclarationWithInitialization ';'
- | helperCallStatement ';'
- | expressionStatement ';'
- | returnStatement ';'
- | breakStatement ';'
- | continueStatement ';'
- | gotoStatement ';'
- | ifStatement
- | diagnosticStatement ';'
- | whileLoop
- | forOfLoop
- | forLoop
- | tryLabelStatement
- ;
-
-statementList : statement*;
-statementScope : DEFERRED? '{' statementList '}';
-statementBlock
- : statement
- | statementScope;
-
-helperBody : statementScope;
-
-fieldDeclaration: IDENTIFIER ':' type ';';
-fieldListDeclaration: fieldDeclaration*;
-
-extendsDeclaration: 'extends' IDENTIFIER;
-generatesDeclaration: 'generates' STRING_LITERAL;
-constexprDeclaration: 'constexpr' STRING_LITERAL;
-typeDeclaration : 'type' IDENTIFIER extendsDeclaration? generatesDeclaration? constexprDeclaration?';';
-typeAliasDeclaration : 'type' IDENTIFIER '=' type ';';
-
-externalBuiltin : EXTERN JAVASCRIPT? BUILTIN IDENTIFIER optionalGenericTypeList '(' typeList ')' optionalType ';';
-externalMacro : EXTERN ('operator' STRING_LITERAL)? MACRO IDENTIFIER optionalGenericTypeList typeListMaybeVarArgs optionalType optionalLabelList ';';
-externalRuntime : EXTERN RUNTIME IDENTIFIER typeListMaybeVarArgs optionalType ';';
-builtinDeclaration : JAVASCRIPT? BUILTIN IDENTIFIER optionalGenericTypeList parameterList optionalType (helperBody | ';');
-genericSpecialization: IDENTIFIER genericSpecializationTypeList parameterList optionalType optionalLabelList helperBody;
-macroDeclaration : ('operator' STRING_LITERAL)? MACRO IDENTIFIER optionalGenericTypeList parameterList optionalType optionalLabelList (helperBody | ';');
-externConstDeclaration : CONST IDENTIFIER ':' type generatesDeclaration ';';
-constDeclaration: CONST IDENTIFIER ':' type ASSIGNMENT expression ';';
-structDeclaration : 'struct' IDENTIFIER '{' fieldListDeclaration '}';
-
-declaration
- : structDeclaration
- | typeDeclaration
- | typeAliasDeclaration
- | builtinDeclaration
- | genericSpecialization
- | macroDeclaration
- | externalMacro
- | externalBuiltin
- | externalRuntime
- | externConstDeclaration
- | constDeclaration;
-
-moduleDeclaration : MODULE IDENTIFIER '{' declaration* '}';
-
-file: (moduleDeclaration | declaration)*;
diff --git a/deps/v8/src/torque/Torque.interp b/deps/v8/src/torque/Torque.interp
deleted file mode 100644
index 0ffb78795f..0000000000
--- a/deps/v8/src/torque/Torque.interp
+++ /dev/null
@@ -1,249 +0,0 @@
-token literal names:
-null
-'('
-')'
-'=>'
-','
-':'
-'type'
-'?'
-'||'
-'&&'
-'.'
-'['
-']'
-'{'
-'}'
-';'
-'of'
-'else'
-'extends'
-'generates'
-'operator'
-'struct'
-'macro'
-'builtin'
-'runtime'
-'module'
-'javascript'
-'deferred'
-'if'
-'for'
-'while'
-'return'
-'constexpr'
-'continue'
-'break'
-'goto'
-'otherwise'
-'try'
-'label'
-'labels'
-'tail'
-'isnt'
-'is'
-'let'
-'const'
-'extern'
-'assert'
-'check'
-'unreachable'
-'debug'
-'='
-null
-'=='
-'+'
-'-'
-'*'
-'/'
-'%'
-'|'
-'&'
-'~'
-'max'
-'min'
-'!='
-'<'
-'<='
-'>'
-'>='
-'<<'
-'>>'
-'>>>'
-'...'
-null
-'++'
-'--'
-'!'
-null
-null
-null
-null
-null
-null
-
-token symbolic names:
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-MACRO
-BUILTIN
-RUNTIME
-MODULE
-JAVASCRIPT
-DEFERRED
-IF
-FOR
-WHILE
-RETURN
-CONSTEXPR
-CONTINUE
-BREAK
-GOTO
-OTHERWISE
-TRY
-LABEL
-LABELS
-TAIL
-ISNT
-IS
-LET
-CONST
-EXTERN
-ASSERT_TOKEN
-CHECK_TOKEN
-UNREACHABLE_TOKEN
-DEBUG_TOKEN
-ASSIGNMENT
-ASSIGNMENT_OPERATOR
-EQUAL
-PLUS
-MINUS
-MULTIPLY
-DIVIDE
-MODULO
-BIT_OR
-BIT_AND
-BIT_NOT
-MAX
-MIN
-NOT_EQUAL
-LESS_THAN
-LESS_THAN_EQUAL
-GREATER_THAN
-GREATER_THAN_EQUAL
-SHIFT_LEFT
-SHIFT_RIGHT
-SHIFT_RIGHT_ARITHMETIC
-VARARGS
-EQUALITY_OPERATOR
-INCREMENT
-DECREMENT
-NOT
-STRING_LITERAL
-IDENTIFIER
-WS
-BLOCK_COMMENT
-LINE_COMMENT
-DECIMAL_LITERAL
-
-rule names:
-type
-typeList
-genericSpecializationTypeList
-optionalGenericTypeList
-typeListMaybeVarArgs
-labelParameter
-optionalType
-optionalLabelList
-optionalOtherwise
-parameter
-parameterList
-labelDeclaration
-expression
-conditionalExpression
-logicalORExpression
-logicalANDExpression
-bitwiseExpression
-equalityExpression
-relationalExpression
-shiftExpression
-additiveExpression
-multiplicativeExpression
-unaryExpression
-locationExpression
-incrementDecrement
-assignment
-assignmentExpression
-structExpression
-functionPointerExpression
-primaryExpression
-forInitialization
-forLoop
-rangeSpecifier
-forOfRange
-forOfLoop
-argument
-argumentList
-helperCall
-labelReference
-variableDeclaration
-variableDeclarationWithInitialization
-helperCallStatement
-expressionStatement
-ifStatement
-whileLoop
-returnStatement
-breakStatement
-continueStatement
-gotoStatement
-handlerWithStatement
-tryLabelStatement
-diagnosticStatement
-statement
-statementList
-statementScope
-statementBlock
-helperBody
-fieldDeclaration
-fieldListDeclaration
-extendsDeclaration
-generatesDeclaration
-constexprDeclaration
-typeDeclaration
-typeAliasDeclaration
-externalBuiltin
-externalMacro
-externalRuntime
-builtinDeclaration
-genericSpecialization
-macroDeclaration
-externConstDeclaration
-constDeclaration
-structDeclaration
-declaration
-moduleDeclaration
-file
-
-
-atn:
-[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 83, 821, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 4, 54, 9, 54, 4, 55, 9, 55, 4, 56, 9, 56, 4, 57, 9, 57, 4, 58, 9, 58, 4, 59, 9, 59, 4, 60, 9, 60, 4, 61, 9, 61, 4, 62, 9, 62, 4, 63, 9, 63, 4, 64, 9, 64, 4, 65, 9, 65, 4, 66, 9, 66, 4, 67, 9, 67, 4, 68, 9, 68, 4, 69, 9, 69, 4, 70, 9, 70, 4, 71, 9, 71, 4, 72, 9, 72, 4, 73, 9, 73, 4, 74, 9, 74, 4, 75, 9, 75, 4, 76, 9, 76, 4, 77, 9, 77, 3, 2, 3, 2, 5, 2, 157, 10, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 5, 2, 171, 10, 2, 3, 2, 3, 2, 3, 2, 7, 2, 176, 10, 2, 12, 2, 14, 2, 179, 11, 2, 3, 3, 3, 3, 3, 3, 7, 3, 184, 10, 3, 12, 3, 14, 3, 187, 11, 3, 5, 3, 189, 10, 3, 3, 4, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 203, 10, 5, 12, 5, 14, 5, 206, 11, 5, 3, 5, 5, 5, 209, 10, 5, 3, 6, 3, 6, 5, 6, 213, 10, 6, 3, 6, 3, 6, 7, 6, 217, 10, 6, 12, 6, 14, 6, 220, 11, 6, 3, 6, 3, 6, 5, 6, 224, 10, 6, 3, 6, 3, 6, 3, 6, 3, 6, 5, 6, 230, 10, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 5, 7, 237, 10, 7, 3, 8, 3, 8, 5, 8, 241, 10, 8, 3, 9, 3, 9, 3, 9, 3, 9, 7, 9, 247, 10, 9, 12, 9, 14, 9, 250, 11, 9, 5, 9, 252, 10, 9, 3, 10, 3, 10, 3, 10, 3, 10, 7, 10, 258, 10, 10, 12, 10, 14, 10, 261, 11, 10, 5, 10, 263, 10, 10, 3, 11, 3, 11, 3, 11, 5, 11, 268, 10, 11, 3, 12, 3, 12, 5, 12, 272, 10, 12, 3, 12, 3, 12, 7, 12, 276, 10, 12, 12, 12, 14, 12, 279, 11, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 5, 12, 291, 10, 12, 3, 13, 3, 13, 5, 13, 295, 10, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 7, 15, 308, 10, 15, 12, 15, 14, 15, 311, 11, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 7, 16, 319, 10, 16, 12, 16, 14, 16, 322, 11, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 3, 17, 7, 17, 330, 10, 17, 12, 17, 14, 17, 333, 11, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 7, 18, 341, 10, 18, 12, 18, 14, 18, 344, 11, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 7, 19, 352, 10, 19, 12, 19, 14, 19, 355, 11, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 7, 20, 363, 10, 20, 12, 20, 14, 20, 366, 11, 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 7, 21, 374, 10, 21, 12, 21, 14, 21, 377, 11, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 7, 22, 385, 10, 22, 12, 22, 14, 22, 388, 11, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 7, 23, 396, 10, 23, 12, 23, 14, 23, 399, 11, 23, 3, 24, 3, 24, 3, 24, 5, 24, 404, 10, 24, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 5, 25, 417, 10, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 7, 25, 427, 10, 25, 12, 25, 14, 25, 430, 11, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 5, 26, 442, 10, 26, 3, 27, 3, 27, 3, 27, 3, 27, 5, 27, 448, 10, 27, 5, 27, 450, 10, 27, 3, 28, 3, 28, 5, 28, 454, 10, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 7, 29, 461, 10, 29, 12, 
29, 14, 29, 464, 11, 29, 5, 29, 466, 10, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 5, 30, 473, 10, 30, 5, 30, 475, 10, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 5, 31, 485, 10, 31, 3, 32, 5, 32, 488, 10, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 5, 34, 502, 10, 34, 3, 34, 3, 34, 5, 34, 506, 10, 34, 3, 34, 3, 34, 3, 35, 5, 35, 511, 10, 35, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 37, 3, 37, 3, 38, 3, 38, 5, 38, 526, 10, 38, 3, 38, 3, 38, 7, 38, 530, 10, 38, 12, 38, 14, 38, 533, 11, 38, 3, 38, 3, 38, 3, 39, 3, 39, 5, 39, 539, 10, 39, 3, 39, 3, 39, 3, 39, 3, 40, 3, 40, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 42, 3, 42, 3, 42, 5, 42, 554, 10, 42, 3, 43, 5, 43, 557, 10, 43, 3, 43, 3, 43, 3, 44, 3, 44, 3, 45, 3, 45, 5, 45, 565, 10, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 5, 45, 573, 10, 45, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 47, 3, 47, 5, 47, 583, 10, 47, 3, 48, 3, 48, 3, 49, 3, 49, 3, 50, 3, 50, 3, 50, 5, 50, 592, 10, 50, 3, 51, 3, 51, 3, 51, 3, 51, 3, 52, 3, 52, 3, 52, 6, 52, 601, 10, 52, 13, 52, 14, 52, 602, 3, 53, 3, 53, 3, 53, 3, 53, 3, 53, 3, 53, 3, 53, 5, 53, 612, 10, 53, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 3, 54, 5, 54, 643, 10, 54, 3, 55, 7, 55, 646, 10, 55, 12, 55, 14, 55, 649, 11, 55, 3, 56, 5, 56, 652, 10, 56, 3, 56, 3, 56, 3, 56, 3, 56, 3, 57, 3, 57, 5, 57, 660, 10, 57, 3, 58, 3, 58, 3, 59, 3, 59, 3, 59, 3, 59, 3, 59, 3, 60, 7, 60, 670, 10, 60, 12, 60, 14, 60, 673, 11, 60, 3, 61, 3, 61, 3, 61, 3, 62, 3, 62, 3, 62, 3, 63, 3, 63, 3, 63, 3, 64, 3, 64, 3, 64, 5, 64, 687, 10, 64, 3, 64, 5, 64, 690, 10, 64, 3, 64, 5, 64, 693, 10, 64, 3, 64, 3, 64, 3, 65, 3, 65, 3, 65, 3, 65, 3, 65, 3, 65, 3, 66, 3, 66, 5, 66, 705, 10, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 66, 3, 67, 3, 67, 3, 67, 5, 67, 719, 10, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 67, 3, 68, 3, 68, 3, 68, 3, 68, 3, 68, 3, 68, 3, 68, 3, 69, 5, 69, 737, 10, 69, 3, 69, 3, 69, 3, 69, 3, 69, 3, 69, 3, 69, 3, 69, 5, 69, 746, 10, 69, 3, 70, 3, 70, 3, 70, 3, 70, 3, 70, 3, 70, 3, 70, 3, 71, 3, 71, 5, 71, 757, 10, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 3, 71, 5, 71, 767, 10, 71, 3, 72, 3, 72, 3, 72, 3, 72, 3, 72, 3, 72, 3, 72, 3, 73, 3, 73, 3, 73, 3, 73, 3, 73, 3, 73, 3, 73, 3, 73, 3, 74, 3, 74, 3, 74, 3, 74, 3, 74, 3, 74, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 3, 75, 5, 75, 801, 10, 75, 3, 76, 3, 76, 3, 76, 3, 76, 7, 76, 807, 10, 76, 12, 76, 14, 76, 810, 11, 76, 3, 76, 3, 76, 3, 77, 3, 77, 7, 77, 816, 10, 77, 12, 77, 14, 77, 819, 11, 77, 3, 77, 2, 13, 2, 28, 30, 32, 34, 36, 38, 40, 42, 44, 48, 78, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 2, 13, 3, 2, 60, 61, 4, 2, 54, 54, 65, 65, 3, 2, 66, 69, 3, 2, 70, 72, 3, 2, 55, 56, 3, 2, 57, 59, 5, 2, 55, 56, 62, 62, 77, 77, 3, 2, 52, 53, 4, 2, 63, 64, 79, 79, 3, 2, 45, 46, 3, 2, 48, 49, 2, 849, 2, 170, 3, 2, 2, 2, 4, 188, 3, 2, 2, 2, 6, 190, 3, 2, 2, 2, 8, 208, 3, 2, 2, 2, 10, 229, 3, 2, 2, 2, 12, 231, 3, 2, 2, 2, 14, 240, 3, 2, 2, 2, 16, 251, 3, 2, 2, 2, 18, 262, 3, 2, 2, 2, 20, 
264, 3, 2, 2, 2, 22, 290, 3, 2, 2, 2, 24, 292, 3, 2, 2, 2, 26, 296, 3, 2, 2, 2, 28, 298, 3, 2, 2, 2, 30, 312, 3, 2, 2, 2, 32, 323, 3, 2, 2, 2, 34, 334, 3, 2, 2, 2, 36, 345, 3, 2, 2, 2, 38, 356, 3, 2, 2, 2, 40, 367, 3, 2, 2, 2, 42, 378, 3, 2, 2, 2, 44, 389, 3, 2, 2, 2, 46, 403, 3, 2, 2, 2, 48, 416, 3, 2, 2, 2, 50, 441, 3, 2, 2, 2, 52, 449, 3, 2, 2, 2, 54, 453, 3, 2, 2, 2, 56, 455, 3, 2, 2, 2, 58, 474, 3, 2, 2, 2, 60, 484, 3, 2, 2, 2, 62, 487, 3, 2, 2, 2, 64, 489, 3, 2, 2, 2, 66, 499, 3, 2, 2, 2, 68, 510, 3, 2, 2, 2, 70, 512, 3, 2, 2, 2, 72, 521, 3, 2, 2, 2, 74, 523, 3, 2, 2, 2, 76, 536, 3, 2, 2, 2, 78, 543, 3, 2, 2, 2, 80, 545, 3, 2, 2, 2, 82, 550, 3, 2, 2, 2, 84, 556, 3, 2, 2, 2, 86, 560, 3, 2, 2, 2, 88, 562, 3, 2, 2, 2, 90, 574, 3, 2, 2, 2, 92, 580, 3, 2, 2, 2, 94, 584, 3, 2, 2, 2, 96, 586, 3, 2, 2, 2, 98, 588, 3, 2, 2, 2, 100, 593, 3, 2, 2, 2, 102, 597, 3, 2, 2, 2, 104, 611, 3, 2, 2, 2, 106, 642, 3, 2, 2, 2, 108, 647, 3, 2, 2, 2, 110, 651, 3, 2, 2, 2, 112, 659, 3, 2, 2, 2, 114, 661, 3, 2, 2, 2, 116, 663, 3, 2, 2, 2, 118, 671, 3, 2, 2, 2, 120, 674, 3, 2, 2, 2, 122, 677, 3, 2, 2, 2, 124, 680, 3, 2, 2, 2, 126, 683, 3, 2, 2, 2, 128, 696, 3, 2, 2, 2, 130, 702, 3, 2, 2, 2, 132, 715, 3, 2, 2, 2, 134, 728, 3, 2, 2, 2, 136, 736, 3, 2, 2, 2, 138, 747, 3, 2, 2, 2, 140, 756, 3, 2, 2, 2, 142, 768, 3, 2, 2, 2, 144, 775, 3, 2, 2, 2, 146, 783, 3, 2, 2, 2, 148, 800, 3, 2, 2, 2, 150, 802, 3, 2, 2, 2, 152, 817, 3, 2, 2, 2, 154, 156, 8, 2, 1, 2, 155, 157, 7, 34, 2, 2, 156, 155, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 171, 7, 79, 2, 2, 159, 160, 7, 25, 2, 2, 160, 161, 7, 3, 2, 2, 161, 162, 5, 4, 3, 2, 162, 163, 7, 4, 2, 2, 163, 164, 7, 5, 2, 2, 164, 165, 5, 2, 2, 5, 165, 171, 3, 2, 2, 2, 166, 167, 7, 3, 2, 2, 167, 168, 5, 2, 2, 2, 168, 169, 7, 4, 2, 2, 169, 171, 3, 2, 2, 2, 170, 154, 3, 2, 2, 2, 170, 159, 3, 2, 2, 2, 170, 166, 3, 2, 2, 2, 171, 177, 3, 2, 2, 2, 172, 173, 12, 4, 2, 2, 173, 174, 7, 60, 2, 2, 174, 176, 5, 2, 2, 5, 175, 172, 3, 2, 2, 2, 176, 179, 3, 2, 2, 2, 177, 175, 3, 2, 2, 2, 177, 178, 3, 2, 2, 2, 178, 3, 3, 2, 2, 2, 179, 177, 3, 2, 2, 2, 180, 185, 5, 2, 2, 2, 181, 182, 7, 6, 2, 2, 182, 184, 5, 2, 2, 2, 183, 181, 3, 2, 2, 2, 184, 187, 3, 2, 2, 2, 185, 183, 3, 2, 2, 2, 185, 186, 3, 2, 2, 2, 186, 189, 3, 2, 2, 2, 187, 185, 3, 2, 2, 2, 188, 180, 3, 2, 2, 2, 188, 189, 3, 2, 2, 2, 189, 5, 3, 2, 2, 2, 190, 191, 7, 66, 2, 2, 191, 192, 5, 4, 3, 2, 192, 193, 7, 68, 2, 2, 193, 7, 3, 2, 2, 2, 194, 195, 7, 66, 2, 2, 195, 196, 7, 79, 2, 2, 196, 197, 7, 7, 2, 2, 197, 204, 7, 8, 2, 2, 198, 199, 7, 6, 2, 2, 199, 200, 7, 79, 2, 2, 200, 201, 7, 7, 2, 2, 201, 203, 7, 8, 2, 2, 202, 198, 3, 2, 2, 2, 203, 206, 3, 2, 2, 2, 204, 202, 3, 2, 2, 2, 204, 205, 3, 2, 2, 2, 205, 207, 3, 2, 2, 2, 206, 204, 3, 2, 2, 2, 207, 209, 7, 68, 2, 2, 208, 194, 3, 2, 2, 2, 208, 209, 3, 2, 2, 2, 209, 9, 3, 2, 2, 2, 210, 212, 7, 3, 2, 2, 211, 213, 5, 2, 2, 2, 212, 211, 3, 2, 2, 2, 212, 213, 3, 2, 2, 2, 213, 218, 3, 2, 2, 2, 214, 215, 7, 6, 2, 2, 215, 217, 5, 2, 2, 2, 216, 214, 3, 2, 2, 2, 217, 220, 3, 2, 2, 2, 218, 216, 3, 2, 2, 2, 218, 219, 3, 2, 2, 2, 219, 223, 3, 2, 2, 2, 220, 218, 3, 2, 2, 2, 221, 222, 7, 6, 2, 2, 222, 224, 7, 73, 2, 2, 223, 221, 3, 2, 2, 2, 223, 224, 3, 2, 2, 2, 224, 225, 3, 2, 2, 2, 225, 230, 7, 4, 2, 2, 226, 227, 7, 3, 2, 2, 227, 228, 7, 73, 2, 2, 228, 230, 7, 4, 2, 2, 229, 210, 3, 2, 2, 2, 229, 226, 3, 2, 2, 2, 230, 11, 3, 2, 2, 2, 231, 236, 7, 79, 2, 2, 232, 233, 7, 3, 2, 2, 233, 234, 5, 4, 3, 2, 234, 235, 7, 4, 2, 2, 235, 237, 3, 2, 2, 2, 236, 232, 3, 2, 2, 2, 236, 237, 3, 2, 2, 2, 237, 
13, 3, 2, 2, 2, 238, 239, 7, 7, 2, 2, 239, 241, 5, 2, 2, 2, 240, 238, 3, 2, 2, 2, 240, 241, 3, 2, 2, 2, 241, 15, 3, 2, 2, 2, 242, 243, 7, 41, 2, 2, 243, 248, 5, 12, 7, 2, 244, 245, 7, 6, 2, 2, 245, 247, 5, 12, 7, 2, 246, 244, 3, 2, 2, 2, 247, 250, 3, 2, 2, 2, 248, 246, 3, 2, 2, 2, 248, 249, 3, 2, 2, 2, 249, 252, 3, 2, 2, 2, 250, 248, 3, 2, 2, 2, 251, 242, 3, 2, 2, 2, 251, 252, 3, 2, 2, 2, 252, 17, 3, 2, 2, 2, 253, 254, 7, 38, 2, 2, 254, 259, 7, 79, 2, 2, 255, 256, 7, 6, 2, 2, 256, 258, 7, 79, 2, 2, 257, 255, 3, 2, 2, 2, 258, 261, 3, 2, 2, 2, 259, 257, 3, 2, 2, 2, 259, 260, 3, 2, 2, 2, 260, 263, 3, 2, 2, 2, 261, 259, 3, 2, 2, 2, 262, 253, 3, 2, 2, 2, 262, 263, 3, 2, 2, 2, 263, 19, 3, 2, 2, 2, 264, 265, 7, 79, 2, 2, 265, 267, 7, 7, 2, 2, 266, 268, 5, 2, 2, 2, 267, 266, 3, 2, 2, 2, 267, 268, 3, 2, 2, 2, 268, 21, 3, 2, 2, 2, 269, 271, 7, 3, 2, 2, 270, 272, 5, 20, 11, 2, 271, 270, 3, 2, 2, 2, 271, 272, 3, 2, 2, 2, 272, 277, 3, 2, 2, 2, 273, 274, 7, 6, 2, 2, 274, 276, 5, 20, 11, 2, 275, 273, 3, 2, 2, 2, 276, 279, 3, 2, 2, 2, 277, 275, 3, 2, 2, 2, 277, 278, 3, 2, 2, 2, 278, 280, 3, 2, 2, 2, 279, 277, 3, 2, 2, 2, 280, 291, 7, 4, 2, 2, 281, 282, 7, 3, 2, 2, 282, 283, 5, 20, 11, 2, 283, 284, 7, 6, 2, 2, 284, 285, 5, 20, 11, 2, 285, 286, 7, 6, 2, 2, 286, 287, 7, 73, 2, 2, 287, 288, 7, 79, 2, 2, 288, 289, 7, 4, 2, 2, 289, 291, 3, 2, 2, 2, 290, 269, 3, 2, 2, 2, 290, 281, 3, 2, 2, 2, 291, 23, 3, 2, 2, 2, 292, 294, 7, 79, 2, 2, 293, 295, 5, 22, 12, 2, 294, 293, 3, 2, 2, 2, 294, 295, 3, 2, 2, 2, 295, 25, 3, 2, 2, 2, 296, 297, 5, 28, 15, 2, 297, 27, 3, 2, 2, 2, 298, 299, 8, 15, 1, 2, 299, 300, 5, 30, 16, 2, 300, 309, 3, 2, 2, 2, 301, 302, 12, 3, 2, 2, 302, 303, 7, 9, 2, 2, 303, 304, 5, 30, 16, 2, 304, 305, 7, 7, 2, 2, 305, 306, 5, 30, 16, 2, 306, 308, 3, 2, 2, 2, 307, 301, 3, 2, 2, 2, 308, 311, 3, 2, 2, 2, 309, 307, 3, 2, 2, 2, 309, 310, 3, 2, 2, 2, 310, 29, 3, 2, 2, 2, 311, 309, 3, 2, 2, 2, 312, 313, 8, 16, 1, 2, 313, 314, 5, 32, 17, 2, 314, 320, 3, 2, 2, 2, 315, 316, 12, 3, 2, 2, 316, 317, 7, 10, 2, 2, 317, 319, 5, 32, 17, 2, 318, 315, 3, 2, 2, 2, 319, 322, 3, 2, 2, 2, 320, 318, 3, 2, 2, 2, 320, 321, 3, 2, 2, 2, 321, 31, 3, 2, 2, 2, 322, 320, 3, 2, 2, 2, 323, 324, 8, 17, 1, 2, 324, 325, 5, 34, 18, 2, 325, 331, 3, 2, 2, 2, 326, 327, 12, 3, 2, 2, 327, 328, 7, 11, 2, 2, 328, 330, 5, 34, 18, 2, 329, 326, 3, 2, 2, 2, 330, 333, 3, 2, 2, 2, 331, 329, 3, 2, 2, 2, 331, 332, 3, 2, 2, 2, 332, 33, 3, 2, 2, 2, 333, 331, 3, 2, 2, 2, 334, 335, 8, 18, 1, 2, 335, 336, 5, 36, 19, 2, 336, 342, 3, 2, 2, 2, 337, 338, 12, 3, 2, 2, 338, 339, 9, 2, 2, 2, 339, 341, 5, 36, 19, 2, 340, 337, 3, 2, 2, 2, 341, 344, 3, 2, 2, 2, 342, 340, 3, 2, 2, 2, 342, 343, 3, 2, 2, 2, 343, 35, 3, 2, 2, 2, 344, 342, 3, 2, 2, 2, 345, 346, 8, 19, 1, 2, 346, 347, 5, 38, 20, 2, 347, 353, 3, 2, 2, 2, 348, 349, 12, 3, 2, 2, 349, 350, 9, 3, 2, 2, 350, 352, 5, 38, 20, 2, 351, 348, 3, 2, 2, 2, 352, 355, 3, 2, 2, 2, 353, 351, 3, 2, 2, 2, 353, 354, 3, 2, 2, 2, 354, 37, 3, 2, 2, 2, 355, 353, 3, 2, 2, 2, 356, 357, 8, 20, 1, 2, 357, 358, 5, 40, 21, 2, 358, 364, 3, 2, 2, 2, 359, 360, 12, 3, 2, 2, 360, 361, 9, 4, 2, 2, 361, 363, 5, 40, 21, 2, 362, 359, 3, 2, 2, 2, 363, 366, 3, 2, 2, 2, 364, 362, 3, 2, 2, 2, 364, 365, 3, 2, 2, 2, 365, 39, 3, 2, 2, 2, 366, 364, 3, 2, 2, 2, 367, 368, 8, 21, 1, 2, 368, 369, 5, 42, 22, 2, 369, 375, 3, 2, 2, 2, 370, 371, 12, 3, 2, 2, 371, 372, 9, 5, 2, 2, 372, 374, 5, 42, 22, 2, 373, 370, 3, 2, 2, 2, 374, 377, 3, 2, 2, 2, 375, 373, 3, 2, 2, 2, 375, 376, 3, 2, 2, 2, 376, 41, 3, 2, 2, 2, 377, 375, 3, 2, 2, 2, 378, 379, 8, 22, 1, 2, 379, 
380, 5, 44, 23, 2, 380, 386, 3, 2, 2, 2, 381, 382, 12, 3, 2, 2, 382, 383, 9, 6, 2, 2, 383, 385, 5, 44, 23, 2, 384, 381, 3, 2, 2, 2, 385, 388, 3, 2, 2, 2, 386, 384, 3, 2, 2, 2, 386, 387, 3, 2, 2, 2, 387, 43, 3, 2, 2, 2, 388, 386, 3, 2, 2, 2, 389, 390, 8, 23, 1, 2, 390, 391, 5, 46, 24, 2, 391, 397, 3, 2, 2, 2, 392, 393, 12, 3, 2, 2, 393, 394, 9, 7, 2, 2, 394, 396, 5, 46, 24, 2, 395, 392, 3, 2, 2, 2, 396, 399, 3, 2, 2, 2, 397, 395, 3, 2, 2, 2, 397, 398, 3, 2, 2, 2, 398, 45, 3, 2, 2, 2, 399, 397, 3, 2, 2, 2, 400, 404, 5, 54, 28, 2, 401, 402, 9, 8, 2, 2, 402, 404, 5, 46, 24, 2, 403, 400, 3, 2, 2, 2, 403, 401, 3, 2, 2, 2, 404, 47, 3, 2, 2, 2, 405, 406, 8, 25, 1, 2, 406, 417, 7, 79, 2, 2, 407, 408, 5, 60, 31, 2, 408, 409, 7, 12, 2, 2, 409, 410, 7, 79, 2, 2, 410, 417, 3, 2, 2, 2, 411, 412, 5, 60, 31, 2, 412, 413, 7, 13, 2, 2, 413, 414, 5, 26, 14, 2, 414, 415, 7, 14, 2, 2, 415, 417, 3, 2, 2, 2, 416, 405, 3, 2, 2, 2, 416, 407, 3, 2, 2, 2, 416, 411, 3, 2, 2, 2, 417, 428, 3, 2, 2, 2, 418, 419, 12, 6, 2, 2, 419, 420, 7, 12, 2, 2, 420, 427, 7, 79, 2, 2, 421, 422, 12, 4, 2, 2, 422, 423, 7, 13, 2, 2, 423, 424, 5, 26, 14, 2, 424, 425, 7, 14, 2, 2, 425, 427, 3, 2, 2, 2, 426, 418, 3, 2, 2, 2, 426, 421, 3, 2, 2, 2, 427, 430, 3, 2, 2, 2, 428, 426, 3, 2, 2, 2, 428, 429, 3, 2, 2, 2, 429, 49, 3, 2, 2, 2, 430, 428, 3, 2, 2, 2, 431, 432, 7, 75, 2, 2, 432, 442, 5, 48, 25, 2, 433, 434, 7, 76, 2, 2, 434, 442, 5, 48, 25, 2, 435, 436, 5, 48, 25, 2, 436, 437, 7, 75, 2, 2, 437, 442, 3, 2, 2, 2, 438, 439, 5, 48, 25, 2, 439, 440, 7, 76, 2, 2, 440, 442, 3, 2, 2, 2, 441, 431, 3, 2, 2, 2, 441, 433, 3, 2, 2, 2, 441, 435, 3, 2, 2, 2, 441, 438, 3, 2, 2, 2, 442, 51, 3, 2, 2, 2, 443, 450, 5, 50, 26, 2, 444, 447, 5, 48, 25, 2, 445, 446, 9, 9, 2, 2, 446, 448, 5, 26, 14, 2, 447, 445, 3, 2, 2, 2, 447, 448, 3, 2, 2, 2, 448, 450, 3, 2, 2, 2, 449, 443, 3, 2, 2, 2, 449, 444, 3, 2, 2, 2, 450, 53, 3, 2, 2, 2, 451, 454, 5, 58, 30, 2, 452, 454, 5, 52, 27, 2, 453, 451, 3, 2, 2, 2, 453, 452, 3, 2, 2, 2, 454, 55, 3, 2, 2, 2, 455, 456, 7, 79, 2, 2, 456, 465, 7, 15, 2, 2, 457, 462, 5, 26, 14, 2, 458, 459, 7, 6, 2, 2, 459, 461, 5, 26, 14, 2, 460, 458, 3, 2, 2, 2, 461, 464, 3, 2, 2, 2, 462, 460, 3, 2, 2, 2, 462, 463, 3, 2, 2, 2, 463, 466, 3, 2, 2, 2, 464, 462, 3, 2, 2, 2, 465, 457, 3, 2, 2, 2, 465, 466, 3, 2, 2, 2, 466, 467, 3, 2, 2, 2, 467, 468, 7, 16, 2, 2, 468, 57, 3, 2, 2, 2, 469, 475, 5, 60, 31, 2, 470, 472, 7, 79, 2, 2, 471, 473, 5, 6, 4, 2, 472, 471, 3, 2, 2, 2, 472, 473, 3, 2, 2, 2, 473, 475, 3, 2, 2, 2, 474, 469, 3, 2, 2, 2, 474, 470, 3, 2, 2, 2, 475, 59, 3, 2, 2, 2, 476, 485, 5, 76, 39, 2, 477, 485, 5, 56, 29, 2, 478, 485, 7, 83, 2, 2, 479, 485, 7, 78, 2, 2, 480, 481, 7, 3, 2, 2, 481, 482, 5, 26, 14, 2, 482, 483, 7, 4, 2, 2, 483, 485, 3, 2, 2, 2, 484, 476, 3, 2, 2, 2, 484, 477, 3, 2, 2, 2, 484, 478, 3, 2, 2, 2, 484, 479, 3, 2, 2, 2, 484, 480, 3, 2, 2, 2, 485, 61, 3, 2, 2, 2, 486, 488, 5, 82, 42, 2, 487, 486, 3, 2, 2, 2, 487, 488, 3, 2, 2, 2, 488, 63, 3, 2, 2, 2, 489, 490, 7, 31, 2, 2, 490, 491, 7, 3, 2, 2, 491, 492, 5, 62, 32, 2, 492, 493, 7, 17, 2, 2, 493, 494, 5, 26, 14, 2, 494, 495, 7, 17, 2, 2, 495, 496, 5, 52, 27, 2, 496, 497, 7, 4, 2, 2, 497, 498, 5, 112, 57, 2, 498, 65, 3, 2, 2, 2, 499, 501, 7, 13, 2, 2, 500, 502, 5, 26, 14, 2, 501, 500, 3, 2, 2, 2, 501, 502, 3, 2, 2, 2, 502, 503, 3, 2, 2, 2, 503, 505, 7, 7, 2, 2, 504, 506, 5, 26, 14, 2, 505, 504, 3, 2, 2, 2, 505, 506, 3, 2, 2, 2, 506, 507, 3, 2, 2, 2, 507, 508, 7, 14, 2, 2, 508, 67, 3, 2, 2, 2, 509, 511, 5, 66, 34, 2, 510, 509, 3, 2, 2, 2, 510, 511, 3, 2, 2, 2, 511, 69, 3, 2, 2, 2, 
512, 513, 7, 31, 2, 2, 513, 514, 7, 3, 2, 2, 514, 515, 5, 80, 41, 2, 515, 516, 7, 18, 2, 2, 516, 517, 5, 26, 14, 2, 517, 518, 5, 68, 35, 2, 518, 519, 7, 4, 2, 2, 519, 520, 5, 112, 57, 2, 520, 71, 3, 2, 2, 2, 521, 522, 5, 26, 14, 2, 522, 73, 3, 2, 2, 2, 523, 525, 7, 3, 2, 2, 524, 526, 5, 72, 37, 2, 525, 524, 3, 2, 2, 2, 525, 526, 3, 2, 2, 2, 526, 531, 3, 2, 2, 2, 527, 528, 7, 6, 2, 2, 528, 530, 5, 72, 37, 2, 529, 527, 3, 2, 2, 2, 530, 533, 3, 2, 2, 2, 531, 529, 3, 2, 2, 2, 531, 532, 3, 2, 2, 2, 532, 534, 3, 2, 2, 2, 533, 531, 3, 2, 2, 2, 534, 535, 7, 4, 2, 2, 535, 75, 3, 2, 2, 2, 536, 538, 9, 10, 2, 2, 537, 539, 5, 6, 4, 2, 538, 537, 3, 2, 2, 2, 538, 539, 3, 2, 2, 2, 539, 540, 3, 2, 2, 2, 540, 541, 5, 74, 38, 2, 541, 542, 5, 18, 10, 2, 542, 77, 3, 2, 2, 2, 543, 544, 7, 79, 2, 2, 544, 79, 3, 2, 2, 2, 545, 546, 9, 11, 2, 2, 546, 547, 7, 79, 2, 2, 547, 548, 7, 7, 2, 2, 548, 549, 5, 2, 2, 2, 549, 81, 3, 2, 2, 2, 550, 553, 5, 80, 41, 2, 551, 552, 7, 52, 2, 2, 552, 554, 5, 26, 14, 2, 553, 551, 3, 2, 2, 2, 553, 554, 3, 2, 2, 2, 554, 83, 3, 2, 2, 2, 555, 557, 7, 42, 2, 2, 556, 555, 3, 2, 2, 2, 556, 557, 3, 2, 2, 2, 557, 558, 3, 2, 2, 2, 558, 559, 5, 76, 39, 2, 559, 85, 3, 2, 2, 2, 560, 561, 5, 52, 27, 2, 561, 87, 3, 2, 2, 2, 562, 564, 7, 30, 2, 2, 563, 565, 7, 34, 2, 2, 564, 563, 3, 2, 2, 2, 564, 565, 3, 2, 2, 2, 565, 566, 3, 2, 2, 2, 566, 567, 7, 3, 2, 2, 567, 568, 5, 26, 14, 2, 568, 569, 7, 4, 2, 2, 569, 572, 5, 112, 57, 2, 570, 571, 7, 19, 2, 2, 571, 573, 5, 112, 57, 2, 572, 570, 3, 2, 2, 2, 572, 573, 3, 2, 2, 2, 573, 89, 3, 2, 2, 2, 574, 575, 7, 32, 2, 2, 575, 576, 7, 3, 2, 2, 576, 577, 5, 26, 14, 2, 577, 578, 7, 4, 2, 2, 578, 579, 5, 112, 57, 2, 579, 91, 3, 2, 2, 2, 580, 582, 7, 33, 2, 2, 581, 583, 5, 26, 14, 2, 582, 581, 3, 2, 2, 2, 582, 583, 3, 2, 2, 2, 583, 93, 3, 2, 2, 2, 584, 585, 7, 36, 2, 2, 585, 95, 3, 2, 2, 2, 586, 587, 7, 35, 2, 2, 587, 97, 3, 2, 2, 2, 588, 589, 7, 37, 2, 2, 589, 591, 5, 78, 40, 2, 590, 592, 5, 74, 38, 2, 591, 590, 3, 2, 2, 2, 591, 592, 3, 2, 2, 2, 592, 99, 3, 2, 2, 2, 593, 594, 7, 40, 2, 2, 594, 595, 5, 24, 13, 2, 595, 596, 5, 112, 57, 2, 596, 101, 3, 2, 2, 2, 597, 598, 7, 39, 2, 2, 598, 600, 5, 112, 57, 2, 599, 601, 5, 100, 51, 2, 600, 599, 3, 2, 2, 2, 601, 602, 3, 2, 2, 2, 602, 600, 3, 2, 2, 2, 602, 603, 3, 2, 2, 2, 603, 103, 3, 2, 2, 2, 604, 605, 9, 12, 2, 2, 605, 606, 7, 3, 2, 2, 606, 607, 5, 26, 14, 2, 607, 608, 7, 4, 2, 2, 608, 612, 3, 2, 2, 2, 609, 612, 7, 50, 2, 2, 610, 612, 7, 51, 2, 2, 611, 604, 3, 2, 2, 2, 611, 609, 3, 2, 2, 2, 611, 610, 3, 2, 2, 2, 612, 105, 3, 2, 2, 2, 613, 614, 5, 82, 42, 2, 614, 615, 7, 17, 2, 2, 615, 643, 3, 2, 2, 2, 616, 617, 5, 84, 43, 2, 617, 618, 7, 17, 2, 2, 618, 643, 3, 2, 2, 2, 619, 620, 5, 86, 44, 2, 620, 621, 7, 17, 2, 2, 621, 643, 3, 2, 2, 2, 622, 623, 5, 92, 47, 2, 623, 624, 7, 17, 2, 2, 624, 643, 3, 2, 2, 2, 625, 626, 5, 94, 48, 2, 626, 627, 7, 17, 2, 2, 627, 643, 3, 2, 2, 2, 628, 629, 5, 96, 49, 2, 629, 630, 7, 17, 2, 2, 630, 643, 3, 2, 2, 2, 631, 632, 5, 98, 50, 2, 632, 633, 7, 17, 2, 2, 633, 643, 3, 2, 2, 2, 634, 643, 5, 88, 45, 2, 635, 636, 5, 104, 53, 2, 636, 637, 7, 17, 2, 2, 637, 643, 3, 2, 2, 2, 638, 643, 5, 90, 46, 2, 639, 643, 5, 70, 36, 2, 640, 643, 5, 64, 33, 2, 641, 643, 5, 102, 52, 2, 642, 613, 3, 2, 2, 2, 642, 616, 3, 2, 2, 2, 642, 619, 3, 2, 2, 2, 642, 622, 3, 2, 2, 2, 642, 625, 3, 2, 2, 2, 642, 628, 3, 2, 2, 2, 642, 631, 3, 2, 2, 2, 642, 634, 3, 2, 2, 2, 642, 635, 3, 2, 2, 2, 642, 638, 3, 2, 2, 2, 642, 639, 3, 2, 2, 2, 642, 640, 3, 2, 2, 2, 642, 641, 3, 2, 2, 2, 643, 107, 3, 2, 2, 2, 644, 646, 5, 106, 54, 
2, 645, 644, 3, 2, 2, 2, 646, 649, 3, 2, 2, 2, 647, 645, 3, 2, 2, 2, 647, 648, 3, 2, 2, 2, 648, 109, 3, 2, 2, 2, 649, 647, 3, 2, 2, 2, 650, 652, 7, 29, 2, 2, 651, 650, 3, 2, 2, 2, 651, 652, 3, 2, 2, 2, 652, 653, 3, 2, 2, 2, 653, 654, 7, 15, 2, 2, 654, 655, 5, 108, 55, 2, 655, 656, 7, 16, 2, 2, 656, 111, 3, 2, 2, 2, 657, 660, 5, 106, 54, 2, 658, 660, 5, 110, 56, 2, 659, 657, 3, 2, 2, 2, 659, 658, 3, 2, 2, 2, 660, 113, 3, 2, 2, 2, 661, 662, 5, 110, 56, 2, 662, 115, 3, 2, 2, 2, 663, 664, 7, 79, 2, 2, 664, 665, 7, 7, 2, 2, 665, 666, 5, 2, 2, 2, 666, 667, 7, 17, 2, 2, 667, 117, 3, 2, 2, 2, 668, 670, 5, 116, 59, 2, 669, 668, 3, 2, 2, 2, 670, 673, 3, 2, 2, 2, 671, 669, 3, 2, 2, 2, 671, 672, 3, 2, 2, 2, 672, 119, 3, 2, 2, 2, 673, 671, 3, 2, 2, 2, 674, 675, 7, 20, 2, 2, 675, 676, 7, 79, 2, 2, 676, 121, 3, 2, 2, 2, 677, 678, 7, 21, 2, 2, 678, 679, 7, 78, 2, 2, 679, 123, 3, 2, 2, 2, 680, 681, 7, 34, 2, 2, 681, 682, 7, 78, 2, 2, 682, 125, 3, 2, 2, 2, 683, 684, 7, 8, 2, 2, 684, 686, 7, 79, 2, 2, 685, 687, 5, 120, 61, 2, 686, 685, 3, 2, 2, 2, 686, 687, 3, 2, 2, 2, 687, 689, 3, 2, 2, 2, 688, 690, 5, 122, 62, 2, 689, 688, 3, 2, 2, 2, 689, 690, 3, 2, 2, 2, 690, 692, 3, 2, 2, 2, 691, 693, 5, 124, 63, 2, 692, 691, 3, 2, 2, 2, 692, 693, 3, 2, 2, 2, 693, 694, 3, 2, 2, 2, 694, 695, 7, 17, 2, 2, 695, 127, 3, 2, 2, 2, 696, 697, 7, 8, 2, 2, 697, 698, 7, 79, 2, 2, 698, 699, 7, 52, 2, 2, 699, 700, 5, 2, 2, 2, 700, 701, 7, 17, 2, 2, 701, 129, 3, 2, 2, 2, 702, 704, 7, 47, 2, 2, 703, 705, 7, 28, 2, 2, 704, 703, 3, 2, 2, 2, 704, 705, 3, 2, 2, 2, 705, 706, 3, 2, 2, 2, 706, 707, 7, 25, 2, 2, 707, 708, 7, 79, 2, 2, 708, 709, 5, 8, 5, 2, 709, 710, 7, 3, 2, 2, 710, 711, 5, 4, 3, 2, 711, 712, 7, 4, 2, 2, 712, 713, 5, 14, 8, 2, 713, 714, 7, 17, 2, 2, 714, 131, 3, 2, 2, 2, 715, 718, 7, 47, 2, 2, 716, 717, 7, 22, 2, 2, 717, 719, 7, 78, 2, 2, 718, 716, 3, 2, 2, 2, 718, 719, 3, 2, 2, 2, 719, 720, 3, 2, 2, 2, 720, 721, 7, 24, 2, 2, 721, 722, 7, 79, 2, 2, 722, 723, 5, 8, 5, 2, 723, 724, 5, 10, 6, 2, 724, 725, 5, 14, 8, 2, 725, 726, 5, 16, 9, 2, 726, 727, 7, 17, 2, 2, 727, 133, 3, 2, 2, 2, 728, 729, 7, 47, 2, 2, 729, 730, 7, 26, 2, 2, 730, 731, 7, 79, 2, 2, 731, 732, 5, 10, 6, 2, 732, 733, 5, 14, 8, 2, 733, 734, 7, 17, 2, 2, 734, 135, 3, 2, 2, 2, 735, 737, 7, 28, 2, 2, 736, 735, 3, 2, 2, 2, 736, 737, 3, 2, 2, 2, 737, 738, 3, 2, 2, 2, 738, 739, 7, 25, 2, 2, 739, 740, 7, 79, 2, 2, 740, 741, 5, 8, 5, 2, 741, 742, 5, 22, 12, 2, 742, 745, 5, 14, 8, 2, 743, 746, 5, 114, 58, 2, 744, 746, 7, 17, 2, 2, 745, 743, 3, 2, 2, 2, 745, 744, 3, 2, 2, 2, 746, 137, 3, 2, 2, 2, 747, 748, 7, 79, 2, 2, 748, 749, 5, 6, 4, 2, 749, 750, 5, 22, 12, 2, 750, 751, 5, 14, 8, 2, 751, 752, 5, 16, 9, 2, 752, 753, 5, 114, 58, 2, 753, 139, 3, 2, 2, 2, 754, 755, 7, 22, 2, 2, 755, 757, 7, 78, 2, 2, 756, 754, 3, 2, 2, 2, 756, 757, 3, 2, 2, 2, 757, 758, 3, 2, 2, 2, 758, 759, 7, 24, 2, 2, 759, 760, 7, 79, 2, 2, 760, 761, 5, 8, 5, 2, 761, 762, 5, 22, 12, 2, 762, 763, 5, 14, 8, 2, 763, 766, 5, 16, 9, 2, 764, 767, 5, 114, 58, 2, 765, 767, 7, 17, 2, 2, 766, 764, 3, 2, 2, 2, 766, 765, 3, 2, 2, 2, 767, 141, 3, 2, 2, 2, 768, 769, 7, 46, 2, 2, 769, 770, 7, 79, 2, 2, 770, 771, 7, 7, 2, 2, 771, 772, 5, 2, 2, 2, 772, 773, 5, 122, 62, 2, 773, 774, 7, 17, 2, 2, 774, 143, 3, 2, 2, 2, 775, 776, 7, 46, 2, 2, 776, 777, 7, 79, 2, 2, 777, 778, 7, 7, 2, 2, 778, 779, 5, 2, 2, 2, 779, 780, 7, 52, 2, 2, 780, 781, 5, 26, 14, 2, 781, 782, 7, 17, 2, 2, 782, 145, 3, 2, 2, 2, 783, 784, 7, 23, 2, 2, 784, 785, 7, 79, 2, 2, 785, 786, 7, 15, 2, 2, 786, 787, 5, 118, 60, 2, 787, 788, 7, 16, 2, 2, 788, 147, 
3, 2, 2, 2, 789, 801, 5, 146, 74, 2, 790, 801, 5, 126, 64, 2, 791, 801, 5, 128, 65, 2, 792, 801, 5, 136, 69, 2, 793, 801, 5, 138, 70, 2, 794, 801, 5, 140, 71, 2, 795, 801, 5, 132, 67, 2, 796, 801, 5, 130, 66, 2, 797, 801, 5, 134, 68, 2, 798, 801, 5, 142, 72, 2, 799, 801, 5, 144, 73, 2, 800, 789, 3, 2, 2, 2, 800, 790, 3, 2, 2, 2, 800, 791, 3, 2, 2, 2, 800, 792, 3, 2, 2, 2, 800, 793, 3, 2, 2, 2, 800, 794, 3, 2, 2, 2, 800, 795, 3, 2, 2, 2, 800, 796, 3, 2, 2, 2, 800, 797, 3, 2, 2, 2, 800, 798, 3, 2, 2, 2, 800, 799, 3, 2, 2, 2, 801, 149, 3, 2, 2, 2, 802, 803, 7, 27, 2, 2, 803, 804, 7, 79, 2, 2, 804, 808, 7, 15, 2, 2, 805, 807, 5, 148, 75, 2, 806, 805, 3, 2, 2, 2, 807, 810, 3, 2, 2, 2, 808, 806, 3, 2, 2, 2, 808, 809, 3, 2, 2, 2, 809, 811, 3, 2, 2, 2, 810, 808, 3, 2, 2, 2, 811, 812, 7, 16, 2, 2, 812, 151, 3, 2, 2, 2, 813, 816, 5, 150, 76, 2, 814, 816, 5, 148, 75, 2, 815, 813, 3, 2, 2, 2, 815, 814, 3, 2, 2, 2, 816, 819, 3, 2, 2, 2, 817, 815, 3, 2, 2, 2, 817, 818, 3, 2, 2, 2, 818, 153, 3, 2, 2, 2, 819, 817, 3, 2, 2, 2, 79, 156, 170, 177, 185, 188, 204, 208, 212, 218, 223, 229, 236, 240, 248, 251, 259, 262, 267, 271, 277, 290, 294, 309, 320, 331, 342, 353, 364, 375, 386, 397, 403, 416, 426, 428, 441, 447, 449, 453, 462, 465, 472, 474, 484, 487, 501, 505, 510, 525, 531, 538, 553, 556, 564, 572, 582, 591, 602, 611, 642, 647, 651, 659, 671, 686, 689, 692, 704, 718, 736, 745, 756, 766, 800, 808, 815, 817] \ No newline at end of file
diff --git a/deps/v8/src/torque/Torque.tokens b/deps/v8/src/torque/Torque.tokens
deleted file mode 100644
index 63589b27b7..0000000000
--- a/deps/v8/src/torque/Torque.tokens
+++ /dev/null
@@ -1,154 +0,0 @@
-T__0=1
-T__1=2
-T__2=3
-T__3=4
-T__4=5
-T__5=6
-T__6=7
-T__7=8
-T__8=9
-T__9=10
-T__10=11
-T__11=12
-T__12=13
-T__13=14
-T__14=15
-T__15=16
-T__16=17
-T__17=18
-T__18=19
-T__19=20
-T__20=21
-MACRO=22
-BUILTIN=23
-RUNTIME=24
-MODULE=25
-JAVASCRIPT=26
-DEFERRED=27
-IF=28
-FOR=29
-WHILE=30
-RETURN=31
-CONSTEXPR=32
-CONTINUE=33
-BREAK=34
-GOTO=35
-OTHERWISE=36
-TRY=37
-LABEL=38
-LABELS=39
-TAIL=40
-ISNT=41
-IS=42
-LET=43
-CONST=44
-EXTERN=45
-ASSERT_TOKEN=46
-CHECK_TOKEN=47
-UNREACHABLE_TOKEN=48
-DEBUG_TOKEN=49
-ASSIGNMENT=50
-ASSIGNMENT_OPERATOR=51
-EQUAL=52
-PLUS=53
-MINUS=54
-MULTIPLY=55
-DIVIDE=56
-MODULO=57
-BIT_OR=58
-BIT_AND=59
-BIT_NOT=60
-MAX=61
-MIN=62
-NOT_EQUAL=63
-LESS_THAN=64
-LESS_THAN_EQUAL=65
-GREATER_THAN=66
-GREATER_THAN_EQUAL=67
-SHIFT_LEFT=68
-SHIFT_RIGHT=69
-SHIFT_RIGHT_ARITHMETIC=70
-VARARGS=71
-EQUALITY_OPERATOR=72
-INCREMENT=73
-DECREMENT=74
-NOT=75
-STRING_LITERAL=76
-IDENTIFIER=77
-WS=78
-BLOCK_COMMENT=79
-LINE_COMMENT=80
-DECIMAL_LITERAL=81
-'('=1
-')'=2
-'=>'=3
-','=4
-':'=5
-'type'=6
-'?'=7
-'||'=8
-'&&'=9
-'.'=10
-'['=11
-']'=12
-'{'=13
-'}'=14
-';'=15
-'of'=16
-'else'=17
-'extends'=18
-'generates'=19
-'operator'=20
-'struct'=21
-'macro'=22
-'builtin'=23
-'runtime'=24
-'module'=25
-'javascript'=26
-'deferred'=27
-'if'=28
-'for'=29
-'while'=30
-'return'=31
-'constexpr'=32
-'continue'=33
-'break'=34
-'goto'=35
-'otherwise'=36
-'try'=37
-'label'=38
-'labels'=39
-'tail'=40
-'isnt'=41
-'is'=42
-'let'=43
-'const'=44
-'extern'=45
-'assert'=46
-'check'=47
-'unreachable'=48
-'debug'=49
-'='=50
-'=='=52
-'+'=53
-'-'=54
-'*'=55
-'/'=56
-'%'=57
-'|'=58
-'&'=59
-'~'=60
-'max'=61
-'min'=62
-'!='=63
-'<'=64
-'<='=65
-'>'=66
-'>='=67
-'<<'=68
-'>>'=69
-'>>>'=70
-'...'=71
-'++'=73
-'--'=74
-'!'=75
diff --git a/deps/v8/src/torque/TorqueBaseListener.h b/deps/v8/src/torque/TorqueBaseListener.h
deleted file mode 100644
index 5b2e7613cc..0000000000
--- a/deps/v8/src/torque/TorqueBaseListener.h
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_TORQUE_TORQUEBASELISTENER_H_
-#define V8_TORQUE_TORQUEBASELISTENER_H_
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#pragma once
-
-#include "./antlr4-runtime.h"
-#include "TorqueListener.h"
-
-/**
- * This class provides an empty implementation of TorqueListener,
- * which can be extended to create a listener which only needs to handle a
- * subset of the available methods.
- */
-class TorqueBaseListener : public TorqueListener {
- public:
- void enterType(TorqueParser::TypeContext* /*ctx*/) override {}
- void exitType(TorqueParser::TypeContext* /*ctx*/) override {}
-
- void enterTypeList(TorqueParser::TypeListContext* /*ctx*/) override {}
- void exitTypeList(TorqueParser::TypeListContext* /*ctx*/) override {}
-
- void enterGenericSpecializationTypeList(
- TorqueParser::GenericSpecializationTypeListContext* /*ctx*/) override {}
- void exitGenericSpecializationTypeList(
- TorqueParser::GenericSpecializationTypeListContext* /*ctx*/) override {}
-
- void enterOptionalGenericTypeList(
- TorqueParser::OptionalGenericTypeListContext* /*ctx*/) override {}
- void exitOptionalGenericTypeList(
- TorqueParser::OptionalGenericTypeListContext* /*ctx*/) override {}
-
- void enterTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* /*ctx*/) override {}
- void exitTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* /*ctx*/) override {}
-
- void enterLabelParameter(
- TorqueParser::LabelParameterContext* /*ctx*/) override {}
- void exitLabelParameter(
- TorqueParser::LabelParameterContext* /*ctx*/) override {}
-
- void enterOptionalType(TorqueParser::OptionalTypeContext* /*ctx*/) override {}
- void exitOptionalType(TorqueParser::OptionalTypeContext* /*ctx*/) override {}
-
- void enterOptionalLabelList(
- TorqueParser::OptionalLabelListContext* /*ctx*/) override {}
- void exitOptionalLabelList(
- TorqueParser::OptionalLabelListContext* /*ctx*/) override {}
-
- void enterOptionalOtherwise(
- TorqueParser::OptionalOtherwiseContext* /*ctx*/) override {}
- void exitOptionalOtherwise(
- TorqueParser::OptionalOtherwiseContext* /*ctx*/) override {}
-
- void enterParameter(TorqueParser::ParameterContext* /*ctx*/) override {}
- void exitParameter(TorqueParser::ParameterContext* /*ctx*/) override {}
-
- void enterParameterList(
- TorqueParser::ParameterListContext* /*ctx*/) override {}
- void exitParameterList(TorqueParser::ParameterListContext* /*ctx*/) override {
- }
-
- void enterLabelDeclaration(
- TorqueParser::LabelDeclarationContext* /*ctx*/) override {}
- void exitLabelDeclaration(
- TorqueParser::LabelDeclarationContext* /*ctx*/) override {}
-
- void enterExpression(TorqueParser::ExpressionContext* /*ctx*/) override {}
- void exitExpression(TorqueParser::ExpressionContext* /*ctx*/) override {}
-
- void enterConditionalExpression(
- TorqueParser::ConditionalExpressionContext* /*ctx*/) override {}
- void exitConditionalExpression(
- TorqueParser::ConditionalExpressionContext* /*ctx*/) override {}
-
- void enterLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* /*ctx*/) override {}
- void exitLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* /*ctx*/) override {}
-
- void enterLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* /*ctx*/) override {}
- void exitLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* /*ctx*/) override {}
-
- void enterBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* /*ctx*/) override {}
- void exitBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* /*ctx*/) override {}
-
- void enterEqualityExpression(
- TorqueParser::EqualityExpressionContext* /*ctx*/) override {}
- void exitEqualityExpression(
- TorqueParser::EqualityExpressionContext* /*ctx*/) override {}
-
- void enterRelationalExpression(
- TorqueParser::RelationalExpressionContext* /*ctx*/) override {}
- void exitRelationalExpression(
- TorqueParser::RelationalExpressionContext* /*ctx*/) override {}
-
- void enterShiftExpression(
- TorqueParser::ShiftExpressionContext* /*ctx*/) override {}
- void exitShiftExpression(
- TorqueParser::ShiftExpressionContext* /*ctx*/) override {}
-
- void enterAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* /*ctx*/) override {}
- void exitAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* /*ctx*/) override {}
-
- void enterMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* /*ctx*/) override {}
- void exitMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* /*ctx*/) override {}
-
- void enterUnaryExpression(
- TorqueParser::UnaryExpressionContext* /*ctx*/) override {}
- void exitUnaryExpression(
- TorqueParser::UnaryExpressionContext* /*ctx*/) override {}
-
- void enterLocationExpression(
- TorqueParser::LocationExpressionContext* /*ctx*/) override {}
- void exitLocationExpression(
- TorqueParser::LocationExpressionContext* /*ctx*/) override {}
-
- void enterIncrementDecrement(
- TorqueParser::IncrementDecrementContext* /*ctx*/) override {}
- void exitIncrementDecrement(
- TorqueParser::IncrementDecrementContext* /*ctx*/) override {}
-
- void enterAssignment(TorqueParser::AssignmentContext* /*ctx*/) override {}
- void exitAssignment(TorqueParser::AssignmentContext* /*ctx*/) override {}
-
- void enterAssignmentExpression(
- TorqueParser::AssignmentExpressionContext* /*ctx*/) override {}
- void exitAssignmentExpression(
- TorqueParser::AssignmentExpressionContext* /*ctx*/) override {}
-
- void enterStructExpression(
- TorqueParser::StructExpressionContext* /*ctx*/) override {}
- void exitStructExpression(
- TorqueParser::StructExpressionContext* /*ctx*/) override {}
-
- void enterFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* /*ctx*/) override {}
- void exitFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* /*ctx*/) override {}
-
- void enterPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* /*ctx*/) override {}
- void exitPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* /*ctx*/) override {}
-
- void enterForInitialization(
- TorqueParser::ForInitializationContext* /*ctx*/) override {}
- void exitForInitialization(
- TorqueParser::ForInitializationContext* /*ctx*/) override {}
-
- void enterForLoop(TorqueParser::ForLoopContext* /*ctx*/) override {}
- void exitForLoop(TorqueParser::ForLoopContext* /*ctx*/) override {}
-
- void enterRangeSpecifier(
- TorqueParser::RangeSpecifierContext* /*ctx*/) override {}
- void exitRangeSpecifier(
- TorqueParser::RangeSpecifierContext* /*ctx*/) override {}
-
- void enterForOfRange(TorqueParser::ForOfRangeContext* /*ctx*/) override {}
- void exitForOfRange(TorqueParser::ForOfRangeContext* /*ctx*/) override {}
-
- void enterForOfLoop(TorqueParser::ForOfLoopContext* /*ctx*/) override {}
- void exitForOfLoop(TorqueParser::ForOfLoopContext* /*ctx*/) override {}
-
- void enterArgument(TorqueParser::ArgumentContext* /*ctx*/) override {}
- void exitArgument(TorqueParser::ArgumentContext* /*ctx*/) override {}
-
- void enterArgumentList(TorqueParser::ArgumentListContext* /*ctx*/) override {}
- void exitArgumentList(TorqueParser::ArgumentListContext* /*ctx*/) override {}
-
- void enterHelperCall(TorqueParser::HelperCallContext* /*ctx*/) override {}
- void exitHelperCall(TorqueParser::HelperCallContext* /*ctx*/) override {}
-
- void enterLabelReference(
- TorqueParser::LabelReferenceContext* /*ctx*/) override {}
- void exitLabelReference(
- TorqueParser::LabelReferenceContext* /*ctx*/) override {}
-
- void enterVariableDeclaration(
- TorqueParser::VariableDeclarationContext* /*ctx*/) override {}
- void exitVariableDeclaration(
- TorqueParser::VariableDeclarationContext* /*ctx*/) override {}
-
- void enterVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* /*ctx*/)
- override {}
- void exitVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* /*ctx*/)
- override {}
-
- void enterHelperCallStatement(
- TorqueParser::HelperCallStatementContext* /*ctx*/) override {}
- void exitHelperCallStatement(
- TorqueParser::HelperCallStatementContext* /*ctx*/) override {}
-
- void enterExpressionStatement(
- TorqueParser::ExpressionStatementContext* /*ctx*/) override {}
- void exitExpressionStatement(
- TorqueParser::ExpressionStatementContext* /*ctx*/) override {}
-
- void enterIfStatement(TorqueParser::IfStatementContext* /*ctx*/) override {}
- void exitIfStatement(TorqueParser::IfStatementContext* /*ctx*/) override {}
-
- void enterWhileLoop(TorqueParser::WhileLoopContext* /*ctx*/) override {}
- void exitWhileLoop(TorqueParser::WhileLoopContext* /*ctx*/) override {}
-
- void enterReturnStatement(
- TorqueParser::ReturnStatementContext* /*ctx*/) override {}
- void exitReturnStatement(
- TorqueParser::ReturnStatementContext* /*ctx*/) override {}
-
- void enterBreakStatement(
- TorqueParser::BreakStatementContext* /*ctx*/) override {}
- void exitBreakStatement(
- TorqueParser::BreakStatementContext* /*ctx*/) override {}
-
- void enterContinueStatement(
- TorqueParser::ContinueStatementContext* /*ctx*/) override {}
- void exitContinueStatement(
- TorqueParser::ContinueStatementContext* /*ctx*/) override {}
-
- void enterGotoStatement(
- TorqueParser::GotoStatementContext* /*ctx*/) override {}
- void exitGotoStatement(TorqueParser::GotoStatementContext* /*ctx*/) override {
- }
-
- void enterHandlerWithStatement(
- TorqueParser::HandlerWithStatementContext* /*ctx*/) override {}
- void exitHandlerWithStatement(
- TorqueParser::HandlerWithStatementContext* /*ctx*/) override {}
-
- void enterTryLabelStatement(
- TorqueParser::TryLabelStatementContext* /*ctx*/) override {}
- void exitTryLabelStatement(
- TorqueParser::TryLabelStatementContext* /*ctx*/) override {}
-
- void enterDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* /*ctx*/) override {}
- void exitDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* /*ctx*/) override {}
-
- void enterStatement(TorqueParser::StatementContext* /*ctx*/) override {}
- void exitStatement(TorqueParser::StatementContext* /*ctx*/) override {}
-
- void enterStatementList(
- TorqueParser::StatementListContext* /*ctx*/) override {}
- void exitStatementList(TorqueParser::StatementListContext* /*ctx*/) override {
- }
-
- void enterStatementScope(
- TorqueParser::StatementScopeContext* /*ctx*/) override {}
- void exitStatementScope(
- TorqueParser::StatementScopeContext* /*ctx*/) override {}
-
- void enterStatementBlock(
- TorqueParser::StatementBlockContext* /*ctx*/) override {}
- void exitStatementBlock(
- TorqueParser::StatementBlockContext* /*ctx*/) override {}
-
- void enterHelperBody(TorqueParser::HelperBodyContext* /*ctx*/) override {}
- void exitHelperBody(TorqueParser::HelperBodyContext* /*ctx*/) override {}
-
- void enterFieldDeclaration(
- TorqueParser::FieldDeclarationContext* /*ctx*/) override {}
- void exitFieldDeclaration(
- TorqueParser::FieldDeclarationContext* /*ctx*/) override {}
-
- void enterFieldListDeclaration(
- TorqueParser::FieldListDeclarationContext* /*ctx*/) override {}
- void exitFieldListDeclaration(
- TorqueParser::FieldListDeclarationContext* /*ctx*/) override {}
-
- void enterExtendsDeclaration(
- TorqueParser::ExtendsDeclarationContext* /*ctx*/) override {}
- void exitExtendsDeclaration(
- TorqueParser::ExtendsDeclarationContext* /*ctx*/) override {}
-
- void enterGeneratesDeclaration(
- TorqueParser::GeneratesDeclarationContext* /*ctx*/) override {}
- void exitGeneratesDeclaration(
- TorqueParser::GeneratesDeclarationContext* /*ctx*/) override {}
-
- void enterConstexprDeclaration(
- TorqueParser::ConstexprDeclarationContext* /*ctx*/) override {}
- void exitConstexprDeclaration(
- TorqueParser::ConstexprDeclarationContext* /*ctx*/) override {}
-
- void enterTypeDeclaration(
- TorqueParser::TypeDeclarationContext* /*ctx*/) override {}
- void exitTypeDeclaration(
- TorqueParser::TypeDeclarationContext* /*ctx*/) override {}
-
- void enterTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* /*ctx*/) override {}
- void exitTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* /*ctx*/) override {}
-
- void enterExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* /*ctx*/) override {}
- void exitExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* /*ctx*/) override {}
-
- void enterExternalMacro(
- TorqueParser::ExternalMacroContext* /*ctx*/) override {}
- void exitExternalMacro(TorqueParser::ExternalMacroContext* /*ctx*/) override {
- }
-
- void enterExternalRuntime(
- TorqueParser::ExternalRuntimeContext* /*ctx*/) override {}
- void exitExternalRuntime(
- TorqueParser::ExternalRuntimeContext* /*ctx*/) override {}
-
- void enterBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* /*ctx*/) override {}
- void exitBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* /*ctx*/) override {}
-
- void enterGenericSpecialization(
- TorqueParser::GenericSpecializationContext* /*ctx*/) override {}
- void exitGenericSpecialization(
- TorqueParser::GenericSpecializationContext* /*ctx*/) override {}
-
- void enterMacroDeclaration(
- TorqueParser::MacroDeclarationContext* /*ctx*/) override {}
- void exitMacroDeclaration(
- TorqueParser::MacroDeclarationContext* /*ctx*/) override {}
-
- void enterExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* /*ctx*/) override {}
- void exitExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* /*ctx*/) override {}
-
- void enterConstDeclaration(
- TorqueParser::ConstDeclarationContext* /*ctx*/) override {}
- void exitConstDeclaration(
- TorqueParser::ConstDeclarationContext* /*ctx*/) override {}
-
- void enterStructDeclaration(
- TorqueParser::StructDeclarationContext* /*ctx*/) override {}
- void exitStructDeclaration(
- TorqueParser::StructDeclarationContext* /*ctx*/) override {}
-
- void enterDeclaration(TorqueParser::DeclarationContext* /*ctx*/) override {}
- void exitDeclaration(TorqueParser::DeclarationContext* /*ctx*/) override {}
-
- void enterModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* /*ctx*/) override {}
- void exitModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* /*ctx*/) override {}
-
- void enterFile(TorqueParser::FileContext* /*ctx*/) override {}
- void exitFile(TorqueParser::FileContext* /*ctx*/) override {}
-
- void enterEveryRule(antlr4::ParserRuleContext* /*ctx*/) override {}
- void exitEveryRule(antlr4::ParserRuleContext* /*ctx*/) override {}
- void visitTerminal(antlr4::tree::TerminalNode* /*node*/) override {}
- void visitErrorNode(antlr4::tree::ErrorNode* /*node*/) override {}
-};
-
-#endif // V8_TORQUE_TORQUEBASELISTENER_H_
diff --git a/deps/v8/src/torque/TorqueBaseVisitor.h b/deps/v8/src/torque/TorqueBaseVisitor.h
deleted file mode 100644
index df84a2ead5..0000000000
--- a/deps/v8/src/torque/TorqueBaseVisitor.h
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_TORQUE_TORQUEBASEVISITOR_H_
-#define V8_TORQUE_TORQUEBASEVISITOR_H_
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#pragma once
-
-#include "./antlr4-runtime.h"
-#include "TorqueVisitor.h"
-
-/**
- * This class provides an empty implementation of TorqueVisitor, which can be
- * extended to create a visitor which only needs to handle a subset of the
- * available methods.
- */
-class TorqueBaseVisitor : public TorqueVisitor {
- public:
- antlrcpp::Any visitType(TorqueParser::TypeContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitTypeList(TorqueParser::TypeListContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitGenericSpecializationTypeList(
- TorqueParser::GenericSpecializationTypeListContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitOptionalGenericTypeList(
- TorqueParser::OptionalGenericTypeListContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitLabelParameter(
- TorqueParser::LabelParameterContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitOptionalType(
- TorqueParser::OptionalTypeContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitOptionalLabelList(
- TorqueParser::OptionalLabelListContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitOptionalOtherwise(
- TorqueParser::OptionalOtherwiseContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitParameter(TorqueParser::ParameterContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitParameterList(
- TorqueParser::ParameterListContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitLabelDeclaration(
- TorqueParser::LabelDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitExpression(TorqueParser::ExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitConditionalExpression(
- TorqueParser::ConditionalExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitEqualityExpression(
- TorqueParser::EqualityExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitRelationalExpression(
- TorqueParser::RelationalExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitShiftExpression(
- TorqueParser::ShiftExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitUnaryExpression(
- TorqueParser::UnaryExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitLocationExpression(
- TorqueParser::LocationExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitIncrementDecrement(
- TorqueParser::IncrementDecrementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitAssignment(TorqueParser::AssignmentContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitAssignmentExpression(
- TorqueParser::AssignmentExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitStructExpression(
- TorqueParser::StructExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitForInitialization(
- TorqueParser::ForInitializationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitForLoop(TorqueParser::ForLoopContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitRangeSpecifier(
- TorqueParser::RangeSpecifierContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitForOfRange(TorqueParser::ForOfRangeContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitForOfLoop(TorqueParser::ForOfLoopContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitArgument(TorqueParser::ArgumentContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitArgumentList(
- TorqueParser::ArgumentListContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitHelperCall(TorqueParser::HelperCallContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitLabelReference(
- TorqueParser::LabelReferenceContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitVariableDeclaration(
- TorqueParser::VariableDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* ctx)
- override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitHelperCallStatement(
- TorqueParser::HelperCallStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitExpressionStatement(
- TorqueParser::ExpressionStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitIfStatement(
- TorqueParser::IfStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitWhileLoop(TorqueParser::WhileLoopContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitReturnStatement(
- TorqueParser::ReturnStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitBreakStatement(
- TorqueParser::BreakStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitContinueStatement(
- TorqueParser::ContinueStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitGotoStatement(
- TorqueParser::GotoStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitHandlerWithStatement(
- TorqueParser::HandlerWithStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitTryLabelStatement(
- TorqueParser::TryLabelStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitStatement(TorqueParser::StatementContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitStatementList(
- TorqueParser::StatementListContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitStatementScope(
- TorqueParser::StatementScopeContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitStatementBlock(
- TorqueParser::StatementBlockContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitHelperBody(TorqueParser::HelperBodyContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitFieldDeclaration(
- TorqueParser::FieldDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitFieldListDeclaration(
- TorqueParser::FieldListDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitExtendsDeclaration(
- TorqueParser::ExtendsDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitGeneratesDeclaration(
- TorqueParser::GeneratesDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitConstexprDeclaration(
- TorqueParser::ConstexprDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitTypeDeclaration(
- TorqueParser::TypeDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitExternalMacro(
- TorqueParser::ExternalMacroContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitExternalRuntime(
- TorqueParser::ExternalRuntimeContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitGenericSpecialization(
- TorqueParser::GenericSpecializationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitMacroDeclaration(
- TorqueParser::MacroDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitConstDeclaration(
- TorqueParser::ConstDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitStructDeclaration(
- TorqueParser::StructDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitDeclaration(
- TorqueParser::DeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* ctx) override {
- return visitChildren(ctx);
- }
-
- antlrcpp::Any visitFile(TorqueParser::FileContext* ctx) override {
- return visitChildren(ctx);
- }
-};
-
-#endif // V8_TORQUE_TORQUEBASEVISITOR_H_
diff --git a/deps/v8/src/torque/TorqueLexer.cpp b/deps/v8/src/torque/TorqueLexer.cpp
deleted file mode 100644
index b48f0cbf78..0000000000
--- a/deps/v8/src/torque/TorqueLexer.cpp
+++ /dev/null
@@ -1,988 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#include "TorqueLexer.h"
-
-using namespace antlr4;
-
-TorqueLexer::TorqueLexer(CharStream* input) : Lexer(input) {
- _interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA,
- _sharedContextCache);
-}
-
-TorqueLexer::~TorqueLexer() { delete _interpreter; }
-
-std::string TorqueLexer::getGrammarFileName() const { return "Torque.g4"; }
-
-const std::vector<std::string>& TorqueLexer::getRuleNames() const {
- return _ruleNames;
-}
-
-const std::vector<std::string>& TorqueLexer::getChannelNames() const {
- return _channelNames;
-}
-
-const std::vector<std::string>& TorqueLexer::getModeNames() const {
- return _modeNames;
-}
-
-const std::vector<std::string>& TorqueLexer::getTokenNames() const {
- return _tokenNames;
-}
-
-dfa::Vocabulary& TorqueLexer::getVocabulary() const { return _vocabulary; }
-
-const std::vector<uint16_t> TorqueLexer::getSerializedATN() const {
- return _serializedATN;
-}
-
-const atn::ATN& TorqueLexer::getATN() const { return _atn; }
-
-// Static vars and initialization.
-std::vector<dfa::DFA> TorqueLexer::_decisionToDFA;
-atn::PredictionContextCache TorqueLexer::_sharedContextCache;
-
-// We own the ATN which in turn owns the ATN states.
-atn::ATN TorqueLexer::_atn;
-std::vector<uint16_t> TorqueLexer::_serializedATN;
-
-std::vector<std::string> TorqueLexer::_ruleNames = {u8"T__0",
- u8"T__1",
- u8"T__2",
- u8"T__3",
- u8"T__4",
- u8"T__5",
- u8"T__6",
- u8"T__7",
- u8"T__8",
- u8"T__9",
- u8"T__10",
- u8"T__11",
- u8"T__12",
- u8"T__13",
- u8"T__14",
- u8"T__15",
- u8"T__16",
- u8"T__17",
- u8"T__18",
- u8"T__19",
- u8"T__20",
- u8"MACRO",
- u8"BUILTIN",
- u8"RUNTIME",
- u8"MODULE",
- u8"JAVASCRIPT",
- u8"DEFERRED",
- u8"IF",
- u8"FOR",
- u8"WHILE",
- u8"RETURN",
- u8"CONSTEXPR",
- u8"CONTINUE",
- u8"BREAK",
- u8"GOTO",
- u8"OTHERWISE",
- u8"TRY",
- u8"LABEL",
- u8"LABELS",
- u8"TAIL",
- u8"ISNT",
- u8"IS",
- u8"LET",
- u8"CONST",
- u8"EXTERN",
- u8"ASSERT_TOKEN",
- u8"CHECK_TOKEN",
- u8"UNREACHABLE_TOKEN",
- u8"DEBUG_TOKEN",
- u8"ASSIGNMENT",
- u8"ASSIGNMENT_OPERATOR",
- u8"EQUAL",
- u8"PLUS",
- u8"MINUS",
- u8"MULTIPLY",
- u8"DIVIDE",
- u8"MODULO",
- u8"BIT_OR",
- u8"BIT_AND",
- u8"BIT_NOT",
- u8"MAX",
- u8"MIN",
- u8"NOT_EQUAL",
- u8"LESS_THAN",
- u8"LESS_THAN_EQUAL",
- u8"GREATER_THAN",
- u8"GREATER_THAN_EQUAL",
- u8"SHIFT_LEFT",
- u8"SHIFT_RIGHT",
- u8"SHIFT_RIGHT_ARITHMETIC",
- u8"VARARGS",
- u8"EQUALITY_OPERATOR",
- u8"INCREMENT",
- u8"DECREMENT",
- u8"NOT",
- u8"STRING_LITERAL",
- u8"ESCAPE",
- u8"IDENTIFIER",
- u8"WS",
- u8"BLOCK_COMMENT",
- u8"LINE_COMMENT",
- u8"DECIMAL_DIGIT",
- u8"DECIMAL_INTEGER_LITERAL",
- u8"EXPONENT_PART",
- u8"DECIMAL_LITERAL"};
-
-std::vector<std::string> TorqueLexer::_channelNames = {"DEFAULT_TOKEN_CHANNEL",
- "HIDDEN"};
-
-std::vector<std::string> TorqueLexer::_modeNames = {u8"DEFAULT_MODE"};
-
-std::vector<std::string> TorqueLexer::_literalNames = {"",
- u8"'('",
- u8"')'",
- u8"'=>'",
- u8"','",
- u8"':'",
- u8"'type'",
- u8"'?'",
- u8"'||'",
- u8"'&&'",
- u8"'.'",
- u8"'['",
- u8"']'",
- u8"'{'",
- u8"'}'",
- u8"';'",
- u8"'of'",
- u8"'else'",
- u8"'extends'",
- u8"'generates'",
- u8"'operator'",
- u8"'struct'",
- u8"'macro'",
- u8"'builtin'",
- u8"'runtime'",
- u8"'module'",
- u8"'javascript'",
- u8"'deferred'",
- u8"'if'",
- u8"'for'",
- u8"'while'",
- u8"'return'",
- u8"'constexpr'",
- u8"'continue'",
- u8"'break'",
- u8"'goto'",
- u8"'otherwise'",
- u8"'try'",
- u8"'label'",
- u8"'labels'",
- u8"'tail'",
- u8"'isnt'",
- u8"'is'",
- u8"'let'",
- u8"'const'",
- u8"'extern'",
- u8"'assert'",
- u8"'check'",
- u8"'unreachable'",
- u8"'debug'",
- u8"'='",
- "",
- u8"'=='",
- u8"'+'",
- u8"'-'",
- u8"'*'",
- u8"'/'",
- u8"'%'",
- u8"'|'",
- u8"'&'",
- u8"'~'",
- u8"'max'",
- u8"'min'",
- u8"'!='",
- u8"'<'",
- u8"'<='",
- u8"'>'",
- u8"'>='",
- u8"'<<'",
- u8"'>>'",
- u8"'>>>'",
- u8"'...'",
- "",
- u8"'++'",
- u8"'--'",
- u8"'!'"};
-
-std::vector<std::string> TorqueLexer::_symbolicNames = {
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- u8"MACRO",
- u8"BUILTIN",
- u8"RUNTIME",
- u8"MODULE",
- u8"JAVASCRIPT",
- u8"DEFERRED",
- u8"IF",
- u8"FOR",
- u8"WHILE",
- u8"RETURN",
- u8"CONSTEXPR",
- u8"CONTINUE",
- u8"BREAK",
- u8"GOTO",
- u8"OTHERWISE",
- u8"TRY",
- u8"LABEL",
- u8"LABELS",
- u8"TAIL",
- u8"ISNT",
- u8"IS",
- u8"LET",
- u8"CONST",
- u8"EXTERN",
- u8"ASSERT_TOKEN",
- u8"CHECK_TOKEN",
- u8"UNREACHABLE_TOKEN",
- u8"DEBUG_TOKEN",
- u8"ASSIGNMENT",
- u8"ASSIGNMENT_OPERATOR",
- u8"EQUAL",
- u8"PLUS",
- u8"MINUS",
- u8"MULTIPLY",
- u8"DIVIDE",
- u8"MODULO",
- u8"BIT_OR",
- u8"BIT_AND",
- u8"BIT_NOT",
- u8"MAX",
- u8"MIN",
- u8"NOT_EQUAL",
- u8"LESS_THAN",
- u8"LESS_THAN_EQUAL",
- u8"GREATER_THAN",
- u8"GREATER_THAN_EQUAL",
- u8"SHIFT_LEFT",
- u8"SHIFT_RIGHT",
- u8"SHIFT_RIGHT_ARITHMETIC",
- u8"VARARGS",
- u8"EQUALITY_OPERATOR",
- u8"INCREMENT",
- u8"DECREMENT",
- u8"NOT",
- u8"STRING_LITERAL",
- u8"IDENTIFIER",
- u8"WS",
- u8"BLOCK_COMMENT",
- u8"LINE_COMMENT",
- u8"DECIMAL_LITERAL"};
-
-dfa::Vocabulary TorqueLexer::_vocabulary(_literalNames, _symbolicNames);
-
-std::vector<std::string> TorqueLexer::_tokenNames;
-
-TorqueLexer::Initializer::Initializer() {
- // This code could be in a static initializer lambda, but VS doesn't allow
- // access to private class members from there.
- for (size_t i = 0; i < _symbolicNames.size(); ++i) {
- std::string name = _vocabulary.getLiteralName(i);
- if (name.empty()) {
- name = _vocabulary.getSymbolicName(i);
- }
-
- if (name.empty()) {
- _tokenNames.push_back("<INVALID>");
- } else {
- _tokenNames.push_back(name);
- }
- }
-
- _serializedATN = {
- 0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964,
- 0x2, 0x53, 0x299, 0x8, 0x1, 0x4, 0x2, 0x9, 0x2,
- 0x4, 0x3, 0x9, 0x3, 0x4, 0x4, 0x9, 0x4, 0x4,
- 0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7,
- 0x9, 0x7, 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9,
- 0x9, 0x4, 0xa, 0x9, 0xa, 0x4, 0xb, 0x9, 0xb,
- 0x4, 0xc, 0x9, 0xc, 0x4, 0xd, 0x9, 0xd, 0x4,
- 0xe, 0x9, 0xe, 0x4, 0xf, 0x9, 0xf, 0x4, 0x10,
- 0x9, 0x10, 0x4, 0x11, 0x9, 0x11, 0x4, 0x12, 0x9,
- 0x12, 0x4, 0x13, 0x9, 0x13, 0x4, 0x14, 0x9, 0x14,
- 0x4, 0x15, 0x9, 0x15, 0x4, 0x16, 0x9, 0x16, 0x4,
- 0x17, 0x9, 0x17, 0x4, 0x18, 0x9, 0x18, 0x4, 0x19,
- 0x9, 0x19, 0x4, 0x1a, 0x9, 0x1a, 0x4, 0x1b, 0x9,
- 0x1b, 0x4, 0x1c, 0x9, 0x1c, 0x4, 0x1d, 0x9, 0x1d,
- 0x4, 0x1e, 0x9, 0x1e, 0x4, 0x1f, 0x9, 0x1f, 0x4,
- 0x20, 0x9, 0x20, 0x4, 0x21, 0x9, 0x21, 0x4, 0x22,
- 0x9, 0x22, 0x4, 0x23, 0x9, 0x23, 0x4, 0x24, 0x9,
- 0x24, 0x4, 0x25, 0x9, 0x25, 0x4, 0x26, 0x9, 0x26,
- 0x4, 0x27, 0x9, 0x27, 0x4, 0x28, 0x9, 0x28, 0x4,
- 0x29, 0x9, 0x29, 0x4, 0x2a, 0x9, 0x2a, 0x4, 0x2b,
- 0x9, 0x2b, 0x4, 0x2c, 0x9, 0x2c, 0x4, 0x2d, 0x9,
- 0x2d, 0x4, 0x2e, 0x9, 0x2e, 0x4, 0x2f, 0x9, 0x2f,
- 0x4, 0x30, 0x9, 0x30, 0x4, 0x31, 0x9, 0x31, 0x4,
- 0x32, 0x9, 0x32, 0x4, 0x33, 0x9, 0x33, 0x4, 0x34,
- 0x9, 0x34, 0x4, 0x35, 0x9, 0x35, 0x4, 0x36, 0x9,
- 0x36, 0x4, 0x37, 0x9, 0x37, 0x4, 0x38, 0x9, 0x38,
- 0x4, 0x39, 0x9, 0x39, 0x4, 0x3a, 0x9, 0x3a, 0x4,
- 0x3b, 0x9, 0x3b, 0x4, 0x3c, 0x9, 0x3c, 0x4, 0x3d,
- 0x9, 0x3d, 0x4, 0x3e, 0x9, 0x3e, 0x4, 0x3f, 0x9,
- 0x3f, 0x4, 0x40, 0x9, 0x40, 0x4, 0x41, 0x9, 0x41,
- 0x4, 0x42, 0x9, 0x42, 0x4, 0x43, 0x9, 0x43, 0x4,
- 0x44, 0x9, 0x44, 0x4, 0x45, 0x9, 0x45, 0x4, 0x46,
- 0x9, 0x46, 0x4, 0x47, 0x9, 0x47, 0x4, 0x48, 0x9,
- 0x48, 0x4, 0x49, 0x9, 0x49, 0x4, 0x4a, 0x9, 0x4a,
- 0x4, 0x4b, 0x9, 0x4b, 0x4, 0x4c, 0x9, 0x4c, 0x4,
- 0x4d, 0x9, 0x4d, 0x4, 0x4e, 0x9, 0x4e, 0x4, 0x4f,
- 0x9, 0x4f, 0x4, 0x50, 0x9, 0x50, 0x4, 0x51, 0x9,
- 0x51, 0x4, 0x52, 0x9, 0x52, 0x4, 0x53, 0x9, 0x53,
- 0x4, 0x54, 0x9, 0x54, 0x4, 0x55, 0x9, 0x55, 0x4,
- 0x56, 0x9, 0x56, 0x3, 0x2, 0x3, 0x2, 0x3, 0x3,
- 0x3, 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3,
- 0x5, 0x3, 0x5, 0x3, 0x6, 0x3, 0x6, 0x3, 0x7,
- 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3,
- 0x8, 0x3, 0x8, 0x3, 0x9, 0x3, 0x9, 0x3, 0x9,
- 0x3, 0xa, 0x3, 0xa, 0x3, 0xa, 0x3, 0xb, 0x3,
- 0xb, 0x3, 0xc, 0x3, 0xc, 0x3, 0xd, 0x3, 0xd,
- 0x3, 0xe, 0x3, 0xe, 0x3, 0xf, 0x3, 0xf, 0x3,
- 0x10, 0x3, 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11,
- 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3,
- 0x12, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13,
- 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3,
- 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14,
- 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x3,
- 0x14, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15,
- 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3,
- 0x15, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16,
- 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x17, 0x3,
- 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17,
- 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3,
- 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x3, 0x19,
- 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3,
- 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x1a, 0x3, 0x1a,
- 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3,
- 0x1a, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b,
- 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3,
- 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1c, 0x3, 0x1c,
- 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3,
- 0x1c, 0x3, 0x1c, 0x3, 0x1c, 0x3, 0x1d, 0x3, 0x1d,
- 0x3, 0x1d, 0x3, 0x1e, 0x3, 0x1e, 0x3, 0x1e, 0x3,
- 0x1e, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f,
- 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x20, 0x3, 0x20, 0x3,
- 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20, 0x3, 0x20,
- 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3,
- 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21,
- 0x3, 0x21, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3,
- 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22,
- 0x3, 0x22, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3,
- 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x24, 0x3, 0x24,
- 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x25, 0x3,
- 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25,
- 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3, 0x25, 0x3,
- 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x27,
- 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, 0x3, 0x27, 0x3,
- 0x27, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x28,
- 0x3, 0x28, 0x3, 0x28, 0x3, 0x28, 0x3, 0x29, 0x3,
- 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x2a,
- 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3, 0x2a, 0x3,
- 0x2b, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2c, 0x3, 0x2c,
- 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3, 0x2d, 0x3,
- 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2e,
- 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3,
- 0x2e, 0x3, 0x2e, 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f,
- 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f, 0x3, 0x2f, 0x3,
- 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30, 0x3, 0x30,
- 0x3, 0x30, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3,
- 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31,
- 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3, 0x31, 0x3,
- 0x32, 0x3, 0x32, 0x3, 0x32, 0x3, 0x32, 0x3, 0x32,
- 0x3, 0x32, 0x3, 0x33, 0x3, 0x33, 0x3, 0x34, 0x3,
- 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
- 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3,
- 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
- 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3,
- 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34, 0x3, 0x34,
- 0x3, 0x34, 0x3, 0x34, 0x5, 0x34, 0x1d3, 0xa, 0x34,
- 0x3, 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x36, 0x3,
- 0x36, 0x3, 0x37, 0x3, 0x37, 0x3, 0x38, 0x3, 0x38,
- 0x3, 0x39, 0x3, 0x39, 0x3, 0x3a, 0x3, 0x3a, 0x3,
- 0x3b, 0x3, 0x3b, 0x3, 0x3c, 0x3, 0x3c, 0x3, 0x3d,
- 0x3, 0x3d, 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3e, 0x3,
- 0x3e, 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x3f, 0x3, 0x3f,
- 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x3, 0x41, 0x3,
- 0x41, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x43,
- 0x3, 0x43, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3,
- 0x45, 0x3, 0x45, 0x3, 0x45, 0x3, 0x46, 0x3, 0x46,
- 0x3, 0x46, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47, 0x3,
- 0x47, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48,
- 0x3, 0x49, 0x3, 0x49, 0x5, 0x49, 0x20d, 0xa, 0x49,
- 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4b, 0x3,
- 0x4b, 0x3, 0x4b, 0x3, 0x4c, 0x3, 0x4c, 0x3, 0x4d,
- 0x3, 0x4d, 0x3, 0x4d, 0x7, 0x4d, 0x21a, 0xa, 0x4d,
- 0xc, 0x4d, 0xe, 0x4d, 0x21d, 0xb, 0x4d, 0x3, 0x4d,
- 0x3, 0x4d, 0x3, 0x4d, 0x3, 0x4d, 0x7, 0x4d, 0x223,
- 0xa, 0x4d, 0xc, 0x4d, 0xe, 0x4d, 0x226, 0xb, 0x4d,
- 0x3, 0x4d, 0x5, 0x4d, 0x229, 0xa, 0x4d, 0x3, 0x4e,
- 0x3, 0x4e, 0x3, 0x4e, 0x3, 0x4f, 0x3, 0x4f, 0x7,
- 0x4f, 0x230, 0xa, 0x4f, 0xc, 0x4f, 0xe, 0x4f, 0x233,
- 0xb, 0x4f, 0x3, 0x50, 0x6, 0x50, 0x236, 0xa, 0x50,
- 0xd, 0x50, 0xe, 0x50, 0x237, 0x3, 0x50, 0x3, 0x50,
- 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x7,
- 0x51, 0x240, 0xa, 0x51, 0xc, 0x51, 0xe, 0x51, 0x243,
- 0xb, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3, 0x51, 0x5,
- 0x51, 0x248, 0xa, 0x51, 0x3, 0x51, 0x3, 0x51, 0x3,
- 0x52, 0x3, 0x52, 0x3, 0x52, 0x3, 0x52, 0x7, 0x52,
- 0x250, 0xa, 0x52, 0xc, 0x52, 0xe, 0x52, 0x253, 0xb,
- 0x52, 0x3, 0x52, 0x3, 0x52, 0x3, 0x53, 0x3, 0x53,
- 0x3, 0x54, 0x3, 0x54, 0x3, 0x54, 0x7, 0x54, 0x25c,
- 0xa, 0x54, 0xc, 0x54, 0xe, 0x54, 0x25f, 0xb, 0x54,
- 0x5, 0x54, 0x261, 0xa, 0x54, 0x3, 0x55, 0x3, 0x55,
- 0x5, 0x55, 0x265, 0xa, 0x55, 0x3, 0x55, 0x6, 0x55,
- 0x268, 0xa, 0x55, 0xd, 0x55, 0xe, 0x55, 0x269, 0x3,
- 0x56, 0x5, 0x56, 0x26d, 0xa, 0x56, 0x3, 0x56, 0x3,
- 0x56, 0x3, 0x56, 0x7, 0x56, 0x272, 0xa, 0x56, 0xc,
- 0x56, 0xe, 0x56, 0x275, 0xb, 0x56, 0x3, 0x56, 0x5,
- 0x56, 0x278, 0xa, 0x56, 0x3, 0x56, 0x5, 0x56, 0x27b,
- 0xa, 0x56, 0x3, 0x56, 0x3, 0x56, 0x6, 0x56, 0x27f,
- 0xa, 0x56, 0xd, 0x56, 0xe, 0x56, 0x280, 0x3, 0x56,
- 0x5, 0x56, 0x284, 0xa, 0x56, 0x3, 0x56, 0x5, 0x56,
- 0x287, 0xa, 0x56, 0x3, 0x56, 0x3, 0x56, 0x5, 0x56,
- 0x28b, 0xa, 0x56, 0x3, 0x56, 0x5, 0x56, 0x28e, 0xa,
- 0x56, 0x3, 0x56, 0x3, 0x56, 0x3, 0x56, 0x3, 0x56,
- 0x6, 0x56, 0x294, 0xa, 0x56, 0xd, 0x56, 0xe, 0x56,
- 0x295, 0x5, 0x56, 0x298, 0xa, 0x56, 0x3, 0x241, 0x2,
- 0x57, 0x3, 0x3, 0x5, 0x4, 0x7, 0x5, 0x9, 0x6,
- 0xb, 0x7, 0xd, 0x8, 0xf, 0x9, 0x11, 0xa, 0x13,
- 0xb, 0x15, 0xc, 0x17, 0xd, 0x19, 0xe, 0x1b, 0xf,
- 0x1d, 0x10, 0x1f, 0x11, 0x21, 0x12, 0x23, 0x13, 0x25,
- 0x14, 0x27, 0x15, 0x29, 0x16, 0x2b, 0x17, 0x2d, 0x18,
- 0x2f, 0x19, 0x31, 0x1a, 0x33, 0x1b, 0x35, 0x1c, 0x37,
- 0x1d, 0x39, 0x1e, 0x3b, 0x1f, 0x3d, 0x20, 0x3f, 0x21,
- 0x41, 0x22, 0x43, 0x23, 0x45, 0x24, 0x47, 0x25, 0x49,
- 0x26, 0x4b, 0x27, 0x4d, 0x28, 0x4f, 0x29, 0x51, 0x2a,
- 0x53, 0x2b, 0x55, 0x2c, 0x57, 0x2d, 0x59, 0x2e, 0x5b,
- 0x2f, 0x5d, 0x30, 0x5f, 0x31, 0x61, 0x32, 0x63, 0x33,
- 0x65, 0x34, 0x67, 0x35, 0x69, 0x36, 0x6b, 0x37, 0x6d,
- 0x38, 0x6f, 0x39, 0x71, 0x3a, 0x73, 0x3b, 0x75, 0x3c,
- 0x77, 0x3d, 0x79, 0x3e, 0x7b, 0x3f, 0x7d, 0x40, 0x7f,
- 0x41, 0x81, 0x42, 0x83, 0x43, 0x85, 0x44, 0x87, 0x45,
- 0x89, 0x46, 0x8b, 0x47, 0x8d, 0x48, 0x8f, 0x49, 0x91,
- 0x4a, 0x93, 0x4b, 0x95, 0x4c, 0x97, 0x4d, 0x99, 0x4e,
- 0x9b, 0x2, 0x9d, 0x4f, 0x9f, 0x50, 0xa1, 0x51, 0xa3,
- 0x52, 0xa5, 0x2, 0xa7, 0x2, 0xa9, 0x2, 0xab, 0x53,
- 0x3, 0x2, 0xe, 0x6, 0x2, 0xc, 0xc, 0xf, 0xf,
- 0x24, 0x24, 0x5e, 0x5e, 0x6, 0x2, 0xc, 0xc, 0xf,
- 0xf, 0x29, 0x29, 0x5e, 0x5e, 0x7, 0x2, 0x24, 0x24,
- 0x29, 0x29, 0x5e, 0x5e, 0x70, 0x70, 0x74, 0x74, 0x4,
- 0x2, 0x43, 0x5c, 0x63, 0x7c, 0x6, 0x2, 0x32, 0x3b,
- 0x43, 0x5c, 0x61, 0x61, 0x63, 0x7c, 0x5, 0x2, 0xb,
- 0xc, 0xe, 0xf, 0x22, 0x22, 0x4, 0x2, 0xc, 0xc,
- 0xf, 0xf, 0x3, 0x2, 0x32, 0x3b, 0x3, 0x2, 0x33,
- 0x3b, 0x4, 0x2, 0x47, 0x47, 0x67, 0x67, 0x4, 0x2,
- 0x2d, 0x2d, 0x2f, 0x2f, 0x5, 0x2, 0x32, 0x3b, 0x43,
- 0x48, 0x63, 0x68, 0x2, 0x2ba, 0x2, 0x3, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x5, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x7, 0x3, 0x2, 0x2, 0x2, 0x2, 0x9, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0xb, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0xd, 0x3, 0x2, 0x2, 0x2, 0x2, 0xf, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x11, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x13, 0x3, 0x2, 0x2, 0x2, 0x2, 0x15, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x17, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x19, 0x3, 0x2, 0x2, 0x2, 0x2, 0x1b, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x1d, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x1f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x21, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x23, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x25, 0x3, 0x2, 0x2, 0x2, 0x2, 0x27, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x29, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x2b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2d, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x2f, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x31, 0x3, 0x2, 0x2, 0x2, 0x2, 0x33, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x35, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x37, 0x3, 0x2, 0x2, 0x2, 0x2, 0x39, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x3b, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x3d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x3f, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x41, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x43, 0x3, 0x2, 0x2, 0x2, 0x2, 0x45, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x47, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x49, 0x3, 0x2, 0x2, 0x2, 0x2, 0x4b, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x4d, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x4f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x51, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x53, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x55, 0x3, 0x2, 0x2, 0x2, 0x2, 0x57, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x59, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x5b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x5d, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x5f, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x61, 0x3, 0x2, 0x2, 0x2, 0x2, 0x63, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x65, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x67, 0x3, 0x2, 0x2, 0x2, 0x2, 0x69, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x6b, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x6d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x6f, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x71, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x73, 0x3, 0x2, 0x2, 0x2, 0x2, 0x75, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x77, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x79, 0x3, 0x2, 0x2, 0x2, 0x2, 0x7b, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x7d, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x7f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x81, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x83, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x85, 0x3, 0x2, 0x2, 0x2, 0x2, 0x87, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x89, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x8b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x8d, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x8f, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x91, 0x3, 0x2, 0x2, 0x2, 0x2, 0x93, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x95, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x97, 0x3, 0x2, 0x2, 0x2, 0x2, 0x99, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0x9d, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0x9f, 0x3, 0x2, 0x2, 0x2, 0x2, 0xa1, 0x3, 0x2,
- 0x2, 0x2, 0x2, 0xa3, 0x3, 0x2, 0x2, 0x2, 0x2,
- 0xab, 0x3, 0x2, 0x2, 0x2, 0x3, 0xad, 0x3, 0x2,
- 0x2, 0x2, 0x5, 0xaf, 0x3, 0x2, 0x2, 0x2, 0x7,
- 0xb1, 0x3, 0x2, 0x2, 0x2, 0x9, 0xb4, 0x3, 0x2,
- 0x2, 0x2, 0xb, 0xb6, 0x3, 0x2, 0x2, 0x2, 0xd,
- 0xb8, 0x3, 0x2, 0x2, 0x2, 0xf, 0xbd, 0x3, 0x2,
- 0x2, 0x2, 0x11, 0xbf, 0x3, 0x2, 0x2, 0x2, 0x13,
- 0xc2, 0x3, 0x2, 0x2, 0x2, 0x15, 0xc5, 0x3, 0x2,
- 0x2, 0x2, 0x17, 0xc7, 0x3, 0x2, 0x2, 0x2, 0x19,
- 0xc9, 0x3, 0x2, 0x2, 0x2, 0x1b, 0xcb, 0x3, 0x2,
- 0x2, 0x2, 0x1d, 0xcd, 0x3, 0x2, 0x2, 0x2, 0x1f,
- 0xcf, 0x3, 0x2, 0x2, 0x2, 0x21, 0xd1, 0x3, 0x2,
- 0x2, 0x2, 0x23, 0xd4, 0x3, 0x2, 0x2, 0x2, 0x25,
- 0xd9, 0x3, 0x2, 0x2, 0x2, 0x27, 0xe1, 0x3, 0x2,
- 0x2, 0x2, 0x29, 0xeb, 0x3, 0x2, 0x2, 0x2, 0x2b,
- 0xf4, 0x3, 0x2, 0x2, 0x2, 0x2d, 0xfb, 0x3, 0x2,
- 0x2, 0x2, 0x2f, 0x101, 0x3, 0x2, 0x2, 0x2, 0x31,
- 0x109, 0x3, 0x2, 0x2, 0x2, 0x33, 0x111, 0x3, 0x2,
- 0x2, 0x2, 0x35, 0x118, 0x3, 0x2, 0x2, 0x2, 0x37,
- 0x123, 0x3, 0x2, 0x2, 0x2, 0x39, 0x12c, 0x3, 0x2,
- 0x2, 0x2, 0x3b, 0x12f, 0x3, 0x2, 0x2, 0x2, 0x3d,
- 0x133, 0x3, 0x2, 0x2, 0x2, 0x3f, 0x139, 0x3, 0x2,
- 0x2, 0x2, 0x41, 0x140, 0x3, 0x2, 0x2, 0x2, 0x43,
- 0x14a, 0x3, 0x2, 0x2, 0x2, 0x45, 0x153, 0x3, 0x2,
- 0x2, 0x2, 0x47, 0x159, 0x3, 0x2, 0x2, 0x2, 0x49,
- 0x15e, 0x3, 0x2, 0x2, 0x2, 0x4b, 0x168, 0x3, 0x2,
- 0x2, 0x2, 0x4d, 0x16c, 0x3, 0x2, 0x2, 0x2, 0x4f,
- 0x172, 0x3, 0x2, 0x2, 0x2, 0x51, 0x179, 0x3, 0x2,
- 0x2, 0x2, 0x53, 0x17e, 0x3, 0x2, 0x2, 0x2, 0x55,
- 0x183, 0x3, 0x2, 0x2, 0x2, 0x57, 0x186, 0x3, 0x2,
- 0x2, 0x2, 0x59, 0x18a, 0x3, 0x2, 0x2, 0x2, 0x5b,
- 0x190, 0x3, 0x2, 0x2, 0x2, 0x5d, 0x197, 0x3, 0x2,
- 0x2, 0x2, 0x5f, 0x19e, 0x3, 0x2, 0x2, 0x2, 0x61,
- 0x1a4, 0x3, 0x2, 0x2, 0x2, 0x63, 0x1b0, 0x3, 0x2,
- 0x2, 0x2, 0x65, 0x1b6, 0x3, 0x2, 0x2, 0x2, 0x67,
- 0x1d2, 0x3, 0x2, 0x2, 0x2, 0x69, 0x1d4, 0x3, 0x2,
- 0x2, 0x2, 0x6b, 0x1d7, 0x3, 0x2, 0x2, 0x2, 0x6d,
- 0x1d9, 0x3, 0x2, 0x2, 0x2, 0x6f, 0x1db, 0x3, 0x2,
- 0x2, 0x2, 0x71, 0x1dd, 0x3, 0x2, 0x2, 0x2, 0x73,
- 0x1df, 0x3, 0x2, 0x2, 0x2, 0x75, 0x1e1, 0x3, 0x2,
- 0x2, 0x2, 0x77, 0x1e3, 0x3, 0x2, 0x2, 0x2, 0x79,
- 0x1e5, 0x3, 0x2, 0x2, 0x2, 0x7b, 0x1e7, 0x3, 0x2,
- 0x2, 0x2, 0x7d, 0x1eb, 0x3, 0x2, 0x2, 0x2, 0x7f,
- 0x1ef, 0x3, 0x2, 0x2, 0x2, 0x81, 0x1f2, 0x3, 0x2,
- 0x2, 0x2, 0x83, 0x1f4, 0x3, 0x2, 0x2, 0x2, 0x85,
- 0x1f7, 0x3, 0x2, 0x2, 0x2, 0x87, 0x1f9, 0x3, 0x2,
- 0x2, 0x2, 0x89, 0x1fc, 0x3, 0x2, 0x2, 0x2, 0x8b,
- 0x1ff, 0x3, 0x2, 0x2, 0x2, 0x8d, 0x202, 0x3, 0x2,
- 0x2, 0x2, 0x8f, 0x206, 0x3, 0x2, 0x2, 0x2, 0x91,
- 0x20c, 0x3, 0x2, 0x2, 0x2, 0x93, 0x20e, 0x3, 0x2,
- 0x2, 0x2, 0x95, 0x211, 0x3, 0x2, 0x2, 0x2, 0x97,
- 0x214, 0x3, 0x2, 0x2, 0x2, 0x99, 0x228, 0x3, 0x2,
- 0x2, 0x2, 0x9b, 0x22a, 0x3, 0x2, 0x2, 0x2, 0x9d,
- 0x22d, 0x3, 0x2, 0x2, 0x2, 0x9f, 0x235, 0x3, 0x2,
- 0x2, 0x2, 0xa1, 0x23b, 0x3, 0x2, 0x2, 0x2, 0xa3,
- 0x24b, 0x3, 0x2, 0x2, 0x2, 0xa5, 0x256, 0x3, 0x2,
- 0x2, 0x2, 0xa7, 0x260, 0x3, 0x2, 0x2, 0x2, 0xa9,
- 0x262, 0x3, 0x2, 0x2, 0x2, 0xab, 0x297, 0x3, 0x2,
- 0x2, 0x2, 0xad, 0xae, 0x7, 0x2a, 0x2, 0x2, 0xae,
- 0x4, 0x3, 0x2, 0x2, 0x2, 0xaf, 0xb0, 0x7, 0x2b,
- 0x2, 0x2, 0xb0, 0x6, 0x3, 0x2, 0x2, 0x2, 0xb1,
- 0xb2, 0x7, 0x3f, 0x2, 0x2, 0xb2, 0xb3, 0x7, 0x40,
- 0x2, 0x2, 0xb3, 0x8, 0x3, 0x2, 0x2, 0x2, 0xb4,
- 0xb5, 0x7, 0x2e, 0x2, 0x2, 0xb5, 0xa, 0x3, 0x2,
- 0x2, 0x2, 0xb6, 0xb7, 0x7, 0x3c, 0x2, 0x2, 0xb7,
- 0xc, 0x3, 0x2, 0x2, 0x2, 0xb8, 0xb9, 0x7, 0x76,
- 0x2, 0x2, 0xb9, 0xba, 0x7, 0x7b, 0x2, 0x2, 0xba,
- 0xbb, 0x7, 0x72, 0x2, 0x2, 0xbb, 0xbc, 0x7, 0x67,
- 0x2, 0x2, 0xbc, 0xe, 0x3, 0x2, 0x2, 0x2, 0xbd,
- 0xbe, 0x7, 0x41, 0x2, 0x2, 0xbe, 0x10, 0x3, 0x2,
- 0x2, 0x2, 0xbf, 0xc0, 0x7, 0x7e, 0x2, 0x2, 0xc0,
- 0xc1, 0x7, 0x7e, 0x2, 0x2, 0xc1, 0x12, 0x3, 0x2,
- 0x2, 0x2, 0xc2, 0xc3, 0x7, 0x28, 0x2, 0x2, 0xc3,
- 0xc4, 0x7, 0x28, 0x2, 0x2, 0xc4, 0x14, 0x3, 0x2,
- 0x2, 0x2, 0xc5, 0xc6, 0x7, 0x30, 0x2, 0x2, 0xc6,
- 0x16, 0x3, 0x2, 0x2, 0x2, 0xc7, 0xc8, 0x7, 0x5d,
- 0x2, 0x2, 0xc8, 0x18, 0x3, 0x2, 0x2, 0x2, 0xc9,
- 0xca, 0x7, 0x5f, 0x2, 0x2, 0xca, 0x1a, 0x3, 0x2,
- 0x2, 0x2, 0xcb, 0xcc, 0x7, 0x7d, 0x2, 0x2, 0xcc,
- 0x1c, 0x3, 0x2, 0x2, 0x2, 0xcd, 0xce, 0x7, 0x7f,
- 0x2, 0x2, 0xce, 0x1e, 0x3, 0x2, 0x2, 0x2, 0xcf,
- 0xd0, 0x7, 0x3d, 0x2, 0x2, 0xd0, 0x20, 0x3, 0x2,
- 0x2, 0x2, 0xd1, 0xd2, 0x7, 0x71, 0x2, 0x2, 0xd2,
- 0xd3, 0x7, 0x68, 0x2, 0x2, 0xd3, 0x22, 0x3, 0x2,
- 0x2, 0x2, 0xd4, 0xd5, 0x7, 0x67, 0x2, 0x2, 0xd5,
- 0xd6, 0x7, 0x6e, 0x2, 0x2, 0xd6, 0xd7, 0x7, 0x75,
- 0x2, 0x2, 0xd7, 0xd8, 0x7, 0x67, 0x2, 0x2, 0xd8,
- 0x24, 0x3, 0x2, 0x2, 0x2, 0xd9, 0xda, 0x7, 0x67,
- 0x2, 0x2, 0xda, 0xdb, 0x7, 0x7a, 0x2, 0x2, 0xdb,
- 0xdc, 0x7, 0x76, 0x2, 0x2, 0xdc, 0xdd, 0x7, 0x67,
- 0x2, 0x2, 0xdd, 0xde, 0x7, 0x70, 0x2, 0x2, 0xde,
- 0xdf, 0x7, 0x66, 0x2, 0x2, 0xdf, 0xe0, 0x7, 0x75,
- 0x2, 0x2, 0xe0, 0x26, 0x3, 0x2, 0x2, 0x2, 0xe1,
- 0xe2, 0x7, 0x69, 0x2, 0x2, 0xe2, 0xe3, 0x7, 0x67,
- 0x2, 0x2, 0xe3, 0xe4, 0x7, 0x70, 0x2, 0x2, 0xe4,
- 0xe5, 0x7, 0x67, 0x2, 0x2, 0xe5, 0xe6, 0x7, 0x74,
- 0x2, 0x2, 0xe6, 0xe7, 0x7, 0x63, 0x2, 0x2, 0xe7,
- 0xe8, 0x7, 0x76, 0x2, 0x2, 0xe8, 0xe9, 0x7, 0x67,
- 0x2, 0x2, 0xe9, 0xea, 0x7, 0x75, 0x2, 0x2, 0xea,
- 0x28, 0x3, 0x2, 0x2, 0x2, 0xeb, 0xec, 0x7, 0x71,
- 0x2, 0x2, 0xec, 0xed, 0x7, 0x72, 0x2, 0x2, 0xed,
- 0xee, 0x7, 0x67, 0x2, 0x2, 0xee, 0xef, 0x7, 0x74,
- 0x2, 0x2, 0xef, 0xf0, 0x7, 0x63, 0x2, 0x2, 0xf0,
- 0xf1, 0x7, 0x76, 0x2, 0x2, 0xf1, 0xf2, 0x7, 0x71,
- 0x2, 0x2, 0xf2, 0xf3, 0x7, 0x74, 0x2, 0x2, 0xf3,
- 0x2a, 0x3, 0x2, 0x2, 0x2, 0xf4, 0xf5, 0x7, 0x75,
- 0x2, 0x2, 0xf5, 0xf6, 0x7, 0x76, 0x2, 0x2, 0xf6,
- 0xf7, 0x7, 0x74, 0x2, 0x2, 0xf7, 0xf8, 0x7, 0x77,
- 0x2, 0x2, 0xf8, 0xf9, 0x7, 0x65, 0x2, 0x2, 0xf9,
- 0xfa, 0x7, 0x76, 0x2, 0x2, 0xfa, 0x2c, 0x3, 0x2,
- 0x2, 0x2, 0xfb, 0xfc, 0x7, 0x6f, 0x2, 0x2, 0xfc,
- 0xfd, 0x7, 0x63, 0x2, 0x2, 0xfd, 0xfe, 0x7, 0x65,
- 0x2, 0x2, 0xfe, 0xff, 0x7, 0x74, 0x2, 0x2, 0xff,
- 0x100, 0x7, 0x71, 0x2, 0x2, 0x100, 0x2e, 0x3, 0x2,
- 0x2, 0x2, 0x101, 0x102, 0x7, 0x64, 0x2, 0x2, 0x102,
- 0x103, 0x7, 0x77, 0x2, 0x2, 0x103, 0x104, 0x7, 0x6b,
- 0x2, 0x2, 0x104, 0x105, 0x7, 0x6e, 0x2, 0x2, 0x105,
- 0x106, 0x7, 0x76, 0x2, 0x2, 0x106, 0x107, 0x7, 0x6b,
- 0x2, 0x2, 0x107, 0x108, 0x7, 0x70, 0x2, 0x2, 0x108,
- 0x30, 0x3, 0x2, 0x2, 0x2, 0x109, 0x10a, 0x7, 0x74,
- 0x2, 0x2, 0x10a, 0x10b, 0x7, 0x77, 0x2, 0x2, 0x10b,
- 0x10c, 0x7, 0x70, 0x2, 0x2, 0x10c, 0x10d, 0x7, 0x76,
- 0x2, 0x2, 0x10d, 0x10e, 0x7, 0x6b, 0x2, 0x2, 0x10e,
- 0x10f, 0x7, 0x6f, 0x2, 0x2, 0x10f, 0x110, 0x7, 0x67,
- 0x2, 0x2, 0x110, 0x32, 0x3, 0x2, 0x2, 0x2, 0x111,
- 0x112, 0x7, 0x6f, 0x2, 0x2, 0x112, 0x113, 0x7, 0x71,
- 0x2, 0x2, 0x113, 0x114, 0x7, 0x66, 0x2, 0x2, 0x114,
- 0x115, 0x7, 0x77, 0x2, 0x2, 0x115, 0x116, 0x7, 0x6e,
- 0x2, 0x2, 0x116, 0x117, 0x7, 0x67, 0x2, 0x2, 0x117,
- 0x34, 0x3, 0x2, 0x2, 0x2, 0x118, 0x119, 0x7, 0x6c,
- 0x2, 0x2, 0x119, 0x11a, 0x7, 0x63, 0x2, 0x2, 0x11a,
- 0x11b, 0x7, 0x78, 0x2, 0x2, 0x11b, 0x11c, 0x7, 0x63,
- 0x2, 0x2, 0x11c, 0x11d, 0x7, 0x75, 0x2, 0x2, 0x11d,
- 0x11e, 0x7, 0x65, 0x2, 0x2, 0x11e, 0x11f, 0x7, 0x74,
- 0x2, 0x2, 0x11f, 0x120, 0x7, 0x6b, 0x2, 0x2, 0x120,
- 0x121, 0x7, 0x72, 0x2, 0x2, 0x121, 0x122, 0x7, 0x76,
- 0x2, 0x2, 0x122, 0x36, 0x3, 0x2, 0x2, 0x2, 0x123,
- 0x124, 0x7, 0x66, 0x2, 0x2, 0x124, 0x125, 0x7, 0x67,
- 0x2, 0x2, 0x125, 0x126, 0x7, 0x68, 0x2, 0x2, 0x126,
- 0x127, 0x7, 0x67, 0x2, 0x2, 0x127, 0x128, 0x7, 0x74,
- 0x2, 0x2, 0x128, 0x129, 0x7, 0x74, 0x2, 0x2, 0x129,
- 0x12a, 0x7, 0x67, 0x2, 0x2, 0x12a, 0x12b, 0x7, 0x66,
- 0x2, 0x2, 0x12b, 0x38, 0x3, 0x2, 0x2, 0x2, 0x12c,
- 0x12d, 0x7, 0x6b, 0x2, 0x2, 0x12d, 0x12e, 0x7, 0x68,
- 0x2, 0x2, 0x12e, 0x3a, 0x3, 0x2, 0x2, 0x2, 0x12f,
- 0x130, 0x7, 0x68, 0x2, 0x2, 0x130, 0x131, 0x7, 0x71,
- 0x2, 0x2, 0x131, 0x132, 0x7, 0x74, 0x2, 0x2, 0x132,
- 0x3c, 0x3, 0x2, 0x2, 0x2, 0x133, 0x134, 0x7, 0x79,
- 0x2, 0x2, 0x134, 0x135, 0x7, 0x6a, 0x2, 0x2, 0x135,
- 0x136, 0x7, 0x6b, 0x2, 0x2, 0x136, 0x137, 0x7, 0x6e,
- 0x2, 0x2, 0x137, 0x138, 0x7, 0x67, 0x2, 0x2, 0x138,
- 0x3e, 0x3, 0x2, 0x2, 0x2, 0x139, 0x13a, 0x7, 0x74,
- 0x2, 0x2, 0x13a, 0x13b, 0x7, 0x67, 0x2, 0x2, 0x13b,
- 0x13c, 0x7, 0x76, 0x2, 0x2, 0x13c, 0x13d, 0x7, 0x77,
- 0x2, 0x2, 0x13d, 0x13e, 0x7, 0x74, 0x2, 0x2, 0x13e,
- 0x13f, 0x7, 0x70, 0x2, 0x2, 0x13f, 0x40, 0x3, 0x2,
- 0x2, 0x2, 0x140, 0x141, 0x7, 0x65, 0x2, 0x2, 0x141,
- 0x142, 0x7, 0x71, 0x2, 0x2, 0x142, 0x143, 0x7, 0x70,
- 0x2, 0x2, 0x143, 0x144, 0x7, 0x75, 0x2, 0x2, 0x144,
- 0x145, 0x7, 0x76, 0x2, 0x2, 0x145, 0x146, 0x7, 0x67,
- 0x2, 0x2, 0x146, 0x147, 0x7, 0x7a, 0x2, 0x2, 0x147,
- 0x148, 0x7, 0x72, 0x2, 0x2, 0x148, 0x149, 0x7, 0x74,
- 0x2, 0x2, 0x149, 0x42, 0x3, 0x2, 0x2, 0x2, 0x14a,
- 0x14b, 0x7, 0x65, 0x2, 0x2, 0x14b, 0x14c, 0x7, 0x71,
- 0x2, 0x2, 0x14c, 0x14d, 0x7, 0x70, 0x2, 0x2, 0x14d,
- 0x14e, 0x7, 0x76, 0x2, 0x2, 0x14e, 0x14f, 0x7, 0x6b,
- 0x2, 0x2, 0x14f, 0x150, 0x7, 0x70, 0x2, 0x2, 0x150,
- 0x151, 0x7, 0x77, 0x2, 0x2, 0x151, 0x152, 0x7, 0x67,
- 0x2, 0x2, 0x152, 0x44, 0x3, 0x2, 0x2, 0x2, 0x153,
- 0x154, 0x7, 0x64, 0x2, 0x2, 0x154, 0x155, 0x7, 0x74,
- 0x2, 0x2, 0x155, 0x156, 0x7, 0x67, 0x2, 0x2, 0x156,
- 0x157, 0x7, 0x63, 0x2, 0x2, 0x157, 0x158, 0x7, 0x6d,
- 0x2, 0x2, 0x158, 0x46, 0x3, 0x2, 0x2, 0x2, 0x159,
- 0x15a, 0x7, 0x69, 0x2, 0x2, 0x15a, 0x15b, 0x7, 0x71,
- 0x2, 0x2, 0x15b, 0x15c, 0x7, 0x76, 0x2, 0x2, 0x15c,
- 0x15d, 0x7, 0x71, 0x2, 0x2, 0x15d, 0x48, 0x3, 0x2,
- 0x2, 0x2, 0x15e, 0x15f, 0x7, 0x71, 0x2, 0x2, 0x15f,
- 0x160, 0x7, 0x76, 0x2, 0x2, 0x160, 0x161, 0x7, 0x6a,
- 0x2, 0x2, 0x161, 0x162, 0x7, 0x67, 0x2, 0x2, 0x162,
- 0x163, 0x7, 0x74, 0x2, 0x2, 0x163, 0x164, 0x7, 0x79,
- 0x2, 0x2, 0x164, 0x165, 0x7, 0x6b, 0x2, 0x2, 0x165,
- 0x166, 0x7, 0x75, 0x2, 0x2, 0x166, 0x167, 0x7, 0x67,
- 0x2, 0x2, 0x167, 0x4a, 0x3, 0x2, 0x2, 0x2, 0x168,
- 0x169, 0x7, 0x76, 0x2, 0x2, 0x169, 0x16a, 0x7, 0x74,
- 0x2, 0x2, 0x16a, 0x16b, 0x7, 0x7b, 0x2, 0x2, 0x16b,
- 0x4c, 0x3, 0x2, 0x2, 0x2, 0x16c, 0x16d, 0x7, 0x6e,
- 0x2, 0x2, 0x16d, 0x16e, 0x7, 0x63, 0x2, 0x2, 0x16e,
- 0x16f, 0x7, 0x64, 0x2, 0x2, 0x16f, 0x170, 0x7, 0x67,
- 0x2, 0x2, 0x170, 0x171, 0x7, 0x6e, 0x2, 0x2, 0x171,
- 0x4e, 0x3, 0x2, 0x2, 0x2, 0x172, 0x173, 0x7, 0x6e,
- 0x2, 0x2, 0x173, 0x174, 0x7, 0x63, 0x2, 0x2, 0x174,
- 0x175, 0x7, 0x64, 0x2, 0x2, 0x175, 0x176, 0x7, 0x67,
- 0x2, 0x2, 0x176, 0x177, 0x7, 0x6e, 0x2, 0x2, 0x177,
- 0x178, 0x7, 0x75, 0x2, 0x2, 0x178, 0x50, 0x3, 0x2,
- 0x2, 0x2, 0x179, 0x17a, 0x7, 0x76, 0x2, 0x2, 0x17a,
- 0x17b, 0x7, 0x63, 0x2, 0x2, 0x17b, 0x17c, 0x7, 0x6b,
- 0x2, 0x2, 0x17c, 0x17d, 0x7, 0x6e, 0x2, 0x2, 0x17d,
- 0x52, 0x3, 0x2, 0x2, 0x2, 0x17e, 0x17f, 0x7, 0x6b,
- 0x2, 0x2, 0x17f, 0x180, 0x7, 0x75, 0x2, 0x2, 0x180,
- 0x181, 0x7, 0x70, 0x2, 0x2, 0x181, 0x182, 0x7, 0x76,
- 0x2, 0x2, 0x182, 0x54, 0x3, 0x2, 0x2, 0x2, 0x183,
- 0x184, 0x7, 0x6b, 0x2, 0x2, 0x184, 0x185, 0x7, 0x75,
- 0x2, 0x2, 0x185, 0x56, 0x3, 0x2, 0x2, 0x2, 0x186,
- 0x187, 0x7, 0x6e, 0x2, 0x2, 0x187, 0x188, 0x7, 0x67,
- 0x2, 0x2, 0x188, 0x189, 0x7, 0x76, 0x2, 0x2, 0x189,
- 0x58, 0x3, 0x2, 0x2, 0x2, 0x18a, 0x18b, 0x7, 0x65,
- 0x2, 0x2, 0x18b, 0x18c, 0x7, 0x71, 0x2, 0x2, 0x18c,
- 0x18d, 0x7, 0x70, 0x2, 0x2, 0x18d, 0x18e, 0x7, 0x75,
- 0x2, 0x2, 0x18e, 0x18f, 0x7, 0x76, 0x2, 0x2, 0x18f,
- 0x5a, 0x3, 0x2, 0x2, 0x2, 0x190, 0x191, 0x7, 0x67,
- 0x2, 0x2, 0x191, 0x192, 0x7, 0x7a, 0x2, 0x2, 0x192,
- 0x193, 0x7, 0x76, 0x2, 0x2, 0x193, 0x194, 0x7, 0x67,
- 0x2, 0x2, 0x194, 0x195, 0x7, 0x74, 0x2, 0x2, 0x195,
- 0x196, 0x7, 0x70, 0x2, 0x2, 0x196, 0x5c, 0x3, 0x2,
- 0x2, 0x2, 0x197, 0x198, 0x7, 0x63, 0x2, 0x2, 0x198,
- 0x199, 0x7, 0x75, 0x2, 0x2, 0x199, 0x19a, 0x7, 0x75,
- 0x2, 0x2, 0x19a, 0x19b, 0x7, 0x67, 0x2, 0x2, 0x19b,
- 0x19c, 0x7, 0x74, 0x2, 0x2, 0x19c, 0x19d, 0x7, 0x76,
- 0x2, 0x2, 0x19d, 0x5e, 0x3, 0x2, 0x2, 0x2, 0x19e,
- 0x19f, 0x7, 0x65, 0x2, 0x2, 0x19f, 0x1a0, 0x7, 0x6a,
- 0x2, 0x2, 0x1a0, 0x1a1, 0x7, 0x67, 0x2, 0x2, 0x1a1,
- 0x1a2, 0x7, 0x65, 0x2, 0x2, 0x1a2, 0x1a3, 0x7, 0x6d,
- 0x2, 0x2, 0x1a3, 0x60, 0x3, 0x2, 0x2, 0x2, 0x1a4,
- 0x1a5, 0x7, 0x77, 0x2, 0x2, 0x1a5, 0x1a6, 0x7, 0x70,
- 0x2, 0x2, 0x1a6, 0x1a7, 0x7, 0x74, 0x2, 0x2, 0x1a7,
- 0x1a8, 0x7, 0x67, 0x2, 0x2, 0x1a8, 0x1a9, 0x7, 0x63,
- 0x2, 0x2, 0x1a9, 0x1aa, 0x7, 0x65, 0x2, 0x2, 0x1aa,
- 0x1ab, 0x7, 0x6a, 0x2, 0x2, 0x1ab, 0x1ac, 0x7, 0x63,
- 0x2, 0x2, 0x1ac, 0x1ad, 0x7, 0x64, 0x2, 0x2, 0x1ad,
- 0x1ae, 0x7, 0x6e, 0x2, 0x2, 0x1ae, 0x1af, 0x7, 0x67,
- 0x2, 0x2, 0x1af, 0x62, 0x3, 0x2, 0x2, 0x2, 0x1b0,
- 0x1b1, 0x7, 0x66, 0x2, 0x2, 0x1b1, 0x1b2, 0x7, 0x67,
- 0x2, 0x2, 0x1b2, 0x1b3, 0x7, 0x64, 0x2, 0x2, 0x1b3,
- 0x1b4, 0x7, 0x77, 0x2, 0x2, 0x1b4, 0x1b5, 0x7, 0x69,
- 0x2, 0x2, 0x1b5, 0x64, 0x3, 0x2, 0x2, 0x2, 0x1b6,
- 0x1b7, 0x7, 0x3f, 0x2, 0x2, 0x1b7, 0x66, 0x3, 0x2,
- 0x2, 0x2, 0x1b8, 0x1b9, 0x7, 0x2c, 0x2, 0x2, 0x1b9,
- 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1ba, 0x1bb, 0x7, 0x31,
- 0x2, 0x2, 0x1bb, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1bc,
- 0x1bd, 0x7, 0x27, 0x2, 0x2, 0x1bd, 0x1d3, 0x7, 0x3f,
- 0x2, 0x2, 0x1be, 0x1bf, 0x7, 0x2d, 0x2, 0x2, 0x1bf,
- 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c0, 0x1c1, 0x7, 0x2f,
- 0x2, 0x2, 0x1c1, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c2,
- 0x1c3, 0x7, 0x3e, 0x2, 0x2, 0x1c3, 0x1c4, 0x7, 0x3e,
- 0x2, 0x2, 0x1c4, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c5,
- 0x1c6, 0x7, 0x40, 0x2, 0x2, 0x1c6, 0x1c7, 0x7, 0x40,
- 0x2, 0x2, 0x1c7, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1c8,
- 0x1c9, 0x7, 0x40, 0x2, 0x2, 0x1c9, 0x1ca, 0x7, 0x40,
- 0x2, 0x2, 0x1ca, 0x1cb, 0x7, 0x40, 0x2, 0x2, 0x1cb,
- 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1cc, 0x1cd, 0x7, 0x28,
- 0x2, 0x2, 0x1cd, 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1ce,
- 0x1cf, 0x7, 0x60, 0x2, 0x2, 0x1cf, 0x1d3, 0x7, 0x3f,
- 0x2, 0x2, 0x1d0, 0x1d1, 0x7, 0x7e, 0x2, 0x2, 0x1d1,
- 0x1d3, 0x7, 0x3f, 0x2, 0x2, 0x1d2, 0x1b8, 0x3, 0x2,
- 0x2, 0x2, 0x1d2, 0x1ba, 0x3, 0x2, 0x2, 0x2, 0x1d2,
- 0x1bc, 0x3, 0x2, 0x2, 0x2, 0x1d2, 0x1be, 0x3, 0x2,
- 0x2, 0x2, 0x1d2, 0x1c0, 0x3, 0x2, 0x2, 0x2, 0x1d2,
- 0x1c2, 0x3, 0x2, 0x2, 0x2, 0x1d2, 0x1c5, 0x3, 0x2,
- 0x2, 0x2, 0x1d2, 0x1c8, 0x3, 0x2, 0x2, 0x2, 0x1d2,
- 0x1cc, 0x3, 0x2, 0x2, 0x2, 0x1d2, 0x1ce, 0x3, 0x2,
- 0x2, 0x2, 0x1d2, 0x1d0, 0x3, 0x2, 0x2, 0x2, 0x1d3,
- 0x68, 0x3, 0x2, 0x2, 0x2, 0x1d4, 0x1d5, 0x7, 0x3f,
- 0x2, 0x2, 0x1d5, 0x1d6, 0x7, 0x3f, 0x2, 0x2, 0x1d6,
- 0x6a, 0x3, 0x2, 0x2, 0x2, 0x1d7, 0x1d8, 0x7, 0x2d,
- 0x2, 0x2, 0x1d8, 0x6c, 0x3, 0x2, 0x2, 0x2, 0x1d9,
- 0x1da, 0x7, 0x2f, 0x2, 0x2, 0x1da, 0x6e, 0x3, 0x2,
- 0x2, 0x2, 0x1db, 0x1dc, 0x7, 0x2c, 0x2, 0x2, 0x1dc,
- 0x70, 0x3, 0x2, 0x2, 0x2, 0x1dd, 0x1de, 0x7, 0x31,
- 0x2, 0x2, 0x1de, 0x72, 0x3, 0x2, 0x2, 0x2, 0x1df,
- 0x1e0, 0x7, 0x27, 0x2, 0x2, 0x1e0, 0x74, 0x3, 0x2,
- 0x2, 0x2, 0x1e1, 0x1e2, 0x7, 0x7e, 0x2, 0x2, 0x1e2,
- 0x76, 0x3, 0x2, 0x2, 0x2, 0x1e3, 0x1e4, 0x7, 0x28,
- 0x2, 0x2, 0x1e4, 0x78, 0x3, 0x2, 0x2, 0x2, 0x1e5,
- 0x1e6, 0x7, 0x80, 0x2, 0x2, 0x1e6, 0x7a, 0x3, 0x2,
- 0x2, 0x2, 0x1e7, 0x1e8, 0x7, 0x6f, 0x2, 0x2, 0x1e8,
- 0x1e9, 0x7, 0x63, 0x2, 0x2, 0x1e9, 0x1ea, 0x7, 0x7a,
- 0x2, 0x2, 0x1ea, 0x7c, 0x3, 0x2, 0x2, 0x2, 0x1eb,
- 0x1ec, 0x7, 0x6f, 0x2, 0x2, 0x1ec, 0x1ed, 0x7, 0x6b,
- 0x2, 0x2, 0x1ed, 0x1ee, 0x7, 0x70, 0x2, 0x2, 0x1ee,
- 0x7e, 0x3, 0x2, 0x2, 0x2, 0x1ef, 0x1f0, 0x7, 0x23,
- 0x2, 0x2, 0x1f0, 0x1f1, 0x7, 0x3f, 0x2, 0x2, 0x1f1,
- 0x80, 0x3, 0x2, 0x2, 0x2, 0x1f2, 0x1f3, 0x7, 0x3e,
- 0x2, 0x2, 0x1f3, 0x82, 0x3, 0x2, 0x2, 0x2, 0x1f4,
- 0x1f5, 0x7, 0x3e, 0x2, 0x2, 0x1f5, 0x1f6, 0x7, 0x3f,
- 0x2, 0x2, 0x1f6, 0x84, 0x3, 0x2, 0x2, 0x2, 0x1f7,
- 0x1f8, 0x7, 0x40, 0x2, 0x2, 0x1f8, 0x86, 0x3, 0x2,
- 0x2, 0x2, 0x1f9, 0x1fa, 0x7, 0x40, 0x2, 0x2, 0x1fa,
- 0x1fb, 0x7, 0x3f, 0x2, 0x2, 0x1fb, 0x88, 0x3, 0x2,
- 0x2, 0x2, 0x1fc, 0x1fd, 0x7, 0x3e, 0x2, 0x2, 0x1fd,
- 0x1fe, 0x7, 0x3e, 0x2, 0x2, 0x1fe, 0x8a, 0x3, 0x2,
- 0x2, 0x2, 0x1ff, 0x200, 0x7, 0x40, 0x2, 0x2, 0x200,
- 0x201, 0x7, 0x40, 0x2, 0x2, 0x201, 0x8c, 0x3, 0x2,
- 0x2, 0x2, 0x202, 0x203, 0x7, 0x40, 0x2, 0x2, 0x203,
- 0x204, 0x7, 0x40, 0x2, 0x2, 0x204, 0x205, 0x7, 0x40,
- 0x2, 0x2, 0x205, 0x8e, 0x3, 0x2, 0x2, 0x2, 0x206,
- 0x207, 0x7, 0x30, 0x2, 0x2, 0x207, 0x208, 0x7, 0x30,
- 0x2, 0x2, 0x208, 0x209, 0x7, 0x30, 0x2, 0x2, 0x209,
- 0x90, 0x3, 0x2, 0x2, 0x2, 0x20a, 0x20d, 0x5, 0x69,
- 0x35, 0x2, 0x20b, 0x20d, 0x5, 0x7f, 0x40, 0x2, 0x20c,
- 0x20a, 0x3, 0x2, 0x2, 0x2, 0x20c, 0x20b, 0x3, 0x2,
- 0x2, 0x2, 0x20d, 0x92, 0x3, 0x2, 0x2, 0x2, 0x20e,
- 0x20f, 0x7, 0x2d, 0x2, 0x2, 0x20f, 0x210, 0x7, 0x2d,
- 0x2, 0x2, 0x210, 0x94, 0x3, 0x2, 0x2, 0x2, 0x211,
- 0x212, 0x7, 0x2f, 0x2, 0x2, 0x212, 0x213, 0x7, 0x2f,
- 0x2, 0x2, 0x213, 0x96, 0x3, 0x2, 0x2, 0x2, 0x214,
- 0x215, 0x7, 0x23, 0x2, 0x2, 0x215, 0x98, 0x3, 0x2,
- 0x2, 0x2, 0x216, 0x21b, 0x7, 0x24, 0x2, 0x2, 0x217,
- 0x21a, 0x5, 0x9b, 0x4e, 0x2, 0x218, 0x21a, 0xa, 0x2,
- 0x2, 0x2, 0x219, 0x217, 0x3, 0x2, 0x2, 0x2, 0x219,
- 0x218, 0x3, 0x2, 0x2, 0x2, 0x21a, 0x21d, 0x3, 0x2,
- 0x2, 0x2, 0x21b, 0x219, 0x3, 0x2, 0x2, 0x2, 0x21b,
- 0x21c, 0x3, 0x2, 0x2, 0x2, 0x21c, 0x21e, 0x3, 0x2,
- 0x2, 0x2, 0x21d, 0x21b, 0x3, 0x2, 0x2, 0x2, 0x21e,
- 0x229, 0x7, 0x24, 0x2, 0x2, 0x21f, 0x224, 0x7, 0x29,
- 0x2, 0x2, 0x220, 0x223, 0x5, 0x9b, 0x4e, 0x2, 0x221,
- 0x223, 0xa, 0x3, 0x2, 0x2, 0x222, 0x220, 0x3, 0x2,
- 0x2, 0x2, 0x222, 0x221, 0x3, 0x2, 0x2, 0x2, 0x223,
- 0x226, 0x3, 0x2, 0x2, 0x2, 0x224, 0x222, 0x3, 0x2,
- 0x2, 0x2, 0x224, 0x225, 0x3, 0x2, 0x2, 0x2, 0x225,
- 0x227, 0x3, 0x2, 0x2, 0x2, 0x226, 0x224, 0x3, 0x2,
- 0x2, 0x2, 0x227, 0x229, 0x7, 0x29, 0x2, 0x2, 0x228,
- 0x216, 0x3, 0x2, 0x2, 0x2, 0x228, 0x21f, 0x3, 0x2,
- 0x2, 0x2, 0x229, 0x9a, 0x3, 0x2, 0x2, 0x2, 0x22a,
- 0x22b, 0x7, 0x5e, 0x2, 0x2, 0x22b, 0x22c, 0x9, 0x4,
- 0x2, 0x2, 0x22c, 0x9c, 0x3, 0x2, 0x2, 0x2, 0x22d,
- 0x231, 0x9, 0x5, 0x2, 0x2, 0x22e, 0x230, 0x9, 0x6,
- 0x2, 0x2, 0x22f, 0x22e, 0x3, 0x2, 0x2, 0x2, 0x230,
- 0x233, 0x3, 0x2, 0x2, 0x2, 0x231, 0x22f, 0x3, 0x2,
- 0x2, 0x2, 0x231, 0x232, 0x3, 0x2, 0x2, 0x2, 0x232,
- 0x9e, 0x3, 0x2, 0x2, 0x2, 0x233, 0x231, 0x3, 0x2,
- 0x2, 0x2, 0x234, 0x236, 0x9, 0x7, 0x2, 0x2, 0x235,
- 0x234, 0x3, 0x2, 0x2, 0x2, 0x236, 0x237, 0x3, 0x2,
- 0x2, 0x2, 0x237, 0x235, 0x3, 0x2, 0x2, 0x2, 0x237,
- 0x238, 0x3, 0x2, 0x2, 0x2, 0x238, 0x239, 0x3, 0x2,
- 0x2, 0x2, 0x239, 0x23a, 0x8, 0x50, 0x2, 0x2, 0x23a,
- 0xa0, 0x3, 0x2, 0x2, 0x2, 0x23b, 0x23c, 0x7, 0x31,
- 0x2, 0x2, 0x23c, 0x23d, 0x7, 0x2c, 0x2, 0x2, 0x23d,
- 0x241, 0x3, 0x2, 0x2, 0x2, 0x23e, 0x240, 0xb, 0x2,
- 0x2, 0x2, 0x23f, 0x23e, 0x3, 0x2, 0x2, 0x2, 0x240,
- 0x243, 0x3, 0x2, 0x2, 0x2, 0x241, 0x242, 0x3, 0x2,
- 0x2, 0x2, 0x241, 0x23f, 0x3, 0x2, 0x2, 0x2, 0x242,
- 0x247, 0x3, 0x2, 0x2, 0x2, 0x243, 0x241, 0x3, 0x2,
- 0x2, 0x2, 0x244, 0x245, 0x7, 0x2c, 0x2, 0x2, 0x245,
- 0x248, 0x7, 0x31, 0x2, 0x2, 0x246, 0x248, 0x7, 0x2,
- 0x2, 0x3, 0x247, 0x244, 0x3, 0x2, 0x2, 0x2, 0x247,
- 0x246, 0x3, 0x2, 0x2, 0x2, 0x248, 0x249, 0x3, 0x2,
- 0x2, 0x2, 0x249, 0x24a, 0x8, 0x51, 0x2, 0x2, 0x24a,
- 0xa2, 0x3, 0x2, 0x2, 0x2, 0x24b, 0x24c, 0x7, 0x31,
- 0x2, 0x2, 0x24c, 0x24d, 0x7, 0x31, 0x2, 0x2, 0x24d,
- 0x251, 0x3, 0x2, 0x2, 0x2, 0x24e, 0x250, 0xa, 0x8,
- 0x2, 0x2, 0x24f, 0x24e, 0x3, 0x2, 0x2, 0x2, 0x250,
- 0x253, 0x3, 0x2, 0x2, 0x2, 0x251, 0x24f, 0x3, 0x2,
- 0x2, 0x2, 0x251, 0x252, 0x3, 0x2, 0x2, 0x2, 0x252,
- 0x254, 0x3, 0x2, 0x2, 0x2, 0x253, 0x251, 0x3, 0x2,
- 0x2, 0x2, 0x254, 0x255, 0x8, 0x52, 0x2, 0x2, 0x255,
- 0xa4, 0x3, 0x2, 0x2, 0x2, 0x256, 0x257, 0x9, 0x9,
- 0x2, 0x2, 0x257, 0xa6, 0x3, 0x2, 0x2, 0x2, 0x258,
- 0x261, 0x7, 0x32, 0x2, 0x2, 0x259, 0x25d, 0x9, 0xa,
- 0x2, 0x2, 0x25a, 0x25c, 0x5, 0xa5, 0x53, 0x2, 0x25b,
- 0x25a, 0x3, 0x2, 0x2, 0x2, 0x25c, 0x25f, 0x3, 0x2,
- 0x2, 0x2, 0x25d, 0x25b, 0x3, 0x2, 0x2, 0x2, 0x25d,
- 0x25e, 0x3, 0x2, 0x2, 0x2, 0x25e, 0x261, 0x3, 0x2,
- 0x2, 0x2, 0x25f, 0x25d, 0x3, 0x2, 0x2, 0x2, 0x260,
- 0x258, 0x3, 0x2, 0x2, 0x2, 0x260, 0x259, 0x3, 0x2,
- 0x2, 0x2, 0x261, 0xa8, 0x3, 0x2, 0x2, 0x2, 0x262,
- 0x264, 0x9, 0xb, 0x2, 0x2, 0x263, 0x265, 0x9, 0xc,
- 0x2, 0x2, 0x264, 0x263, 0x3, 0x2, 0x2, 0x2, 0x264,
- 0x265, 0x3, 0x2, 0x2, 0x2, 0x265, 0x267, 0x3, 0x2,
- 0x2, 0x2, 0x266, 0x268, 0x5, 0xa5, 0x53, 0x2, 0x267,
- 0x266, 0x3, 0x2, 0x2, 0x2, 0x268, 0x269, 0x3, 0x2,
- 0x2, 0x2, 0x269, 0x267, 0x3, 0x2, 0x2, 0x2, 0x269,
- 0x26a, 0x3, 0x2, 0x2, 0x2, 0x26a, 0xaa, 0x3, 0x2,
- 0x2, 0x2, 0x26b, 0x26d, 0x5, 0x6d, 0x37, 0x2, 0x26c,
- 0x26b, 0x3, 0x2, 0x2, 0x2, 0x26c, 0x26d, 0x3, 0x2,
- 0x2, 0x2, 0x26d, 0x26e, 0x3, 0x2, 0x2, 0x2, 0x26e,
- 0x26f, 0x5, 0xa7, 0x54, 0x2, 0x26f, 0x273, 0x7, 0x30,
- 0x2, 0x2, 0x270, 0x272, 0x5, 0xa5, 0x53, 0x2, 0x271,
- 0x270, 0x3, 0x2, 0x2, 0x2, 0x272, 0x275, 0x3, 0x2,
- 0x2, 0x2, 0x273, 0x271, 0x3, 0x2, 0x2, 0x2, 0x273,
- 0x274, 0x3, 0x2, 0x2, 0x2, 0x274, 0x277, 0x3, 0x2,
- 0x2, 0x2, 0x275, 0x273, 0x3, 0x2, 0x2, 0x2, 0x276,
- 0x278, 0x5, 0xa9, 0x55, 0x2, 0x277, 0x276, 0x3, 0x2,
- 0x2, 0x2, 0x277, 0x278, 0x3, 0x2, 0x2, 0x2, 0x278,
- 0x298, 0x3, 0x2, 0x2, 0x2, 0x279, 0x27b, 0x5, 0x6d,
- 0x37, 0x2, 0x27a, 0x279, 0x3, 0x2, 0x2, 0x2, 0x27a,
- 0x27b, 0x3, 0x2, 0x2, 0x2, 0x27b, 0x27c, 0x3, 0x2,
- 0x2, 0x2, 0x27c, 0x27e, 0x7, 0x30, 0x2, 0x2, 0x27d,
- 0x27f, 0x5, 0xa5, 0x53, 0x2, 0x27e, 0x27d, 0x3, 0x2,
- 0x2, 0x2, 0x27f, 0x280, 0x3, 0x2, 0x2, 0x2, 0x280,
- 0x27e, 0x3, 0x2, 0x2, 0x2, 0x280, 0x281, 0x3, 0x2,
- 0x2, 0x2, 0x281, 0x283, 0x3, 0x2, 0x2, 0x2, 0x282,
- 0x284, 0x5, 0xa9, 0x55, 0x2, 0x283, 0x282, 0x3, 0x2,
- 0x2, 0x2, 0x283, 0x284, 0x3, 0x2, 0x2, 0x2, 0x284,
- 0x298, 0x3, 0x2, 0x2, 0x2, 0x285, 0x287, 0x5, 0x6d,
- 0x37, 0x2, 0x286, 0x285, 0x3, 0x2, 0x2, 0x2, 0x286,
- 0x287, 0x3, 0x2, 0x2, 0x2, 0x287, 0x288, 0x3, 0x2,
- 0x2, 0x2, 0x288, 0x28a, 0x5, 0xa7, 0x54, 0x2, 0x289,
- 0x28b, 0x5, 0xa9, 0x55, 0x2, 0x28a, 0x289, 0x3, 0x2,
- 0x2, 0x2, 0x28a, 0x28b, 0x3, 0x2, 0x2, 0x2, 0x28b,
- 0x298, 0x3, 0x2, 0x2, 0x2, 0x28c, 0x28e, 0x5, 0x6d,
- 0x37, 0x2, 0x28d, 0x28c, 0x3, 0x2, 0x2, 0x2, 0x28d,
- 0x28e, 0x3, 0x2, 0x2, 0x2, 0x28e, 0x28f, 0x3, 0x2,
- 0x2, 0x2, 0x28f, 0x290, 0x7, 0x32, 0x2, 0x2, 0x290,
- 0x291, 0x7, 0x7a, 0x2, 0x2, 0x291, 0x293, 0x3, 0x2,
- 0x2, 0x2, 0x292, 0x294, 0x9, 0xd, 0x2, 0x2, 0x293,
- 0x292, 0x3, 0x2, 0x2, 0x2, 0x294, 0x295, 0x3, 0x2,
- 0x2, 0x2, 0x295, 0x293, 0x3, 0x2, 0x2, 0x2, 0x295,
- 0x296, 0x3, 0x2, 0x2, 0x2, 0x296, 0x298, 0x3, 0x2,
- 0x2, 0x2, 0x297, 0x26c, 0x3, 0x2, 0x2, 0x2, 0x297,
- 0x27a, 0x3, 0x2, 0x2, 0x2, 0x297, 0x286, 0x3, 0x2,
- 0x2, 0x2, 0x297, 0x28d, 0x3, 0x2, 0x2, 0x2, 0x298,
- 0xac, 0x3, 0x2, 0x2, 0x2, 0x1e, 0x2, 0x1d2, 0x20c,
- 0x219, 0x21b, 0x222, 0x224, 0x228, 0x231, 0x237, 0x241, 0x247,
- 0x251, 0x25d, 0x260, 0x264, 0x269, 0x26c, 0x273, 0x277, 0x27a,
- 0x280, 0x283, 0x286, 0x28a, 0x28d, 0x295, 0x297, 0x3, 0x2,
- 0x3, 0x2,
- };
-
- atn::ATNDeserializer deserializer;
- _atn = deserializer.deserialize(_serializedATN);
-
- size_t count = _atn.getNumberOfDecisions();
- _decisionToDFA.reserve(count);
- for (size_t i = 0; i < count; i++) {
- _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
- }
-}
-
-TorqueLexer::Initializer TorqueLexer::_init;
diff --git a/deps/v8/src/torque/TorqueLexer.h b/deps/v8/src/torque/TorqueLexer.h
deleted file mode 100644
index c95a80debd..0000000000
--- a/deps/v8/src/torque/TorqueLexer.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_TORQUE_TORQUELEXER_H_
-#define V8_TORQUE_TORQUELEXER_H_
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#pragma once
-
-#include "./antlr4-runtime.h"
-
-class TorqueLexer : public antlr4::Lexer {
- public:
- enum {
- T__0 = 1,
- T__1 = 2,
- T__2 = 3,
- T__3 = 4,
- T__4 = 5,
- T__5 = 6,
- T__6 = 7,
- T__7 = 8,
- T__8 = 9,
- T__9 = 10,
- T__10 = 11,
- T__11 = 12,
- T__12 = 13,
- T__13 = 14,
- T__14 = 15,
- T__15 = 16,
- T__16 = 17,
- T__17 = 18,
- T__18 = 19,
- T__19 = 20,
- T__20 = 21,
- MACRO = 22,
- BUILTIN = 23,
- RUNTIME = 24,
- MODULE = 25,
- JAVASCRIPT = 26,
- DEFERRED = 27,
- IF = 28,
- FOR = 29,
- WHILE = 30,
- RETURN = 31,
- CONSTEXPR = 32,
- CONTINUE = 33,
- BREAK = 34,
- GOTO = 35,
- OTHERWISE = 36,
- TRY = 37,
- LABEL = 38,
- LABELS = 39,
- TAIL = 40,
- ISNT = 41,
- IS = 42,
- LET = 43,
- CONST = 44,
- EXTERN = 45,
- ASSERT_TOKEN = 46,
- CHECK_TOKEN = 47,
- UNREACHABLE_TOKEN = 48,
- DEBUG_TOKEN = 49,
- ASSIGNMENT = 50,
- ASSIGNMENT_OPERATOR = 51,
- EQUAL = 52,
- PLUS = 53,
- MINUS = 54,
- MULTIPLY = 55,
- DIVIDE = 56,
- MODULO = 57,
- BIT_OR = 58,
- BIT_AND = 59,
- BIT_NOT = 60,
- MAX = 61,
- MIN = 62,
- NOT_EQUAL = 63,
- LESS_THAN = 64,
- LESS_THAN_EQUAL = 65,
- GREATER_THAN = 66,
- GREATER_THAN_EQUAL = 67,
- SHIFT_LEFT = 68,
- SHIFT_RIGHT = 69,
- SHIFT_RIGHT_ARITHMETIC = 70,
- VARARGS = 71,
- EQUALITY_OPERATOR = 72,
- INCREMENT = 73,
- DECREMENT = 74,
- NOT = 75,
- STRING_LITERAL = 76,
- IDENTIFIER = 77,
- WS = 78,
- BLOCK_COMMENT = 79,
- LINE_COMMENT = 80,
- DECIMAL_LITERAL = 81
- };
-
- explicit TorqueLexer(antlr4::CharStream* input);
- ~TorqueLexer();
-
- std::string getGrammarFileName() const override;
- const std::vector<std::string>& getRuleNames() const override;
-
- const std::vector<std::string>& getChannelNames() const override;
- const std::vector<std::string>& getModeNames() const override;
- const std::vector<std::string>& getTokenNames()
- const override; // deprecated, use vocabulary instead
- antlr4::dfa::Vocabulary& getVocabulary() const override;
-
- const std::vector<uint16_t> getSerializedATN() const override;
- const antlr4::atn::ATN& getATN() const override;
-
- private:
- static std::vector<antlr4::dfa::DFA> _decisionToDFA;
- static antlr4::atn::PredictionContextCache _sharedContextCache;
- static std::vector<std::string> _ruleNames;
- static std::vector<std::string> _tokenNames;
- static std::vector<std::string> _channelNames;
- static std::vector<std::string> _modeNames;
-
- static std::vector<std::string> _literalNames;
- static std::vector<std::string> _symbolicNames;
- static antlr4::dfa::Vocabulary _vocabulary;
- static antlr4::atn::ATN _atn;
- static std::vector<uint16_t> _serializedATN;
-
- // Individual action functions triggered by action() above.
-
- // Individual semantic predicate functions triggered by sempred() above.
-
- struct Initializer {
- Initializer();
- };
- static Initializer _init;
-};
-
-#endif // V8_TORQUE_TORQUELEXER_H_
diff --git a/deps/v8/src/torque/TorqueLexer.interp b/deps/v8/src/torque/TorqueLexer.interp
deleted file mode 100644
index bbe1cb77ee..0000000000
--- a/deps/v8/src/torque/TorqueLexer.interp
+++ /dev/null
@@ -1,264 +0,0 @@
-token literal names:
-null
-'('
-')'
-'=>'
-','
-':'
-'type'
-'?'
-'||'
-'&&'
-'.'
-'['
-']'
-'{'
-'}'
-';'
-'of'
-'else'
-'extends'
-'generates'
-'operator'
-'struct'
-'macro'
-'builtin'
-'runtime'
-'module'
-'javascript'
-'deferred'
-'if'
-'for'
-'while'
-'return'
-'constexpr'
-'continue'
-'break'
-'goto'
-'otherwise'
-'try'
-'label'
-'labels'
-'tail'
-'isnt'
-'is'
-'let'
-'const'
-'extern'
-'assert'
-'check'
-'unreachable'
-'debug'
-'='
-null
-'=='
-'+'
-'-'
-'*'
-'/'
-'%'
-'|'
-'&'
-'~'
-'max'
-'min'
-'!='
-'<'
-'<='
-'>'
-'>='
-'<<'
-'>>'
-'>>>'
-'...'
-null
-'++'
-'--'
-'!'
-null
-null
-null
-null
-null
-null
-
-token symbolic names:
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-null
-MACRO
-BUILTIN
-RUNTIME
-MODULE
-JAVASCRIPT
-DEFERRED
-IF
-FOR
-WHILE
-RETURN
-CONSTEXPR
-CONTINUE
-BREAK
-GOTO
-OTHERWISE
-TRY
-LABEL
-LABELS
-TAIL
-ISNT
-IS
-LET
-CONST
-EXTERN
-ASSERT_TOKEN
-CHECK_TOKEN
-UNREACHABLE_TOKEN
-DEBUG_TOKEN
-ASSIGNMENT
-ASSIGNMENT_OPERATOR
-EQUAL
-PLUS
-MINUS
-MULTIPLY
-DIVIDE
-MODULO
-BIT_OR
-BIT_AND
-BIT_NOT
-MAX
-MIN
-NOT_EQUAL
-LESS_THAN
-LESS_THAN_EQUAL
-GREATER_THAN
-GREATER_THAN_EQUAL
-SHIFT_LEFT
-SHIFT_RIGHT
-SHIFT_RIGHT_ARITHMETIC
-VARARGS
-EQUALITY_OPERATOR
-INCREMENT
-DECREMENT
-NOT
-STRING_LITERAL
-IDENTIFIER
-WS
-BLOCK_COMMENT
-LINE_COMMENT
-DECIMAL_LITERAL
-
-rule names:
-T__0
-T__1
-T__2
-T__3
-T__4
-T__5
-T__6
-T__7
-T__8
-T__9
-T__10
-T__11
-T__12
-T__13
-T__14
-T__15
-T__16
-T__17
-T__18
-T__19
-T__20
-MACRO
-BUILTIN
-RUNTIME
-MODULE
-JAVASCRIPT
-DEFERRED
-IF
-FOR
-WHILE
-RETURN
-CONSTEXPR
-CONTINUE
-BREAK
-GOTO
-OTHERWISE
-TRY
-LABEL
-LABELS
-TAIL
-ISNT
-IS
-LET
-CONST
-EXTERN
-ASSERT_TOKEN
-CHECK_TOKEN
-UNREACHABLE_TOKEN
-DEBUG_TOKEN
-ASSIGNMENT
-ASSIGNMENT_OPERATOR
-EQUAL
-PLUS
-MINUS
-MULTIPLY
-DIVIDE
-MODULO
-BIT_OR
-BIT_AND
-BIT_NOT
-MAX
-MIN
-NOT_EQUAL
-LESS_THAN
-LESS_THAN_EQUAL
-GREATER_THAN
-GREATER_THAN_EQUAL
-SHIFT_LEFT
-SHIFT_RIGHT
-SHIFT_RIGHT_ARITHMETIC
-VARARGS
-EQUALITY_OPERATOR
-INCREMENT
-DECREMENT
-NOT
-STRING_LITERAL
-ESCAPE
-IDENTIFIER
-WS
-BLOCK_COMMENT
-LINE_COMMENT
-DECIMAL_DIGIT
-DECIMAL_INTEGER_LITERAL
-EXPONENT_PART
-DECIMAL_LITERAL
-
-channel names:
-DEFAULT_TOKEN_CHANNEL
-HIDDEN
-
-mode names:
-DEFAULT_MODE
-
-atn:
-[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 83, 665, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44, 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 4, 49, 9, 49, 4, 50, 9, 50, 4, 51, 9, 51, 4, 52, 9, 52, 4, 53, 9, 53, 4, 54, 9, 54, 4, 55, 9, 55, 4, 56, 9, 56, 4, 57, 9, 57, 4, 58, 9, 58, 4, 59, 9, 59, 4, 60, 9, 60, 4, 61, 9, 61, 4, 62, 9, 62, 4, 63, 9, 63, 4, 64, 9, 64, 4, 65, 9, 65, 4, 66, 9, 66, 4, 67, 9, 67, 4, 68, 9, 68, 4, 69, 9, 69, 4, 70, 9, 70, 4, 71, 9, 71, 4, 72, 9, 72, 4, 73, 9, 73, 4, 74, 9, 74, 4, 75, 9, 75, 4, 76, 9, 76, 4, 77, 9, 77, 4, 78, 9, 78, 4, 79, 9, 79, 4, 80, 9, 80, 4, 81, 9, 81, 4, 82, 9, 82, 4, 83, 9, 83, 4, 84, 9, 84, 4, 85, 9, 85, 4, 86, 9, 86, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 20, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 21, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, 3, 35, 3, 36, 3, 36, 3, 36, 3, 36, 3, 36, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 37, 3, 38, 3, 38, 3, 38, 3, 38, 3, 39, 3, 39, 3, 39, 3, 39, 3, 39, 3, 39, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 41, 3, 41, 3, 41, 3, 41, 3, 41, 3, 42, 3, 42, 3, 42, 3, 42, 3, 42, 3, 43, 3, 43, 3, 43, 3, 44, 3, 44, 3, 44, 3, 44, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 47, 3, 47, 3, 47, 3, 47, 3, 47, 3, 47, 3, 47, 3, 48, 3, 48, 3, 48, 3, 48, 3, 48, 3, 48, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 49, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 50, 3, 51, 3, 51, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 3, 52, 5, 52, 467, 10, 52, 3, 53, 3, 53, 3, 53, 3, 54, 3, 54, 3, 55, 3, 55, 3, 56, 3, 56, 3, 57, 3, 57, 3, 58, 3, 58, 3, 59, 3, 59, 3, 60, 3, 60, 3, 61, 3, 61, 3, 62, 3, 62, 3, 62, 3, 62, 3, 63, 3, 63, 3, 63, 3, 63, 3, 64, 3, 64, 3, 64, 3, 65, 3, 65, 3, 66, 3, 66, 3, 66, 3, 67, 
3, 67, 3, 68, 3, 68, 3, 68, 3, 69, 3, 69, 3, 69, 3, 70, 3, 70, 3, 70, 3, 71, 3, 71, 3, 71, 3, 71, 3, 72, 3, 72, 3, 72, 3, 72, 3, 73, 3, 73, 5, 73, 525, 10, 73, 3, 74, 3, 74, 3, 74, 3, 75, 3, 75, 3, 75, 3, 76, 3, 76, 3, 77, 3, 77, 3, 77, 7, 77, 538, 10, 77, 12, 77, 14, 77, 541, 11, 77, 3, 77, 3, 77, 3, 77, 3, 77, 7, 77, 547, 10, 77, 12, 77, 14, 77, 550, 11, 77, 3, 77, 5, 77, 553, 10, 77, 3, 78, 3, 78, 3, 78, 3, 79, 3, 79, 7, 79, 560, 10, 79, 12, 79, 14, 79, 563, 11, 79, 3, 80, 6, 80, 566, 10, 80, 13, 80, 14, 80, 567, 3, 80, 3, 80, 3, 81, 3, 81, 3, 81, 3, 81, 7, 81, 576, 10, 81, 12, 81, 14, 81, 579, 11, 81, 3, 81, 3, 81, 3, 81, 5, 81, 584, 10, 81, 3, 81, 3, 81, 3, 82, 3, 82, 3, 82, 3, 82, 7, 82, 592, 10, 82, 12, 82, 14, 82, 595, 11, 82, 3, 82, 3, 82, 3, 83, 3, 83, 3, 84, 3, 84, 3, 84, 7, 84, 604, 10, 84, 12, 84, 14, 84, 607, 11, 84, 5, 84, 609, 10, 84, 3, 85, 3, 85, 5, 85, 613, 10, 85, 3, 85, 6, 85, 616, 10, 85, 13, 85, 14, 85, 617, 3, 86, 5, 86, 621, 10, 86, 3, 86, 3, 86, 3, 86, 7, 86, 626, 10, 86, 12, 86, 14, 86, 629, 11, 86, 3, 86, 5, 86, 632, 10, 86, 3, 86, 5, 86, 635, 10, 86, 3, 86, 3, 86, 6, 86, 639, 10, 86, 13, 86, 14, 86, 640, 3, 86, 5, 86, 644, 10, 86, 3, 86, 5, 86, 647, 10, 86, 3, 86, 3, 86, 5, 86, 651, 10, 86, 3, 86, 5, 86, 654, 10, 86, 3, 86, 3, 86, 3, 86, 3, 86, 6, 86, 660, 10, 86, 13, 86, 14, 86, 661, 5, 86, 664, 10, 86, 3, 577, 2, 87, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 69, 36, 71, 37, 73, 38, 75, 39, 77, 40, 79, 41, 81, 42, 83, 43, 85, 44, 87, 45, 89, 46, 91, 47, 93, 48, 95, 49, 97, 50, 99, 51, 101, 52, 103, 53, 105, 54, 107, 55, 109, 56, 111, 57, 113, 58, 115, 59, 117, 60, 119, 61, 121, 62, 123, 63, 125, 64, 127, 65, 129, 66, 131, 67, 133, 68, 135, 69, 137, 70, 139, 71, 141, 72, 143, 73, 145, 74, 147, 75, 149, 76, 151, 77, 153, 78, 155, 2, 157, 79, 159, 80, 161, 81, 163, 82, 165, 2, 167, 2, 169, 2, 171, 83, 3, 2, 14, 6, 2, 12, 12, 15, 15, 36, 36, 94, 94, 6, 2, 12, 12, 15, 15, 41, 41, 94, 94, 7, 2, 36, 36, 41, 41, 94, 94, 112, 112, 116, 116, 4, 2, 67, 92, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 11, 12, 14, 15, 34, 34, 4, 2, 12, 12, 15, 15, 3, 2, 50, 59, 3, 2, 51, 59, 4, 2, 71, 71, 103, 103, 4, 2, 45, 45, 47, 47, 5, 2, 50, 59, 67, 72, 99, 104, 2, 698, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73, 3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 2, 81, 3, 2, 2, 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, 2, 2, 87, 3, 2, 2, 2, 2, 89, 3, 2, 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, 2, 2, 2, 95, 3, 2, 2, 2, 2, 97, 3, 2, 2, 2, 2, 99, 3, 2, 2, 2, 2, 101, 3, 2, 2, 2, 2, 103, 3, 2, 2, 2, 2, 105, 3, 2, 2, 2, 2, 107, 3, 2, 2, 2, 2, 109, 3, 2, 2, 2, 2, 111, 3, 2, 2, 2, 2, 113, 3, 2, 2, 2, 2, 115, 3, 2, 2, 2, 2, 117, 3, 2, 2, 2, 2, 119, 3, 2, 
2, 2, 2, 121, 3, 2, 2, 2, 2, 123, 3, 2, 2, 2, 2, 125, 3, 2, 2, 2, 2, 127, 3, 2, 2, 2, 2, 129, 3, 2, 2, 2, 2, 131, 3, 2, 2, 2, 2, 133, 3, 2, 2, 2, 2, 135, 3, 2, 2, 2, 2, 137, 3, 2, 2, 2, 2, 139, 3, 2, 2, 2, 2, 141, 3, 2, 2, 2, 2, 143, 3, 2, 2, 2, 2, 145, 3, 2, 2, 2, 2, 147, 3, 2, 2, 2, 2, 149, 3, 2, 2, 2, 2, 151, 3, 2, 2, 2, 2, 153, 3, 2, 2, 2, 2, 157, 3, 2, 2, 2, 2, 159, 3, 2, 2, 2, 2, 161, 3, 2, 2, 2, 2, 163, 3, 2, 2, 2, 2, 171, 3, 2, 2, 2, 3, 173, 3, 2, 2, 2, 5, 175, 3, 2, 2, 2, 7, 177, 3, 2, 2, 2, 9, 180, 3, 2, 2, 2, 11, 182, 3, 2, 2, 2, 13, 184, 3, 2, 2, 2, 15, 189, 3, 2, 2, 2, 17, 191, 3, 2, 2, 2, 19, 194, 3, 2, 2, 2, 21, 197, 3, 2, 2, 2, 23, 199, 3, 2, 2, 2, 25, 201, 3, 2, 2, 2, 27, 203, 3, 2, 2, 2, 29, 205, 3, 2, 2, 2, 31, 207, 3, 2, 2, 2, 33, 209, 3, 2, 2, 2, 35, 212, 3, 2, 2, 2, 37, 217, 3, 2, 2, 2, 39, 225, 3, 2, 2, 2, 41, 235, 3, 2, 2, 2, 43, 244, 3, 2, 2, 2, 45, 251, 3, 2, 2, 2, 47, 257, 3, 2, 2, 2, 49, 265, 3, 2, 2, 2, 51, 273, 3, 2, 2, 2, 53, 280, 3, 2, 2, 2, 55, 291, 3, 2, 2, 2, 57, 300, 3, 2, 2, 2, 59, 303, 3, 2, 2, 2, 61, 307, 3, 2, 2, 2, 63, 313, 3, 2, 2, 2, 65, 320, 3, 2, 2, 2, 67, 330, 3, 2, 2, 2, 69, 339, 3, 2, 2, 2, 71, 345, 3, 2, 2, 2, 73, 350, 3, 2, 2, 2, 75, 360, 3, 2, 2, 2, 77, 364, 3, 2, 2, 2, 79, 370, 3, 2, 2, 2, 81, 377, 3, 2, 2, 2, 83, 382, 3, 2, 2, 2, 85, 387, 3, 2, 2, 2, 87, 390, 3, 2, 2, 2, 89, 394, 3, 2, 2, 2, 91, 400, 3, 2, 2, 2, 93, 407, 3, 2, 2, 2, 95, 414, 3, 2, 2, 2, 97, 420, 3, 2, 2, 2, 99, 432, 3, 2, 2, 2, 101, 438, 3, 2, 2, 2, 103, 466, 3, 2, 2, 2, 105, 468, 3, 2, 2, 2, 107, 471, 3, 2, 2, 2, 109, 473, 3, 2, 2, 2, 111, 475, 3, 2, 2, 2, 113, 477, 3, 2, 2, 2, 115, 479, 3, 2, 2, 2, 117, 481, 3, 2, 2, 2, 119, 483, 3, 2, 2, 2, 121, 485, 3, 2, 2, 2, 123, 487, 3, 2, 2, 2, 125, 491, 3, 2, 2, 2, 127, 495, 3, 2, 2, 2, 129, 498, 3, 2, 2, 2, 131, 500, 3, 2, 2, 2, 133, 503, 3, 2, 2, 2, 135, 505, 3, 2, 2, 2, 137, 508, 3, 2, 2, 2, 139, 511, 3, 2, 2, 2, 141, 514, 3, 2, 2, 2, 143, 518, 3, 2, 2, 2, 145, 524, 3, 2, 2, 2, 147, 526, 3, 2, 2, 2, 149, 529, 3, 2, 2, 2, 151, 532, 3, 2, 2, 2, 153, 552, 3, 2, 2, 2, 155, 554, 3, 2, 2, 2, 157, 557, 3, 2, 2, 2, 159, 565, 3, 2, 2, 2, 161, 571, 3, 2, 2, 2, 163, 587, 3, 2, 2, 2, 165, 598, 3, 2, 2, 2, 167, 608, 3, 2, 2, 2, 169, 610, 3, 2, 2, 2, 171, 663, 3, 2, 2, 2, 173, 174, 7, 42, 2, 2, 174, 4, 3, 2, 2, 2, 175, 176, 7, 43, 2, 2, 176, 6, 3, 2, 2, 2, 177, 178, 7, 63, 2, 2, 178, 179, 7, 64, 2, 2, 179, 8, 3, 2, 2, 2, 180, 181, 7, 46, 2, 2, 181, 10, 3, 2, 2, 2, 182, 183, 7, 60, 2, 2, 183, 12, 3, 2, 2, 2, 184, 185, 7, 118, 2, 2, 185, 186, 7, 123, 2, 2, 186, 187, 7, 114, 2, 2, 187, 188, 7, 103, 2, 2, 188, 14, 3, 2, 2, 2, 189, 190, 7, 65, 2, 2, 190, 16, 3, 2, 2, 2, 191, 192, 7, 126, 2, 2, 192, 193, 7, 126, 2, 2, 193, 18, 3, 2, 2, 2, 194, 195, 7, 40, 2, 2, 195, 196, 7, 40, 2, 2, 196, 20, 3, 2, 2, 2, 197, 198, 7, 48, 2, 2, 198, 22, 3, 2, 2, 2, 199, 200, 7, 93, 2, 2, 200, 24, 3, 2, 2, 2, 201, 202, 7, 95, 2, 2, 202, 26, 3, 2, 2, 2, 203, 204, 7, 125, 2, 2, 204, 28, 3, 2, 2, 2, 205, 206, 7, 127, 2, 2, 206, 30, 3, 2, 2, 2, 207, 208, 7, 61, 2, 2, 208, 32, 3, 2, 2, 2, 209, 210, 7, 113, 2, 2, 210, 211, 7, 104, 2, 2, 211, 34, 3, 2, 2, 2, 212, 213, 7, 103, 2, 2, 213, 214, 7, 110, 2, 2, 214, 215, 7, 117, 2, 2, 215, 216, 7, 103, 2, 2, 216, 36, 3, 2, 2, 2, 217, 218, 7, 103, 2, 2, 218, 219, 7, 122, 2, 2, 219, 220, 7, 118, 2, 2, 220, 221, 7, 103, 2, 2, 221, 222, 7, 112, 2, 2, 222, 223, 7, 102, 2, 2, 223, 224, 7, 117, 2, 2, 224, 38, 3, 2, 2, 2, 225, 226, 7, 105, 2, 2, 226, 227, 7, 103, 2, 2, 227, 228, 7, 112, 2, 2, 228, 229, 7, 103, 2, 2, 229, 230, 7, 116, 
2, 2, 230, 231, 7, 99, 2, 2, 231, 232, 7, 118, 2, 2, 232, 233, 7, 103, 2, 2, 233, 234, 7, 117, 2, 2, 234, 40, 3, 2, 2, 2, 235, 236, 7, 113, 2, 2, 236, 237, 7, 114, 2, 2, 237, 238, 7, 103, 2, 2, 238, 239, 7, 116, 2, 2, 239, 240, 7, 99, 2, 2, 240, 241, 7, 118, 2, 2, 241, 242, 7, 113, 2, 2, 242, 243, 7, 116, 2, 2, 243, 42, 3, 2, 2, 2, 244, 245, 7, 117, 2, 2, 245, 246, 7, 118, 2, 2, 246, 247, 7, 116, 2, 2, 247, 248, 7, 119, 2, 2, 248, 249, 7, 101, 2, 2, 249, 250, 7, 118, 2, 2, 250, 44, 3, 2, 2, 2, 251, 252, 7, 111, 2, 2, 252, 253, 7, 99, 2, 2, 253, 254, 7, 101, 2, 2, 254, 255, 7, 116, 2, 2, 255, 256, 7, 113, 2, 2, 256, 46, 3, 2, 2, 2, 257, 258, 7, 100, 2, 2, 258, 259, 7, 119, 2, 2, 259, 260, 7, 107, 2, 2, 260, 261, 7, 110, 2, 2, 261, 262, 7, 118, 2, 2, 262, 263, 7, 107, 2, 2, 263, 264, 7, 112, 2, 2, 264, 48, 3, 2, 2, 2, 265, 266, 7, 116, 2, 2, 266, 267, 7, 119, 2, 2, 267, 268, 7, 112, 2, 2, 268, 269, 7, 118, 2, 2, 269, 270, 7, 107, 2, 2, 270, 271, 7, 111, 2, 2, 271, 272, 7, 103, 2, 2, 272, 50, 3, 2, 2, 2, 273, 274, 7, 111, 2, 2, 274, 275, 7, 113, 2, 2, 275, 276, 7, 102, 2, 2, 276, 277, 7, 119, 2, 2, 277, 278, 7, 110, 2, 2, 278, 279, 7, 103, 2, 2, 279, 52, 3, 2, 2, 2, 280, 281, 7, 108, 2, 2, 281, 282, 7, 99, 2, 2, 282, 283, 7, 120, 2, 2, 283, 284, 7, 99, 2, 2, 284, 285, 7, 117, 2, 2, 285, 286, 7, 101, 2, 2, 286, 287, 7, 116, 2, 2, 287, 288, 7, 107, 2, 2, 288, 289, 7, 114, 2, 2, 289, 290, 7, 118, 2, 2, 290, 54, 3, 2, 2, 2, 291, 292, 7, 102, 2, 2, 292, 293, 7, 103, 2, 2, 293, 294, 7, 104, 2, 2, 294, 295, 7, 103, 2, 2, 295, 296, 7, 116, 2, 2, 296, 297, 7, 116, 2, 2, 297, 298, 7, 103, 2, 2, 298, 299, 7, 102, 2, 2, 299, 56, 3, 2, 2, 2, 300, 301, 7, 107, 2, 2, 301, 302, 7, 104, 2, 2, 302, 58, 3, 2, 2, 2, 303, 304, 7, 104, 2, 2, 304, 305, 7, 113, 2, 2, 305, 306, 7, 116, 2, 2, 306, 60, 3, 2, 2, 2, 307, 308, 7, 121, 2, 2, 308, 309, 7, 106, 2, 2, 309, 310, 7, 107, 2, 2, 310, 311, 7, 110, 2, 2, 311, 312, 7, 103, 2, 2, 312, 62, 3, 2, 2, 2, 313, 314, 7, 116, 2, 2, 314, 315, 7, 103, 2, 2, 315, 316, 7, 118, 2, 2, 316, 317, 7, 119, 2, 2, 317, 318, 7, 116, 2, 2, 318, 319, 7, 112, 2, 2, 319, 64, 3, 2, 2, 2, 320, 321, 7, 101, 2, 2, 321, 322, 7, 113, 2, 2, 322, 323, 7, 112, 2, 2, 323, 324, 7, 117, 2, 2, 324, 325, 7, 118, 2, 2, 325, 326, 7, 103, 2, 2, 326, 327, 7, 122, 2, 2, 327, 328, 7, 114, 2, 2, 328, 329, 7, 116, 2, 2, 329, 66, 3, 2, 2, 2, 330, 331, 7, 101, 2, 2, 331, 332, 7, 113, 2, 2, 332, 333, 7, 112, 2, 2, 333, 334, 7, 118, 2, 2, 334, 335, 7, 107, 2, 2, 335, 336, 7, 112, 2, 2, 336, 337, 7, 119, 2, 2, 337, 338, 7, 103, 2, 2, 338, 68, 3, 2, 2, 2, 339, 340, 7, 100, 2, 2, 340, 341, 7, 116, 2, 2, 341, 342, 7, 103, 2, 2, 342, 343, 7, 99, 2, 2, 343, 344, 7, 109, 2, 2, 344, 70, 3, 2, 2, 2, 345, 346, 7, 105, 2, 2, 346, 347, 7, 113, 2, 2, 347, 348, 7, 118, 2, 2, 348, 349, 7, 113, 2, 2, 349, 72, 3, 2, 2, 2, 350, 351, 7, 113, 2, 2, 351, 352, 7, 118, 2, 2, 352, 353, 7, 106, 2, 2, 353, 354, 7, 103, 2, 2, 354, 355, 7, 116, 2, 2, 355, 356, 7, 121, 2, 2, 356, 357, 7, 107, 2, 2, 357, 358, 7, 117, 2, 2, 358, 359, 7, 103, 2, 2, 359, 74, 3, 2, 2, 2, 360, 361, 7, 118, 2, 2, 361, 362, 7, 116, 2, 2, 362, 363, 7, 123, 2, 2, 363, 76, 3, 2, 2, 2, 364, 365, 7, 110, 2, 2, 365, 366, 7, 99, 2, 2, 366, 367, 7, 100, 2, 2, 367, 368, 7, 103, 2, 2, 368, 369, 7, 110, 2, 2, 369, 78, 3, 2, 2, 2, 370, 371, 7, 110, 2, 2, 371, 372, 7, 99, 2, 2, 372, 373, 7, 100, 2, 2, 373, 374, 7, 103, 2, 2, 374, 375, 7, 110, 2, 2, 375, 376, 7, 117, 2, 2, 376, 80, 3, 2, 2, 2, 377, 378, 7, 118, 2, 2, 378, 379, 7, 99, 2, 2, 379, 380, 7, 107, 2, 2, 380, 381, 7, 110, 2, 
2, 381, 82, 3, 2, 2, 2, 382, 383, 7, 107, 2, 2, 383, 384, 7, 117, 2, 2, 384, 385, 7, 112, 2, 2, 385, 386, 7, 118, 2, 2, 386, 84, 3, 2, 2, 2, 387, 388, 7, 107, 2, 2, 388, 389, 7, 117, 2, 2, 389, 86, 3, 2, 2, 2, 390, 391, 7, 110, 2, 2, 391, 392, 7, 103, 2, 2, 392, 393, 7, 118, 2, 2, 393, 88, 3, 2, 2, 2, 394, 395, 7, 101, 2, 2, 395, 396, 7, 113, 2, 2, 396, 397, 7, 112, 2, 2, 397, 398, 7, 117, 2, 2, 398, 399, 7, 118, 2, 2, 399, 90, 3, 2, 2, 2, 400, 401, 7, 103, 2, 2, 401, 402, 7, 122, 2, 2, 402, 403, 7, 118, 2, 2, 403, 404, 7, 103, 2, 2, 404, 405, 7, 116, 2, 2, 405, 406, 7, 112, 2, 2, 406, 92, 3, 2, 2, 2, 407, 408, 7, 99, 2, 2, 408, 409, 7, 117, 2, 2, 409, 410, 7, 117, 2, 2, 410, 411, 7, 103, 2, 2, 411, 412, 7, 116, 2, 2, 412, 413, 7, 118, 2, 2, 413, 94, 3, 2, 2, 2, 414, 415, 7, 101, 2, 2, 415, 416, 7, 106, 2, 2, 416, 417, 7, 103, 2, 2, 417, 418, 7, 101, 2, 2, 418, 419, 7, 109, 2, 2, 419, 96, 3, 2, 2, 2, 420, 421, 7, 119, 2, 2, 421, 422, 7, 112, 2, 2, 422, 423, 7, 116, 2, 2, 423, 424, 7, 103, 2, 2, 424, 425, 7, 99, 2, 2, 425, 426, 7, 101, 2, 2, 426, 427, 7, 106, 2, 2, 427, 428, 7, 99, 2, 2, 428, 429, 7, 100, 2, 2, 429, 430, 7, 110, 2, 2, 430, 431, 7, 103, 2, 2, 431, 98, 3, 2, 2, 2, 432, 433, 7, 102, 2, 2, 433, 434, 7, 103, 2, 2, 434, 435, 7, 100, 2, 2, 435, 436, 7, 119, 2, 2, 436, 437, 7, 105, 2, 2, 437, 100, 3, 2, 2, 2, 438, 439, 7, 63, 2, 2, 439, 102, 3, 2, 2, 2, 440, 441, 7, 44, 2, 2, 441, 467, 7, 63, 2, 2, 442, 443, 7, 49, 2, 2, 443, 467, 7, 63, 2, 2, 444, 445, 7, 39, 2, 2, 445, 467, 7, 63, 2, 2, 446, 447, 7, 45, 2, 2, 447, 467, 7, 63, 2, 2, 448, 449, 7, 47, 2, 2, 449, 467, 7, 63, 2, 2, 450, 451, 7, 62, 2, 2, 451, 452, 7, 62, 2, 2, 452, 467, 7, 63, 2, 2, 453, 454, 7, 64, 2, 2, 454, 455, 7, 64, 2, 2, 455, 467, 7, 63, 2, 2, 456, 457, 7, 64, 2, 2, 457, 458, 7, 64, 2, 2, 458, 459, 7, 64, 2, 2, 459, 467, 7, 63, 2, 2, 460, 461, 7, 40, 2, 2, 461, 467, 7, 63, 2, 2, 462, 463, 7, 96, 2, 2, 463, 467, 7, 63, 2, 2, 464, 465, 7, 126, 2, 2, 465, 467, 7, 63, 2, 2, 466, 440, 3, 2, 2, 2, 466, 442, 3, 2, 2, 2, 466, 444, 3, 2, 2, 2, 466, 446, 3, 2, 2, 2, 466, 448, 3, 2, 2, 2, 466, 450, 3, 2, 2, 2, 466, 453, 3, 2, 2, 2, 466, 456, 3, 2, 2, 2, 466, 460, 3, 2, 2, 2, 466, 462, 3, 2, 2, 2, 466, 464, 3, 2, 2, 2, 467, 104, 3, 2, 2, 2, 468, 469, 7, 63, 2, 2, 469, 470, 7, 63, 2, 2, 470, 106, 3, 2, 2, 2, 471, 472, 7, 45, 2, 2, 472, 108, 3, 2, 2, 2, 473, 474, 7, 47, 2, 2, 474, 110, 3, 2, 2, 2, 475, 476, 7, 44, 2, 2, 476, 112, 3, 2, 2, 2, 477, 478, 7, 49, 2, 2, 478, 114, 3, 2, 2, 2, 479, 480, 7, 39, 2, 2, 480, 116, 3, 2, 2, 2, 481, 482, 7, 126, 2, 2, 482, 118, 3, 2, 2, 2, 483, 484, 7, 40, 2, 2, 484, 120, 3, 2, 2, 2, 485, 486, 7, 128, 2, 2, 486, 122, 3, 2, 2, 2, 487, 488, 7, 111, 2, 2, 488, 489, 7, 99, 2, 2, 489, 490, 7, 122, 2, 2, 490, 124, 3, 2, 2, 2, 491, 492, 7, 111, 2, 2, 492, 493, 7, 107, 2, 2, 493, 494, 7, 112, 2, 2, 494, 126, 3, 2, 2, 2, 495, 496, 7, 35, 2, 2, 496, 497, 7, 63, 2, 2, 497, 128, 3, 2, 2, 2, 498, 499, 7, 62, 2, 2, 499, 130, 3, 2, 2, 2, 500, 501, 7, 62, 2, 2, 501, 502, 7, 63, 2, 2, 502, 132, 3, 2, 2, 2, 503, 504, 7, 64, 2, 2, 504, 134, 3, 2, 2, 2, 505, 506, 7, 64, 2, 2, 506, 507, 7, 63, 2, 2, 507, 136, 3, 2, 2, 2, 508, 509, 7, 62, 2, 2, 509, 510, 7, 62, 2, 2, 510, 138, 3, 2, 2, 2, 511, 512, 7, 64, 2, 2, 512, 513, 7, 64, 2, 2, 513, 140, 3, 2, 2, 2, 514, 515, 7, 64, 2, 2, 515, 516, 7, 64, 2, 2, 516, 517, 7, 64, 2, 2, 517, 142, 3, 2, 2, 2, 518, 519, 7, 48, 2, 2, 519, 520, 7, 48, 2, 2, 520, 521, 7, 48, 2, 2, 521, 144, 3, 2, 2, 2, 522, 525, 5, 105, 53, 2, 523, 525, 5, 127, 64, 2, 524, 522, 3, 2, 2, 2, 524, 
523, 3, 2, 2, 2, 525, 146, 3, 2, 2, 2, 526, 527, 7, 45, 2, 2, 527, 528, 7, 45, 2, 2, 528, 148, 3, 2, 2, 2, 529, 530, 7, 47, 2, 2, 530, 531, 7, 47, 2, 2, 531, 150, 3, 2, 2, 2, 532, 533, 7, 35, 2, 2, 533, 152, 3, 2, 2, 2, 534, 539, 7, 36, 2, 2, 535, 538, 5, 155, 78, 2, 536, 538, 10, 2, 2, 2, 537, 535, 3, 2, 2, 2, 537, 536, 3, 2, 2, 2, 538, 541, 3, 2, 2, 2, 539, 537, 3, 2, 2, 2, 539, 540, 3, 2, 2, 2, 540, 542, 3, 2, 2, 2, 541, 539, 3, 2, 2, 2, 542, 553, 7, 36, 2, 2, 543, 548, 7, 41, 2, 2, 544, 547, 5, 155, 78, 2, 545, 547, 10, 3, 2, 2, 546, 544, 3, 2, 2, 2, 546, 545, 3, 2, 2, 2, 547, 550, 3, 2, 2, 2, 548, 546, 3, 2, 2, 2, 548, 549, 3, 2, 2, 2, 549, 551, 3, 2, 2, 2, 550, 548, 3, 2, 2, 2, 551, 553, 7, 41, 2, 2, 552, 534, 3, 2, 2, 2, 552, 543, 3, 2, 2, 2, 553, 154, 3, 2, 2, 2, 554, 555, 7, 94, 2, 2, 555, 556, 9, 4, 2, 2, 556, 156, 3, 2, 2, 2, 557, 561, 9, 5, 2, 2, 558, 560, 9, 6, 2, 2, 559, 558, 3, 2, 2, 2, 560, 563, 3, 2, 2, 2, 561, 559, 3, 2, 2, 2, 561, 562, 3, 2, 2, 2, 562, 158, 3, 2, 2, 2, 563, 561, 3, 2, 2, 2, 564, 566, 9, 7, 2, 2, 565, 564, 3, 2, 2, 2, 566, 567, 3, 2, 2, 2, 567, 565, 3, 2, 2, 2, 567, 568, 3, 2, 2, 2, 568, 569, 3, 2, 2, 2, 569, 570, 8, 80, 2, 2, 570, 160, 3, 2, 2, 2, 571, 572, 7, 49, 2, 2, 572, 573, 7, 44, 2, 2, 573, 577, 3, 2, 2, 2, 574, 576, 11, 2, 2, 2, 575, 574, 3, 2, 2, 2, 576, 579, 3, 2, 2, 2, 577, 578, 3, 2, 2, 2, 577, 575, 3, 2, 2, 2, 578, 583, 3, 2, 2, 2, 579, 577, 3, 2, 2, 2, 580, 581, 7, 44, 2, 2, 581, 584, 7, 49, 2, 2, 582, 584, 7, 2, 2, 3, 583, 580, 3, 2, 2, 2, 583, 582, 3, 2, 2, 2, 584, 585, 3, 2, 2, 2, 585, 586, 8, 81, 2, 2, 586, 162, 3, 2, 2, 2, 587, 588, 7, 49, 2, 2, 588, 589, 7, 49, 2, 2, 589, 593, 3, 2, 2, 2, 590, 592, 10, 8, 2, 2, 591, 590, 3, 2, 2, 2, 592, 595, 3, 2, 2, 2, 593, 591, 3, 2, 2, 2, 593, 594, 3, 2, 2, 2, 594, 596, 3, 2, 2, 2, 595, 593, 3, 2, 2, 2, 596, 597, 8, 82, 2, 2, 597, 164, 3, 2, 2, 2, 598, 599, 9, 9, 2, 2, 599, 166, 3, 2, 2, 2, 600, 609, 7, 50, 2, 2, 601, 605, 9, 10, 2, 2, 602, 604, 5, 165, 83, 2, 603, 602, 3, 2, 2, 2, 604, 607, 3, 2, 2, 2, 605, 603, 3, 2, 2, 2, 605, 606, 3, 2, 2, 2, 606, 609, 3, 2, 2, 2, 607, 605, 3, 2, 2, 2, 608, 600, 3, 2, 2, 2, 608, 601, 3, 2, 2, 2, 609, 168, 3, 2, 2, 2, 610, 612, 9, 11, 2, 2, 611, 613, 9, 12, 2, 2, 612, 611, 3, 2, 2, 2, 612, 613, 3, 2, 2, 2, 613, 615, 3, 2, 2, 2, 614, 616, 5, 165, 83, 2, 615, 614, 3, 2, 2, 2, 616, 617, 3, 2, 2, 2, 617, 615, 3, 2, 2, 2, 617, 618, 3, 2, 2, 2, 618, 170, 3, 2, 2, 2, 619, 621, 5, 109, 55, 2, 620, 619, 3, 2, 2, 2, 620, 621, 3, 2, 2, 2, 621, 622, 3, 2, 2, 2, 622, 623, 5, 167, 84, 2, 623, 627, 7, 48, 2, 2, 624, 626, 5, 165, 83, 2, 625, 624, 3, 2, 2, 2, 626, 629, 3, 2, 2, 2, 627, 625, 3, 2, 2, 2, 627, 628, 3, 2, 2, 2, 628, 631, 3, 2, 2, 2, 629, 627, 3, 2, 2, 2, 630, 632, 5, 169, 85, 2, 631, 630, 3, 2, 2, 2, 631, 632, 3, 2, 2, 2, 632, 664, 3, 2, 2, 2, 633, 635, 5, 109, 55, 2, 634, 633, 3, 2, 2, 2, 634, 635, 3, 2, 2, 2, 635, 636, 3, 2, 2, 2, 636, 638, 7, 48, 2, 2, 637, 639, 5, 165, 83, 2, 638, 637, 3, 2, 2, 2, 639, 640, 3, 2, 2, 2, 640, 638, 3, 2, 2, 2, 640, 641, 3, 2, 2, 2, 641, 643, 3, 2, 2, 2, 642, 644, 5, 169, 85, 2, 643, 642, 3, 2, 2, 2, 643, 644, 3, 2, 2, 2, 644, 664, 3, 2, 2, 2, 645, 647, 5, 109, 55, 2, 646, 645, 3, 2, 2, 2, 646, 647, 3, 2, 2, 2, 647, 648, 3, 2, 2, 2, 648, 650, 5, 167, 84, 2, 649, 651, 5, 169, 85, 2, 650, 649, 3, 2, 2, 2, 650, 651, 3, 2, 2, 2, 651, 664, 3, 2, 2, 2, 652, 654, 5, 109, 55, 2, 653, 652, 3, 2, 2, 2, 653, 654, 3, 2, 2, 2, 654, 655, 3, 2, 2, 2, 655, 656, 7, 50, 2, 2, 656, 657, 7, 122, 2, 2, 657, 659, 3, 2, 2, 2, 658, 660, 9, 13, 2, 2, 659, 
658, 3, 2, 2, 2, 660, 661, 3, 2, 2, 2, 661, 659, 3, 2, 2, 2, 661, 662, 3, 2, 2, 2, 662, 664, 3, 2, 2, 2, 663, 620, 3, 2, 2, 2, 663, 634, 3, 2, 2, 2, 663, 646, 3, 2, 2, 2, 663, 653, 3, 2, 2, 2, 664, 172, 3, 2, 2, 2, 30, 2, 466, 524, 537, 539, 546, 548, 552, 561, 567, 577, 583, 593, 605, 608, 612, 617, 620, 627, 631, 634, 640, 643, 646, 650, 653, 661, 663, 3, 2, 3, 2] \ No newline at end of file
diff --git a/deps/v8/src/torque/TorqueLexer.tokens b/deps/v8/src/torque/TorqueLexer.tokens
deleted file mode 100644
index 63589b27b7..0000000000
--- a/deps/v8/src/torque/TorqueLexer.tokens
+++ /dev/null
@@ -1,154 +0,0 @@
-T__0=1
-T__1=2
-T__2=3
-T__3=4
-T__4=5
-T__5=6
-T__6=7
-T__7=8
-T__8=9
-T__9=10
-T__10=11
-T__11=12
-T__12=13
-T__13=14
-T__14=15
-T__15=16
-T__16=17
-T__17=18
-T__18=19
-T__19=20
-T__20=21
-MACRO=22
-BUILTIN=23
-RUNTIME=24
-MODULE=25
-JAVASCRIPT=26
-DEFERRED=27
-IF=28
-FOR=29
-WHILE=30
-RETURN=31
-CONSTEXPR=32
-CONTINUE=33
-BREAK=34
-GOTO=35
-OTHERWISE=36
-TRY=37
-LABEL=38
-LABELS=39
-TAIL=40
-ISNT=41
-IS=42
-LET=43
-CONST=44
-EXTERN=45
-ASSERT_TOKEN=46
-CHECK_TOKEN=47
-UNREACHABLE_TOKEN=48
-DEBUG_TOKEN=49
-ASSIGNMENT=50
-ASSIGNMENT_OPERATOR=51
-EQUAL=52
-PLUS=53
-MINUS=54
-MULTIPLY=55
-DIVIDE=56
-MODULO=57
-BIT_OR=58
-BIT_AND=59
-BIT_NOT=60
-MAX=61
-MIN=62
-NOT_EQUAL=63
-LESS_THAN=64
-LESS_THAN_EQUAL=65
-GREATER_THAN=66
-GREATER_THAN_EQUAL=67
-SHIFT_LEFT=68
-SHIFT_RIGHT=69
-SHIFT_RIGHT_ARITHMETIC=70
-VARARGS=71
-EQUALITY_OPERATOR=72
-INCREMENT=73
-DECREMENT=74
-NOT=75
-STRING_LITERAL=76
-IDENTIFIER=77
-WS=78
-BLOCK_COMMENT=79
-LINE_COMMENT=80
-DECIMAL_LITERAL=81
-'('=1
-')'=2
-'=>'=3
-','=4
-':'=5
-'type'=6
-'?'=7
-'||'=8
-'&&'=9
-'.'=10
-'['=11
-']'=12
-'{'=13
-'}'=14
-';'=15
-'of'=16
-'else'=17
-'extends'=18
-'generates'=19
-'operator'=20
-'struct'=21
-'macro'=22
-'builtin'=23
-'runtime'=24
-'module'=25
-'javascript'=26
-'deferred'=27
-'if'=28
-'for'=29
-'while'=30
-'return'=31
-'constexpr'=32
-'continue'=33
-'break'=34
-'goto'=35
-'otherwise'=36
-'try'=37
-'label'=38
-'labels'=39
-'tail'=40
-'isnt'=41
-'is'=42
-'let'=43
-'const'=44
-'extern'=45
-'assert'=46
-'check'=47
-'unreachable'=48
-'debug'=49
-'='=50
-'=='=52
-'+'=53
-'-'=54
-'*'=55
-'/'=56
-'%'=57
-'|'=58
-'&'=59
-'~'=60
-'max'=61
-'min'=62
-'!='=63
-'<'=64
-'<='=65
-'>'=66
-'>='=67
-'<<'=68
-'>>'=69
-'>>>'=70
-'...'=71
-'++'=73
-'--'=74
-'!'=75
diff --git a/deps/v8/src/torque/TorqueListener.h b/deps/v8/src/torque/TorqueListener.h
deleted file mode 100644
index 937ed606ba..0000000000
--- a/deps/v8/src/torque/TorqueListener.h
+++ /dev/null
@@ -1,351 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_TORQUE_TORQUELISTENER_H_
-#define V8_TORQUE_TORQUELISTENER_H_
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#pragma once
-
-#include "./antlr4-runtime.h"
-#include "TorqueParser.h"
-
-/**
- * This interface defines an abstract listener for a parse tree produced by
- * TorqueParser.
- */
-class TorqueListener : public antlr4::tree::ParseTreeListener {
- public:
- virtual void enterType(TorqueParser::TypeContext* ctx) = 0;
- virtual void exitType(TorqueParser::TypeContext* ctx) = 0;
-
- virtual void enterTypeList(TorqueParser::TypeListContext* ctx) = 0;
- virtual void exitTypeList(TorqueParser::TypeListContext* ctx) = 0;
-
- virtual void enterGenericSpecializationTypeList(
- TorqueParser::GenericSpecializationTypeListContext* ctx) = 0;
- virtual void exitGenericSpecializationTypeList(
- TorqueParser::GenericSpecializationTypeListContext* ctx) = 0;
-
- virtual void enterOptionalGenericTypeList(
- TorqueParser::OptionalGenericTypeListContext* ctx) = 0;
- virtual void exitOptionalGenericTypeList(
- TorqueParser::OptionalGenericTypeListContext* ctx) = 0;
-
- virtual void enterTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* ctx) = 0;
- virtual void exitTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* ctx) = 0;
-
- virtual void enterLabelParameter(
- TorqueParser::LabelParameterContext* ctx) = 0;
- virtual void exitLabelParameter(TorqueParser::LabelParameterContext* ctx) = 0;
-
- virtual void enterOptionalType(TorqueParser::OptionalTypeContext* ctx) = 0;
- virtual void exitOptionalType(TorqueParser::OptionalTypeContext* ctx) = 0;
-
- virtual void enterOptionalLabelList(
- TorqueParser::OptionalLabelListContext* ctx) = 0;
- virtual void exitOptionalLabelList(
- TorqueParser::OptionalLabelListContext* ctx) = 0;
-
- virtual void enterOptionalOtherwise(
- TorqueParser::OptionalOtherwiseContext* ctx) = 0;
- virtual void exitOptionalOtherwise(
- TorqueParser::OptionalOtherwiseContext* ctx) = 0;
-
- virtual void enterParameter(TorqueParser::ParameterContext* ctx) = 0;
- virtual void exitParameter(TorqueParser::ParameterContext* ctx) = 0;
-
- virtual void enterParameterList(TorqueParser::ParameterListContext* ctx) = 0;
- virtual void exitParameterList(TorqueParser::ParameterListContext* ctx) = 0;
-
- virtual void enterLabelDeclaration(
- TorqueParser::LabelDeclarationContext* ctx) = 0;
- virtual void exitLabelDeclaration(
- TorqueParser::LabelDeclarationContext* ctx) = 0;
-
- virtual void enterExpression(TorqueParser::ExpressionContext* ctx) = 0;
- virtual void exitExpression(TorqueParser::ExpressionContext* ctx) = 0;
-
- virtual void enterConditionalExpression(
- TorqueParser::ConditionalExpressionContext* ctx) = 0;
- virtual void exitConditionalExpression(
- TorqueParser::ConditionalExpressionContext* ctx) = 0;
-
- virtual void enterLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* ctx) = 0;
- virtual void exitLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* ctx) = 0;
-
- virtual void enterLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* ctx) = 0;
- virtual void exitLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* ctx) = 0;
-
- virtual void enterBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* ctx) = 0;
- virtual void exitBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* ctx) = 0;
-
- virtual void enterEqualityExpression(
- TorqueParser::EqualityExpressionContext* ctx) = 0;
- virtual void exitEqualityExpression(
- TorqueParser::EqualityExpressionContext* ctx) = 0;
-
- virtual void enterRelationalExpression(
- TorqueParser::RelationalExpressionContext* ctx) = 0;
- virtual void exitRelationalExpression(
- TorqueParser::RelationalExpressionContext* ctx) = 0;
-
- virtual void enterShiftExpression(
- TorqueParser::ShiftExpressionContext* ctx) = 0;
- virtual void exitShiftExpression(
- TorqueParser::ShiftExpressionContext* ctx) = 0;
-
- virtual void enterAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* ctx) = 0;
- virtual void exitAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* ctx) = 0;
-
- virtual void enterMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* ctx) = 0;
- virtual void exitMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* ctx) = 0;
-
- virtual void enterUnaryExpression(
- TorqueParser::UnaryExpressionContext* ctx) = 0;
- virtual void exitUnaryExpression(
- TorqueParser::UnaryExpressionContext* ctx) = 0;
-
- virtual void enterLocationExpression(
- TorqueParser::LocationExpressionContext* ctx) = 0;
- virtual void exitLocationExpression(
- TorqueParser::LocationExpressionContext* ctx) = 0;
-
- virtual void enterIncrementDecrement(
- TorqueParser::IncrementDecrementContext* ctx) = 0;
- virtual void exitIncrementDecrement(
- TorqueParser::IncrementDecrementContext* ctx) = 0;
-
- virtual void enterAssignment(TorqueParser::AssignmentContext* ctx) = 0;
- virtual void exitAssignment(TorqueParser::AssignmentContext* ctx) = 0;
-
- virtual void enterAssignmentExpression(
- TorqueParser::AssignmentExpressionContext* ctx) = 0;
- virtual void exitAssignmentExpression(
- TorqueParser::AssignmentExpressionContext* ctx) = 0;
-
- virtual void enterStructExpression(
- TorqueParser::StructExpressionContext* ctx) = 0;
- virtual void exitStructExpression(
- TorqueParser::StructExpressionContext* ctx) = 0;
-
- virtual void enterFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* ctx) = 0;
- virtual void exitFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* ctx) = 0;
-
- virtual void enterPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* ctx) = 0;
- virtual void exitPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* ctx) = 0;
-
- virtual void enterForInitialization(
- TorqueParser::ForInitializationContext* ctx) = 0;
- virtual void exitForInitialization(
- TorqueParser::ForInitializationContext* ctx) = 0;
-
- virtual void enterForLoop(TorqueParser::ForLoopContext* ctx) = 0;
- virtual void exitForLoop(TorqueParser::ForLoopContext* ctx) = 0;
-
- virtual void enterRangeSpecifier(
- TorqueParser::RangeSpecifierContext* ctx) = 0;
- virtual void exitRangeSpecifier(TorqueParser::RangeSpecifierContext* ctx) = 0;
-
- virtual void enterForOfRange(TorqueParser::ForOfRangeContext* ctx) = 0;
- virtual void exitForOfRange(TorqueParser::ForOfRangeContext* ctx) = 0;
-
- virtual void enterForOfLoop(TorqueParser::ForOfLoopContext* ctx) = 0;
- virtual void exitForOfLoop(TorqueParser::ForOfLoopContext* ctx) = 0;
-
- virtual void enterArgument(TorqueParser::ArgumentContext* ctx) = 0;
- virtual void exitArgument(TorqueParser::ArgumentContext* ctx) = 0;
-
- virtual void enterArgumentList(TorqueParser::ArgumentListContext* ctx) = 0;
- virtual void exitArgumentList(TorqueParser::ArgumentListContext* ctx) = 0;
-
- virtual void enterHelperCall(TorqueParser::HelperCallContext* ctx) = 0;
- virtual void exitHelperCall(TorqueParser::HelperCallContext* ctx) = 0;
-
- virtual void enterLabelReference(
- TorqueParser::LabelReferenceContext* ctx) = 0;
- virtual void exitLabelReference(TorqueParser::LabelReferenceContext* ctx) = 0;
-
- virtual void enterVariableDeclaration(
- TorqueParser::VariableDeclarationContext* ctx) = 0;
- virtual void exitVariableDeclaration(
- TorqueParser::VariableDeclarationContext* ctx) = 0;
-
- virtual void enterVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* ctx) = 0;
- virtual void exitVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* ctx) = 0;
-
- virtual void enterHelperCallStatement(
- TorqueParser::HelperCallStatementContext* ctx) = 0;
- virtual void exitHelperCallStatement(
- TorqueParser::HelperCallStatementContext* ctx) = 0;
-
- virtual void enterExpressionStatement(
- TorqueParser::ExpressionStatementContext* ctx) = 0;
- virtual void exitExpressionStatement(
- TorqueParser::ExpressionStatementContext* ctx) = 0;
-
- virtual void enterIfStatement(TorqueParser::IfStatementContext* ctx) = 0;
- virtual void exitIfStatement(TorqueParser::IfStatementContext* ctx) = 0;
-
- virtual void enterWhileLoop(TorqueParser::WhileLoopContext* ctx) = 0;
- virtual void exitWhileLoop(TorqueParser::WhileLoopContext* ctx) = 0;
-
- virtual void enterReturnStatement(
- TorqueParser::ReturnStatementContext* ctx) = 0;
- virtual void exitReturnStatement(
- TorqueParser::ReturnStatementContext* ctx) = 0;
-
- virtual void enterBreakStatement(
- TorqueParser::BreakStatementContext* ctx) = 0;
- virtual void exitBreakStatement(TorqueParser::BreakStatementContext* ctx) = 0;
-
- virtual void enterContinueStatement(
- TorqueParser::ContinueStatementContext* ctx) = 0;
- virtual void exitContinueStatement(
- TorqueParser::ContinueStatementContext* ctx) = 0;
-
- virtual void enterGotoStatement(TorqueParser::GotoStatementContext* ctx) = 0;
- virtual void exitGotoStatement(TorqueParser::GotoStatementContext* ctx) = 0;
-
- virtual void enterHandlerWithStatement(
- TorqueParser::HandlerWithStatementContext* ctx) = 0;
- virtual void exitHandlerWithStatement(
- TorqueParser::HandlerWithStatementContext* ctx) = 0;
-
- virtual void enterTryLabelStatement(
- TorqueParser::TryLabelStatementContext* ctx) = 0;
- virtual void exitTryLabelStatement(
- TorqueParser::TryLabelStatementContext* ctx) = 0;
-
- virtual void enterDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* ctx) = 0;
- virtual void exitDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* ctx) = 0;
-
- virtual void enterStatement(TorqueParser::StatementContext* ctx) = 0;
- virtual void exitStatement(TorqueParser::StatementContext* ctx) = 0;
-
- virtual void enterStatementList(TorqueParser::StatementListContext* ctx) = 0;
- virtual void exitStatementList(TorqueParser::StatementListContext* ctx) = 0;
-
- virtual void enterStatementScope(
- TorqueParser::StatementScopeContext* ctx) = 0;
- virtual void exitStatementScope(TorqueParser::StatementScopeContext* ctx) = 0;
-
- virtual void enterStatementBlock(
- TorqueParser::StatementBlockContext* ctx) = 0;
- virtual void exitStatementBlock(TorqueParser::StatementBlockContext* ctx) = 0;
-
- virtual void enterHelperBody(TorqueParser::HelperBodyContext* ctx) = 0;
- virtual void exitHelperBody(TorqueParser::HelperBodyContext* ctx) = 0;
-
- virtual void enterFieldDeclaration(
- TorqueParser::FieldDeclarationContext* ctx) = 0;
- virtual void exitFieldDeclaration(
- TorqueParser::FieldDeclarationContext* ctx) = 0;
-
- virtual void enterFieldListDeclaration(
- TorqueParser::FieldListDeclarationContext* ctx) = 0;
- virtual void exitFieldListDeclaration(
- TorqueParser::FieldListDeclarationContext* ctx) = 0;
-
- virtual void enterExtendsDeclaration(
- TorqueParser::ExtendsDeclarationContext* ctx) = 0;
- virtual void exitExtendsDeclaration(
- TorqueParser::ExtendsDeclarationContext* ctx) = 0;
-
- virtual void enterGeneratesDeclaration(
- TorqueParser::GeneratesDeclarationContext* ctx) = 0;
- virtual void exitGeneratesDeclaration(
- TorqueParser::GeneratesDeclarationContext* ctx) = 0;
-
- virtual void enterConstexprDeclaration(
- TorqueParser::ConstexprDeclarationContext* ctx) = 0;
- virtual void exitConstexprDeclaration(
- TorqueParser::ConstexprDeclarationContext* ctx) = 0;
-
- virtual void enterTypeDeclaration(
- TorqueParser::TypeDeclarationContext* ctx) = 0;
- virtual void exitTypeDeclaration(
- TorqueParser::TypeDeclarationContext* ctx) = 0;
-
- virtual void enterTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* ctx) = 0;
- virtual void exitTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* ctx) = 0;
-
- virtual void enterExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* ctx) = 0;
- virtual void exitExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* ctx) = 0;
-
- virtual void enterExternalMacro(TorqueParser::ExternalMacroContext* ctx) = 0;
- virtual void exitExternalMacro(TorqueParser::ExternalMacroContext* ctx) = 0;
-
- virtual void enterExternalRuntime(
- TorqueParser::ExternalRuntimeContext* ctx) = 0;
- virtual void exitExternalRuntime(
- TorqueParser::ExternalRuntimeContext* ctx) = 0;
-
- virtual void enterBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* ctx) = 0;
- virtual void exitBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* ctx) = 0;
-
- virtual void enterGenericSpecialization(
- TorqueParser::GenericSpecializationContext* ctx) = 0;
- virtual void exitGenericSpecialization(
- TorqueParser::GenericSpecializationContext* ctx) = 0;
-
- virtual void enterMacroDeclaration(
- TorqueParser::MacroDeclarationContext* ctx) = 0;
- virtual void exitMacroDeclaration(
- TorqueParser::MacroDeclarationContext* ctx) = 0;
-
- virtual void enterExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* ctx) = 0;
- virtual void exitExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* ctx) = 0;
-
- virtual void enterConstDeclaration(
- TorqueParser::ConstDeclarationContext* ctx) = 0;
- virtual void exitConstDeclaration(
- TorqueParser::ConstDeclarationContext* ctx) = 0;
-
- virtual void enterStructDeclaration(
- TorqueParser::StructDeclarationContext* ctx) = 0;
- virtual void exitStructDeclaration(
- TorqueParser::StructDeclarationContext* ctx) = 0;
-
- virtual void enterDeclaration(TorqueParser::DeclarationContext* ctx) = 0;
- virtual void exitDeclaration(TorqueParser::DeclarationContext* ctx) = 0;
-
- virtual void enterModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* ctx) = 0;
- virtual void exitModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* ctx) = 0;
-
- virtual void enterFile(TorqueParser::FileContext* ctx) = 0;
- virtual void exitFile(TorqueParser::FileContext* ctx) = 0;
-};
-
-#endif // V8_TORQUE_TORQUELISTENER_H_
diff --git a/deps/v8/src/torque/TorqueParser.cpp b/deps/v8/src/torque/TorqueParser.cpp
deleted file mode 100644
index 24548073a1..0000000000
--- a/deps/v8/src/torque/TorqueParser.cpp
+++ /dev/null
@@ -1,8370 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#include "TorqueListener.h"
-#include "TorqueVisitor.h"
-
-#include "TorqueParser.h"
-
-using namespace antlrcpp;
-using namespace antlr4;
-
-TorqueParser::TorqueParser(TokenStream* input) : Parser(input) {
- _interpreter = new atn::ParserATNSimulator(this, _atn, _decisionToDFA,
- _sharedContextCache);
-}
-
-TorqueParser::~TorqueParser() { delete _interpreter; }
-
-std::string TorqueParser::getGrammarFileName() const { return "Torque.g4"; }
-
-const std::vector<std::string>& TorqueParser::getRuleNames() const {
- return _ruleNames;
-}
-
-dfa::Vocabulary& TorqueParser::getVocabulary() const { return _vocabulary; }
-
-//----------------- TypeContext
-//------------------------------------------------------------------
-
-TorqueParser::TypeContext::TypeContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::TypeContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-tree::TerminalNode* TorqueParser::TypeContext::CONSTEXPR() {
- return getToken(TorqueParser::CONSTEXPR, 0);
-}
-
-tree::TerminalNode* TorqueParser::TypeContext::BUILTIN() {
- return getToken(TorqueParser::BUILTIN, 0);
-}
-
-TorqueParser::TypeListContext* TorqueParser::TypeContext::typeList() {
- return getRuleContext<TorqueParser::TypeListContext>(0);
-}
-
-std::vector<TorqueParser::TypeContext*> TorqueParser::TypeContext::type() {
- return getRuleContexts<TorqueParser::TypeContext>();
-}
-
-TorqueParser::TypeContext* TorqueParser::TypeContext::type(size_t i) {
- return getRuleContext<TorqueParser::TypeContext>(i);
-}
-
-tree::TerminalNode* TorqueParser::TypeContext::BIT_OR() {
- return getToken(TorqueParser::BIT_OR, 0);
-}
-
-size_t TorqueParser::TypeContext::getRuleIndex() const {
- return TorqueParser::RuleType;
-}
-
-void TorqueParser::TypeContext::enterRule(tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterType(this);
-}
-
-void TorqueParser::TypeContext::exitRule(tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitType(this);
-}
-
-antlrcpp::Any TorqueParser::TypeContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitType(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::TypeContext* TorqueParser::type() { return type(0); }
-
-TorqueParser::TypeContext* TorqueParser::type(int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::TypeContext* _localctx =
- _tracker.createInstance<TypeContext>(_ctx, parentState);
- TorqueParser::TypeContext* previousContext = _localctx;
- size_t startState = 0;
- enterRecursionRule(_localctx, 0, TorqueParser::RuleType, precedence);
-
- size_t _la = 0;
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(168);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::CONSTEXPR:
- case TorqueParser::IDENTIFIER: {
- setState(154);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::CONSTEXPR) {
- setState(153);
- match(TorqueParser::CONSTEXPR);
- }
- setState(156);
- match(TorqueParser::IDENTIFIER);
- break;
- }
-
- case TorqueParser::BUILTIN: {
- setState(157);
- match(TorqueParser::BUILTIN);
- setState(158);
- match(TorqueParser::T__0);
- setState(159);
- typeList();
- setState(160);
- match(TorqueParser::T__1);
- setState(161);
- match(TorqueParser::T__2);
- setState(162);
- type(3);
- break;
- }
-
- case TorqueParser::T__0: {
- setState(164);
- match(TorqueParser::T__0);
- setState(165);
- type(0);
- setState(166);
- match(TorqueParser::T__1);
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
- _ctx->stop = _input->LT(-1);
- setState(175);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 2,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx =
- _tracker.createInstance<TypeContext>(parentContext, parentState);
- pushNewRecursionContext(_localctx, startState, RuleType);
- setState(170);
-
- if (!(precpred(_ctx, 2)))
- throw FailedPredicateException(this, "precpred(_ctx, 2)");
- setState(171);
- match(TorqueParser::BIT_OR);
- setState(172);
- type(3);
- }
- setState(177);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input,
- 2, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- TypeListContext
-//------------------------------------------------------------------
-
-TorqueParser::TypeListContext::TypeListContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::TypeContext*> TorqueParser::TypeListContext::type() {
- return getRuleContexts<TorqueParser::TypeContext>();
-}
-
-TorqueParser::TypeContext* TorqueParser::TypeListContext::type(size_t i) {
- return getRuleContext<TorqueParser::TypeContext>(i);
-}
-
-size_t TorqueParser::TypeListContext::getRuleIndex() const {
- return TorqueParser::RuleTypeList;
-}
-
-void TorqueParser::TypeListContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterTypeList(this);
-}
-
-void TorqueParser::TypeListContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitTypeList(this);
-}
-
-antlrcpp::Any TorqueParser::TypeListContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitTypeList(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::TypeListContext* TorqueParser::typeList() {
- TypeListContext* _localctx =
- _tracker.createInstance<TypeListContext>(_ctx, getState());
- enterRule(_localctx, 2, TorqueParser::RuleTypeList);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(186);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::BUILTIN) |
- (1ULL << TorqueParser::CONSTEXPR))) != 0) ||
- _la == TorqueParser::IDENTIFIER) {
- setState(178);
- type(0);
- setState(183);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (_la == TorqueParser::T__3) {
- setState(179);
- match(TorqueParser::T__3);
- setState(180);
- type(0);
- setState(185);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- GenericSpecializationTypeListContext
-//------------------------------------------------------------------
-
-TorqueParser::GenericSpecializationTypeListContext::
- GenericSpecializationTypeListContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::TypeListContext*
-TorqueParser::GenericSpecializationTypeListContext::typeList() {
- return getRuleContext<TorqueParser::TypeListContext>(0);
-}
-
-size_t TorqueParser::GenericSpecializationTypeListContext::getRuleIndex()
- const {
- return TorqueParser::RuleGenericSpecializationTypeList;
-}
-
-void TorqueParser::GenericSpecializationTypeListContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterGenericSpecializationTypeList(this);
-}
-
-void TorqueParser::GenericSpecializationTypeListContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitGenericSpecializationTypeList(this);
-}
-
-antlrcpp::Any TorqueParser::GenericSpecializationTypeListContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitGenericSpecializationTypeList(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::GenericSpecializationTypeListContext*
-TorqueParser::genericSpecializationTypeList() {
- GenericSpecializationTypeListContext* _localctx =
- _tracker.createInstance<GenericSpecializationTypeListContext>(_ctx,
- getState());
- enterRule(_localctx, 4, TorqueParser::RuleGenericSpecializationTypeList);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(188);
- match(TorqueParser::LESS_THAN);
- setState(189);
- typeList();
- setState(190);
- match(TorqueParser::GREATER_THAN);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- OptionalGenericTypeListContext
-//------------------------------------------------------------------
-
-TorqueParser::OptionalGenericTypeListContext::OptionalGenericTypeListContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<tree::TerminalNode*>
-TorqueParser::OptionalGenericTypeListContext::IDENTIFIER() {
- return getTokens(TorqueParser::IDENTIFIER);
-}
-
-tree::TerminalNode* TorqueParser::OptionalGenericTypeListContext::IDENTIFIER(
- size_t i) {
- return getToken(TorqueParser::IDENTIFIER, i);
-}
-
-size_t TorqueParser::OptionalGenericTypeListContext::getRuleIndex() const {
- return TorqueParser::RuleOptionalGenericTypeList;
-}
-
-void TorqueParser::OptionalGenericTypeListContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterOptionalGenericTypeList(this);
-}
-
-void TorqueParser::OptionalGenericTypeListContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitOptionalGenericTypeList(this);
-}
-
-antlrcpp::Any TorqueParser::OptionalGenericTypeListContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitOptionalGenericTypeList(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::OptionalGenericTypeListContext*
-TorqueParser::optionalGenericTypeList() {
- OptionalGenericTypeListContext* _localctx =
- _tracker.createInstance<OptionalGenericTypeListContext>(_ctx, getState());
- enterRule(_localctx, 6, TorqueParser::RuleOptionalGenericTypeList);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(206);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::LESS_THAN) {
- setState(192);
- match(TorqueParser::LESS_THAN);
- setState(193);
- match(TorqueParser::IDENTIFIER);
- setState(194);
- match(TorqueParser::T__4);
- setState(195);
- match(TorqueParser::T__5);
- setState(202);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (_la == TorqueParser::T__3) {
- setState(196);
- match(TorqueParser::T__3);
- setState(197);
- match(TorqueParser::IDENTIFIER);
- setState(198);
- match(TorqueParser::T__4);
- setState(199);
- match(TorqueParser::T__5);
- setState(204);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
- setState(205);
- match(TorqueParser::GREATER_THAN);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- TypeListMaybeVarArgsContext
-//------------------------------------------------------------------
-
-TorqueParser::TypeListMaybeVarArgsContext::TypeListMaybeVarArgsContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::TypeContext*>
-TorqueParser::TypeListMaybeVarArgsContext::type() {
- return getRuleContexts<TorqueParser::TypeContext>();
-}
-
-TorqueParser::TypeContext* TorqueParser::TypeListMaybeVarArgsContext::type(
- size_t i) {
- return getRuleContext<TorqueParser::TypeContext>(i);
-}
-
-tree::TerminalNode* TorqueParser::TypeListMaybeVarArgsContext::VARARGS() {
- return getToken(TorqueParser::VARARGS, 0);
-}
-
-size_t TorqueParser::TypeListMaybeVarArgsContext::getRuleIndex() const {
- return TorqueParser::RuleTypeListMaybeVarArgs;
-}
-
-void TorqueParser::TypeListMaybeVarArgsContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterTypeListMaybeVarArgs(this);
-}
-
-void TorqueParser::TypeListMaybeVarArgsContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitTypeListMaybeVarArgs(this);
-}
-
-antlrcpp::Any TorqueParser::TypeListMaybeVarArgsContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitTypeListMaybeVarArgs(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::TypeListMaybeVarArgsContext*
-TorqueParser::typeListMaybeVarArgs() {
- TypeListMaybeVarArgsContext* _localctx =
- _tracker.createInstance<TypeListMaybeVarArgsContext>(_ctx, getState());
- enterRule(_localctx, 8, TorqueParser::RuleTypeListMaybeVarArgs);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- size_t alt;
- setState(227);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 10, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(208);
- match(TorqueParser::T__0);
- setState(210);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::BUILTIN) |
- (1ULL << TorqueParser::CONSTEXPR))) != 0) ||
- _la == TorqueParser::IDENTIFIER) {
- setState(209);
- type(0);
- }
- setState(216);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 8, _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- setState(212);
- match(TorqueParser::T__3);
- setState(213);
- type(0);
- }
- setState(218);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 8, _ctx);
- }
- setState(221);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__3) {
- setState(219);
- match(TorqueParser::T__3);
- setState(220);
- match(TorqueParser::VARARGS);
- }
- setState(223);
- match(TorqueParser::T__1);
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(224);
- match(TorqueParser::T__0);
- setState(225);
- match(TorqueParser::VARARGS);
- setState(226);
- match(TorqueParser::T__1);
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- LabelParameterContext
-//------------------------------------------------------------------
-
-TorqueParser::LabelParameterContext::LabelParameterContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::LabelParameterContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeListContext* TorqueParser::LabelParameterContext::typeList() {
- return getRuleContext<TorqueParser::TypeListContext>(0);
-}
-
-size_t TorqueParser::LabelParameterContext::getRuleIndex() const {
- return TorqueParser::RuleLabelParameter;
-}
-
-void TorqueParser::LabelParameterContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterLabelParameter(this);
-}
-
-void TorqueParser::LabelParameterContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitLabelParameter(this);
-}
-
-antlrcpp::Any TorqueParser::LabelParameterContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitLabelParameter(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::LabelParameterContext* TorqueParser::labelParameter() {
- LabelParameterContext* _localctx =
- _tracker.createInstance<LabelParameterContext>(_ctx, getState());
- enterRule(_localctx, 10, TorqueParser::RuleLabelParameter);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(229);
- match(TorqueParser::IDENTIFIER);
- setState(234);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__0) {
- setState(230);
- match(TorqueParser::T__0);
- setState(231);
- typeList();
- setState(232);
- match(TorqueParser::T__1);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- OptionalTypeContext
-//------------------------------------------------------------------
-
-TorqueParser::OptionalTypeContext::OptionalTypeContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::TypeContext* TorqueParser::OptionalTypeContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
-size_t TorqueParser::OptionalTypeContext::getRuleIndex() const {
- return TorqueParser::RuleOptionalType;
-}
-
-void TorqueParser::OptionalTypeContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterOptionalType(this);
-}
-
-void TorqueParser::OptionalTypeContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitOptionalType(this);
-}
-
-antlrcpp::Any TorqueParser::OptionalTypeContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitOptionalType(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::OptionalTypeContext* TorqueParser::optionalType() {
- OptionalTypeContext* _localctx =
- _tracker.createInstance<OptionalTypeContext>(_ctx, getState());
- enterRule(_localctx, 12, TorqueParser::RuleOptionalType);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(238);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__4) {
- setState(236);
- match(TorqueParser::T__4);
- setState(237);
- type(0);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- OptionalLabelListContext
-//------------------------------------------------------------------
-
-TorqueParser::OptionalLabelListContext::OptionalLabelListContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::OptionalLabelListContext::LABELS() {
- return getToken(TorqueParser::LABELS, 0);
-}
-
-std::vector<TorqueParser::LabelParameterContext*>
-TorqueParser::OptionalLabelListContext::labelParameter() {
- return getRuleContexts<TorqueParser::LabelParameterContext>();
-}
-
-TorqueParser::LabelParameterContext*
-TorqueParser::OptionalLabelListContext::labelParameter(size_t i) {
- return getRuleContext<TorqueParser::LabelParameterContext>(i);
-}
-
-size_t TorqueParser::OptionalLabelListContext::getRuleIndex() const {
- return TorqueParser::RuleOptionalLabelList;
-}
-
-void TorqueParser::OptionalLabelListContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterOptionalLabelList(this);
-}
-
-void TorqueParser::OptionalLabelListContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitOptionalLabelList(this);
-}
-
-antlrcpp::Any TorqueParser::OptionalLabelListContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitOptionalLabelList(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::OptionalLabelListContext* TorqueParser::optionalLabelList() {
- OptionalLabelListContext* _localctx =
- _tracker.createInstance<OptionalLabelListContext>(_ctx, getState());
- enterRule(_localctx, 14, TorqueParser::RuleOptionalLabelList);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(249);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::LABELS) {
- setState(240);
- match(TorqueParser::LABELS);
- setState(241);
- labelParameter();
- setState(246);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (_la == TorqueParser::T__3) {
- setState(242);
- match(TorqueParser::T__3);
- setState(243);
- labelParameter();
- setState(248);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- OptionalOtherwiseContext
-//------------------------------------------------------------------
-
-TorqueParser::OptionalOtherwiseContext::OptionalOtherwiseContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::OptionalOtherwiseContext::OTHERWISE() {
- return getToken(TorqueParser::OTHERWISE, 0);
-}
-
-std::vector<tree::TerminalNode*>
-TorqueParser::OptionalOtherwiseContext::IDENTIFIER() {
- return getTokens(TorqueParser::IDENTIFIER);
-}
-
-tree::TerminalNode* TorqueParser::OptionalOtherwiseContext::IDENTIFIER(
- size_t i) {
- return getToken(TorqueParser::IDENTIFIER, i);
-}
-
-size_t TorqueParser::OptionalOtherwiseContext::getRuleIndex() const {
- return TorqueParser::RuleOptionalOtherwise;
-}
-
-void TorqueParser::OptionalOtherwiseContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterOptionalOtherwise(this);
-}
-
-void TorqueParser::OptionalOtherwiseContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitOptionalOtherwise(this);
-}
-
-antlrcpp::Any TorqueParser::OptionalOtherwiseContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitOptionalOtherwise(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::OptionalOtherwiseContext* TorqueParser::optionalOtherwise() {
- OptionalOtherwiseContext* _localctx =
- _tracker.createInstance<OptionalOtherwiseContext>(_ctx, getState());
- enterRule(_localctx, 16, TorqueParser::RuleOptionalOtherwise);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(260);
- _errHandler->sync(this);
-
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 16, _ctx)) {
- case 1: {
- setState(251);
- match(TorqueParser::OTHERWISE);
- setState(252);
- match(TorqueParser::IDENTIFIER);
- setState(257);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 15, _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- setState(253);
- match(TorqueParser::T__3);
- setState(254);
- match(TorqueParser::IDENTIFIER);
- }
- setState(259);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 15, _ctx);
- }
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ParameterContext
-//------------------------------------------------------------------
-
-TorqueParser::ParameterContext::ParameterContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ParameterContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeContext* TorqueParser::ParameterContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
-size_t TorqueParser::ParameterContext::getRuleIndex() const {
- return TorqueParser::RuleParameter;
-}
-
-void TorqueParser::ParameterContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterParameter(this);
-}
-
-void TorqueParser::ParameterContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitParameter(this);
-}
-
-antlrcpp::Any TorqueParser::ParameterContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitParameter(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ParameterContext* TorqueParser::parameter() {
- ParameterContext* _localctx =
- _tracker.createInstance<ParameterContext>(_ctx, getState());
- enterRule(_localctx, 18, TorqueParser::RuleParameter);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(262);
- match(TorqueParser::IDENTIFIER);
- setState(263);
- match(TorqueParser::T__4);
- setState(265);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::BUILTIN) |
- (1ULL << TorqueParser::CONSTEXPR))) != 0) ||
- _la == TorqueParser::IDENTIFIER) {
- setState(264);
- type(0);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ParameterListContext
-//------------------------------------------------------------------
-
-TorqueParser::ParameterListContext::ParameterListContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::ParameterContext*>
-TorqueParser::ParameterListContext::parameter() {
- return getRuleContexts<TorqueParser::ParameterContext>();
-}
-
-TorqueParser::ParameterContext* TorqueParser::ParameterListContext::parameter(
- size_t i) {
- return getRuleContext<TorqueParser::ParameterContext>(i);
-}
-
-tree::TerminalNode* TorqueParser::ParameterListContext::VARARGS() {
- return getToken(TorqueParser::VARARGS, 0);
-}
-
-tree::TerminalNode* TorqueParser::ParameterListContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-size_t TorqueParser::ParameterListContext::getRuleIndex() const {
- return TorqueParser::RuleParameterList;
-}
-
-void TorqueParser::ParameterListContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterParameterList(this);
-}
-
-void TorqueParser::ParameterListContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitParameterList(this);
-}
-
-antlrcpp::Any TorqueParser::ParameterListContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitParameterList(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ParameterListContext* TorqueParser::parameterList() {
- ParameterListContext* _localctx =
- _tracker.createInstance<ParameterListContext>(_ctx, getState());
- enterRule(_localctx, 20, TorqueParser::RuleParameterList);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(288);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 20, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(267);
- match(TorqueParser::T__0);
- setState(269);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::IDENTIFIER) {
- setState(268);
- parameter();
- }
- setState(275);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (_la == TorqueParser::T__3) {
- setState(271);
- match(TorqueParser::T__3);
- setState(272);
- parameter();
- setState(277);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
- setState(278);
- match(TorqueParser::T__1);
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(279);
- match(TorqueParser::T__0);
- setState(280);
- parameter();
- setState(281);
- match(TorqueParser::T__3);
- setState(282);
- parameter();
- setState(283);
- match(TorqueParser::T__3);
- setState(284);
- match(TorqueParser::VARARGS);
- setState(285);
- match(TorqueParser::IDENTIFIER);
- setState(286);
- match(TorqueParser::T__1);
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- LabelDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::LabelDeclarationContext::LabelDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::LabelDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::ParameterListContext*
-TorqueParser::LabelDeclarationContext::parameterList() {
- return getRuleContext<TorqueParser::ParameterListContext>(0);
-}
-
-size_t TorqueParser::LabelDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleLabelDeclaration;
-}
-
-void TorqueParser::LabelDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterLabelDeclaration(this);
-}
-
-void TorqueParser::LabelDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitLabelDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::LabelDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitLabelDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::LabelDeclarationContext* TorqueParser::labelDeclaration() {
- LabelDeclarationContext* _localctx =
- _tracker.createInstance<LabelDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 22, TorqueParser::RuleLabelDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(290);
- match(TorqueParser::IDENTIFIER);
- setState(292);
- _errHandler->sync(this);
-
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 21, _ctx)) {
- case 1: {
- setState(291);
- parameterList();
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::ExpressionContext::ExpressionContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::ConditionalExpressionContext*
-TorqueParser::ExpressionContext::conditionalExpression() {
- return getRuleContext<TorqueParser::ConditionalExpressionContext>(0);
-}
-
-size_t TorqueParser::ExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleExpression;
-}
-
-void TorqueParser::ExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterExpression(this);
-}
-
-void TorqueParser::ExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitExpression(this);
-}
-
-antlrcpp::Any TorqueParser::ExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ExpressionContext* TorqueParser::expression() {
- ExpressionContext* _localctx =
- _tracker.createInstance<ExpressionContext>(_ctx, getState());
- enterRule(_localctx, 24, TorqueParser::RuleExpression);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(294);
- conditionalExpression(0);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ConditionalExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::ConditionalExpressionContext::ConditionalExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::LogicalORExpressionContext*>
-TorqueParser::ConditionalExpressionContext::logicalORExpression() {
- return getRuleContexts<TorqueParser::LogicalORExpressionContext>();
-}
-
-TorqueParser::LogicalORExpressionContext*
-TorqueParser::ConditionalExpressionContext::logicalORExpression(size_t i) {
- return getRuleContext<TorqueParser::LogicalORExpressionContext>(i);
-}
-
-TorqueParser::ConditionalExpressionContext*
-TorqueParser::ConditionalExpressionContext::conditionalExpression() {
- return getRuleContext<TorqueParser::ConditionalExpressionContext>(0);
-}
-
-size_t TorqueParser::ConditionalExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleConditionalExpression;
-}
-
-void TorqueParser::ConditionalExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterConditionalExpression(this);
-}
-
-void TorqueParser::ConditionalExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitConditionalExpression(this);
-}
-
-antlrcpp::Any TorqueParser::ConditionalExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitConditionalExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ConditionalExpressionContext*
-TorqueParser::conditionalExpression() {
- return conditionalExpression(0);
-}
-
-TorqueParser::ConditionalExpressionContext* TorqueParser::conditionalExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::ConditionalExpressionContext* _localctx =
- _tracker.createInstance<ConditionalExpressionContext>(_ctx, parentState);
- TorqueParser::ConditionalExpressionContext* previousContext = _localctx;
- size_t startState = 26;
- enterRecursionRule(_localctx, 26, TorqueParser::RuleConditionalExpression,
- precedence);
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(297);
- logicalORExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(307);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 22,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<ConditionalExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState,
- RuleConditionalExpression);
- setState(299);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(300);
- match(TorqueParser::T__6);
- setState(301);
- logicalORExpression(0);
- setState(302);
- match(TorqueParser::T__4);
- setState(303);
- logicalORExpression(0);
- }
- setState(309);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 22, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- LogicalORExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::LogicalORExpressionContext::LogicalORExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::LogicalANDExpressionContext*
-TorqueParser::LogicalORExpressionContext::logicalANDExpression() {
- return getRuleContext<TorqueParser::LogicalANDExpressionContext>(0);
-}
-
-TorqueParser::LogicalORExpressionContext*
-TorqueParser::LogicalORExpressionContext::logicalORExpression() {
- return getRuleContext<TorqueParser::LogicalORExpressionContext>(0);
-}
-
-size_t TorqueParser::LogicalORExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleLogicalORExpression;
-}
-
-void TorqueParser::LogicalORExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterLogicalORExpression(this);
-}
-
-void TorqueParser::LogicalORExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitLogicalORExpression(this);
-}
-
-antlrcpp::Any TorqueParser::LogicalORExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitLogicalORExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::LogicalORExpressionContext* TorqueParser::logicalORExpression() {
- return logicalORExpression(0);
-}
-
-TorqueParser::LogicalORExpressionContext* TorqueParser::logicalORExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::LogicalORExpressionContext* _localctx =
- _tracker.createInstance<LogicalORExpressionContext>(_ctx, parentState);
- TorqueParser::LogicalORExpressionContext* previousContext = _localctx;
- size_t startState = 28;
- enterRecursionRule(_localctx, 28, TorqueParser::RuleLogicalORExpression,
- precedence);
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(311);
- logicalANDExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(318);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 23,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<LogicalORExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState, RuleLogicalORExpression);
- setState(313);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(314);
- match(TorqueParser::T__7);
- setState(315);
- logicalANDExpression(0);
- }
- setState(320);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 23, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- LogicalANDExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::LogicalANDExpressionContext::LogicalANDExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::BitwiseExpressionContext*
-TorqueParser::LogicalANDExpressionContext::bitwiseExpression() {
- return getRuleContext<TorqueParser::BitwiseExpressionContext>(0);
-}
-
-TorqueParser::LogicalANDExpressionContext*
-TorqueParser::LogicalANDExpressionContext::logicalANDExpression() {
- return getRuleContext<TorqueParser::LogicalANDExpressionContext>(0);
-}
-
-size_t TorqueParser::LogicalANDExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleLogicalANDExpression;
-}
-
-void TorqueParser::LogicalANDExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterLogicalANDExpression(this);
-}
-
-void TorqueParser::LogicalANDExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitLogicalANDExpression(this);
-}
-
-antlrcpp::Any TorqueParser::LogicalANDExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitLogicalANDExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::LogicalANDExpressionContext*
-TorqueParser::logicalANDExpression() {
- return logicalANDExpression(0);
-}
-
-TorqueParser::LogicalANDExpressionContext* TorqueParser::logicalANDExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::LogicalANDExpressionContext* _localctx =
- _tracker.createInstance<LogicalANDExpressionContext>(_ctx, parentState);
- TorqueParser::LogicalANDExpressionContext* previousContext = _localctx;
- size_t startState = 30;
- enterRecursionRule(_localctx, 30, TorqueParser::RuleLogicalANDExpression,
- precedence);
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(322);
- bitwiseExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(329);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 24,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<LogicalANDExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState,
- RuleLogicalANDExpression);
- setState(324);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(325);
- match(TorqueParser::T__8);
- setState(326);
- bitwiseExpression(0);
- }
- setState(331);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 24, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- BitwiseExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::BitwiseExpressionContext::BitwiseExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::EqualityExpressionContext*
-TorqueParser::BitwiseExpressionContext::equalityExpression() {
- return getRuleContext<TorqueParser::EqualityExpressionContext>(0);
-}
-
-TorqueParser::BitwiseExpressionContext*
-TorqueParser::BitwiseExpressionContext::bitwiseExpression() {
- return getRuleContext<TorqueParser::BitwiseExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::BitwiseExpressionContext::BIT_AND() {
- return getToken(TorqueParser::BIT_AND, 0);
-}
-
-tree::TerminalNode* TorqueParser::BitwiseExpressionContext::BIT_OR() {
- return getToken(TorqueParser::BIT_OR, 0);
-}
-
-size_t TorqueParser::BitwiseExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleBitwiseExpression;
-}
-
-void TorqueParser::BitwiseExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterBitwiseExpression(this);
-}
-
-void TorqueParser::BitwiseExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitBitwiseExpression(this);
-}
-
-antlrcpp::Any TorqueParser::BitwiseExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitBitwiseExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::BitwiseExpressionContext* TorqueParser::bitwiseExpression() {
- return bitwiseExpression(0);
-}
-
-TorqueParser::BitwiseExpressionContext* TorqueParser::bitwiseExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::BitwiseExpressionContext* _localctx =
- _tracker.createInstance<BitwiseExpressionContext>(_ctx, parentState);
- TorqueParser::BitwiseExpressionContext* previousContext = _localctx;
- size_t startState = 32;
- enterRecursionRule(_localctx, 32, TorqueParser::RuleBitwiseExpression,
- precedence);
-
- size_t _la = 0;
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(333);
- equalityExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(340);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 25,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<BitwiseExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState, RuleBitwiseExpression);
- setState(335);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(336);
- dynamic_cast<BitwiseExpressionContext*>(_localctx)->op = _input->LT(1);
- _la = _input->LA(1);
- if (!(_la == TorqueParser::BIT_OR
-
- || _la == TorqueParser::BIT_AND)) {
- dynamic_cast<BitwiseExpressionContext*>(_localctx)->op =
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(337);
- equalityExpression(0);
- }
- setState(342);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 25, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- EqualityExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::EqualityExpressionContext::EqualityExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::RelationalExpressionContext*
-TorqueParser::EqualityExpressionContext::relationalExpression() {
- return getRuleContext<TorqueParser::RelationalExpressionContext>(0);
-}
-
-TorqueParser::EqualityExpressionContext*
-TorqueParser::EqualityExpressionContext::equalityExpression() {
- return getRuleContext<TorqueParser::EqualityExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::EqualityExpressionContext::EQUAL() {
- return getToken(TorqueParser::EQUAL, 0);
-}
-
-tree::TerminalNode* TorqueParser::EqualityExpressionContext::NOT_EQUAL() {
- return getToken(TorqueParser::NOT_EQUAL, 0);
-}
-
-size_t TorqueParser::EqualityExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleEqualityExpression;
-}
-
-void TorqueParser::EqualityExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterEqualityExpression(this);
-}
-
-void TorqueParser::EqualityExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitEqualityExpression(this);
-}
-
-antlrcpp::Any TorqueParser::EqualityExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitEqualityExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::EqualityExpressionContext* TorqueParser::equalityExpression() {
- return equalityExpression(0);
-}
-
-TorqueParser::EqualityExpressionContext* TorqueParser::equalityExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::EqualityExpressionContext* _localctx =
- _tracker.createInstance<EqualityExpressionContext>(_ctx, parentState);
- TorqueParser::EqualityExpressionContext* previousContext = _localctx;
- size_t startState = 34;
- enterRecursionRule(_localctx, 34, TorqueParser::RuleEqualityExpression,
- precedence);
-
- size_t _la = 0;
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(344);
- relationalExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(351);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 26,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<EqualityExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState, RuleEqualityExpression);
- setState(346);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(347);
- dynamic_cast<EqualityExpressionContext*>(_localctx)->op = _input->LT(1);
- _la = _input->LA(1);
- if (!(_la == TorqueParser::EQUAL
-
- || _la == TorqueParser::NOT_EQUAL)) {
- dynamic_cast<EqualityExpressionContext*>(_localctx)->op =
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(348);
- relationalExpression(0);
- }
- setState(353);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 26, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- RelationalExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::RelationalExpressionContext::RelationalExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::ShiftExpressionContext*
-TorqueParser::RelationalExpressionContext::shiftExpression() {
- return getRuleContext<TorqueParser::ShiftExpressionContext>(0);
-}
-
-TorqueParser::RelationalExpressionContext*
-TorqueParser::RelationalExpressionContext::relationalExpression() {
- return getRuleContext<TorqueParser::RelationalExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::RelationalExpressionContext::LESS_THAN() {
- return getToken(TorqueParser::LESS_THAN, 0);
-}
-
-tree::TerminalNode*
-TorqueParser::RelationalExpressionContext::LESS_THAN_EQUAL() {
- return getToken(TorqueParser::LESS_THAN_EQUAL, 0);
-}
-
-tree::TerminalNode* TorqueParser::RelationalExpressionContext::GREATER_THAN() {
- return getToken(TorqueParser::GREATER_THAN, 0);
-}
-
-tree::TerminalNode*
-TorqueParser::RelationalExpressionContext::GREATER_THAN_EQUAL() {
- return getToken(TorqueParser::GREATER_THAN_EQUAL, 0);
-}
-
-size_t TorqueParser::RelationalExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleRelationalExpression;
-}
-
-void TorqueParser::RelationalExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterRelationalExpression(this);
-}
-
-void TorqueParser::RelationalExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitRelationalExpression(this);
-}
-
-antlrcpp::Any TorqueParser::RelationalExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitRelationalExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::RelationalExpressionContext*
-TorqueParser::relationalExpression() {
- return relationalExpression(0);
-}
-
-TorqueParser::RelationalExpressionContext* TorqueParser::relationalExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::RelationalExpressionContext* _localctx =
- _tracker.createInstance<RelationalExpressionContext>(_ctx, parentState);
- TorqueParser::RelationalExpressionContext* previousContext = _localctx;
- size_t startState = 36;
- enterRecursionRule(_localctx, 36, TorqueParser::RuleRelationalExpression,
- precedence);
-
- size_t _la = 0;
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(355);
- shiftExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(362);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 27,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<RelationalExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState,
- RuleRelationalExpression);
- setState(357);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(358);
- dynamic_cast<RelationalExpressionContext*>(_localctx)->op =
- _input->LT(1);
- _la = _input->LA(1);
- if (!(((((_la - 64) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 64)) &
- ((1ULL << (TorqueParser::LESS_THAN - 64)) |
- (1ULL << (TorqueParser::LESS_THAN_EQUAL - 64)) |
- (1ULL << (TorqueParser::GREATER_THAN - 64)) |
- (1ULL << (TorqueParser::GREATER_THAN_EQUAL - 64)))) != 0))) {
- dynamic_cast<RelationalExpressionContext*>(_localctx)->op =
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(359);
- shiftExpression(0);
- }
- setState(364);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 27, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- ShiftExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::ShiftExpressionContext::ShiftExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::AdditiveExpressionContext*
-TorqueParser::ShiftExpressionContext::additiveExpression() {
- return getRuleContext<TorqueParser::AdditiveExpressionContext>(0);
-}
-
-TorqueParser::ShiftExpressionContext*
-TorqueParser::ShiftExpressionContext::shiftExpression() {
- return getRuleContext<TorqueParser::ShiftExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::ShiftExpressionContext::SHIFT_RIGHT() {
- return getToken(TorqueParser::SHIFT_RIGHT, 0);
-}
-
-tree::TerminalNode* TorqueParser::ShiftExpressionContext::SHIFT_LEFT() {
- return getToken(TorqueParser::SHIFT_LEFT, 0);
-}
-
-tree::TerminalNode*
-TorqueParser::ShiftExpressionContext::SHIFT_RIGHT_ARITHMETIC() {
- return getToken(TorqueParser::SHIFT_RIGHT_ARITHMETIC, 0);
-}
-
-size_t TorqueParser::ShiftExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleShiftExpression;
-}
-
-void TorqueParser::ShiftExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterShiftExpression(this);
-}
-
-void TorqueParser::ShiftExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitShiftExpression(this);
-}
-
-antlrcpp::Any TorqueParser::ShiftExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitShiftExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ShiftExpressionContext* TorqueParser::shiftExpression() {
- return shiftExpression(0);
-}
-
-TorqueParser::ShiftExpressionContext* TorqueParser::shiftExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::ShiftExpressionContext* _localctx =
- _tracker.createInstance<ShiftExpressionContext>(_ctx, parentState);
- TorqueParser::ShiftExpressionContext* previousContext = _localctx;
- size_t startState = 38;
- enterRecursionRule(_localctx, 38, TorqueParser::RuleShiftExpression,
- precedence);
-
- size_t _la = 0;
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(366);
- additiveExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(373);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 28,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<ShiftExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState, RuleShiftExpression);
- setState(368);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(369);
- dynamic_cast<ShiftExpressionContext*>(_localctx)->op = _input->LT(1);
- _la = _input->LA(1);
- if (!(((((_la - 68) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 68)) &
- ((1ULL << (TorqueParser::SHIFT_LEFT - 68)) |
- (1ULL << (TorqueParser::SHIFT_RIGHT - 68)) |
- (1ULL << (TorqueParser::SHIFT_RIGHT_ARITHMETIC - 68)))) !=
- 0))) {
- dynamic_cast<ShiftExpressionContext*>(_localctx)->op =
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(370);
- additiveExpression(0);
- }
- setState(375);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 28, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- AdditiveExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::AdditiveExpressionContext::AdditiveExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::MultiplicativeExpressionContext*
-TorqueParser::AdditiveExpressionContext::multiplicativeExpression() {
- return getRuleContext<TorqueParser::MultiplicativeExpressionContext>(0);
-}
-
-TorqueParser::AdditiveExpressionContext*
-TorqueParser::AdditiveExpressionContext::additiveExpression() {
- return getRuleContext<TorqueParser::AdditiveExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::AdditiveExpressionContext::PLUS() {
- return getToken(TorqueParser::PLUS, 0);
-}
-
-tree::TerminalNode* TorqueParser::AdditiveExpressionContext::MINUS() {
- return getToken(TorqueParser::MINUS, 0);
-}
-
-size_t TorqueParser::AdditiveExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleAdditiveExpression;
-}
-
-void TorqueParser::AdditiveExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterAdditiveExpression(this);
-}
-
-void TorqueParser::AdditiveExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitAdditiveExpression(this);
-}
-
-antlrcpp::Any TorqueParser::AdditiveExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitAdditiveExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::AdditiveExpressionContext* TorqueParser::additiveExpression() {
- return additiveExpression(0);
-}
-
-TorqueParser::AdditiveExpressionContext* TorqueParser::additiveExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::AdditiveExpressionContext* _localctx =
- _tracker.createInstance<AdditiveExpressionContext>(_ctx, parentState);
- TorqueParser::AdditiveExpressionContext* previousContext = _localctx;
- size_t startState = 40;
- enterRecursionRule(_localctx, 40, TorqueParser::RuleAdditiveExpression,
- precedence);
-
- size_t _la = 0;
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(377);
- multiplicativeExpression(0);
- _ctx->stop = _input->LT(-1);
- setState(384);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 29,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<AdditiveExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState, RuleAdditiveExpression);
- setState(379);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(380);
- dynamic_cast<AdditiveExpressionContext*>(_localctx)->op = _input->LT(1);
- _la = _input->LA(1);
- if (!(_la == TorqueParser::PLUS
-
- || _la == TorqueParser::MINUS)) {
- dynamic_cast<AdditiveExpressionContext*>(_localctx)->op =
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(381);
- multiplicativeExpression(0);
- }
- setState(386);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 29, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- MultiplicativeExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::MultiplicativeExpressionContext::MultiplicativeExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::UnaryExpressionContext*
-TorqueParser::MultiplicativeExpressionContext::unaryExpression() {
- return getRuleContext<TorqueParser::UnaryExpressionContext>(0);
-}
-
-TorqueParser::MultiplicativeExpressionContext*
-TorqueParser::MultiplicativeExpressionContext::multiplicativeExpression() {
- return getRuleContext<TorqueParser::MultiplicativeExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::MultiplicativeExpressionContext::MULTIPLY() {
- return getToken(TorqueParser::MULTIPLY, 0);
-}
-
-tree::TerminalNode* TorqueParser::MultiplicativeExpressionContext::DIVIDE() {
- return getToken(TorqueParser::DIVIDE, 0);
-}
-
-tree::TerminalNode* TorqueParser::MultiplicativeExpressionContext::MODULO() {
- return getToken(TorqueParser::MODULO, 0);
-}
-
-size_t TorqueParser::MultiplicativeExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleMultiplicativeExpression;
-}
-
-void TorqueParser::MultiplicativeExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterMultiplicativeExpression(this);
-}
-
-void TorqueParser::MultiplicativeExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitMultiplicativeExpression(this);
-}
-
-antlrcpp::Any TorqueParser::MultiplicativeExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitMultiplicativeExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::MultiplicativeExpressionContext*
-TorqueParser::multiplicativeExpression() {
- return multiplicativeExpression(0);
-}
-
-TorqueParser::MultiplicativeExpressionContext*
-TorqueParser::multiplicativeExpression(int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::MultiplicativeExpressionContext* _localctx =
- _tracker.createInstance<MultiplicativeExpressionContext>(_ctx,
- parentState);
- TorqueParser::MultiplicativeExpressionContext* previousContext = _localctx;
- size_t startState = 42;
- enterRecursionRule(_localctx, 42, TorqueParser::RuleMultiplicativeExpression,
- precedence);
-
- size_t _la = 0;
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(388);
- unaryExpression();
- _ctx->stop = _input->LT(-1);
- setState(395);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 30,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- _localctx = _tracker.createInstance<MultiplicativeExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState,
- RuleMultiplicativeExpression);
- setState(390);
-
- if (!(precpred(_ctx, 1)))
- throw FailedPredicateException(this, "precpred(_ctx, 1)");
- setState(391);
- dynamic_cast<MultiplicativeExpressionContext*>(_localctx)->op =
- _input->LT(1);
- _la = _input->LA(1);
- if (!((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) & ((1ULL << TorqueParser::MULTIPLY) |
- (1ULL << TorqueParser::DIVIDE) |
- (1ULL << TorqueParser::MODULO))) != 0))) {
- dynamic_cast<MultiplicativeExpressionContext*>(_localctx)->op =
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(392);
- unaryExpression();
- }
- setState(397);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 30, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- UnaryExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::UnaryExpressionContext::UnaryExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::AssignmentExpressionContext*
-TorqueParser::UnaryExpressionContext::assignmentExpression() {
- return getRuleContext<TorqueParser::AssignmentExpressionContext>(0);
-}
-
-TorqueParser::UnaryExpressionContext*
-TorqueParser::UnaryExpressionContext::unaryExpression() {
- return getRuleContext<TorqueParser::UnaryExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::UnaryExpressionContext::PLUS() {
- return getToken(TorqueParser::PLUS, 0);
-}
-
-tree::TerminalNode* TorqueParser::UnaryExpressionContext::MINUS() {
- return getToken(TorqueParser::MINUS, 0);
-}
-
-tree::TerminalNode* TorqueParser::UnaryExpressionContext::BIT_NOT() {
- return getToken(TorqueParser::BIT_NOT, 0);
-}
-
-tree::TerminalNode* TorqueParser::UnaryExpressionContext::NOT() {
- return getToken(TorqueParser::NOT, 0);
-}
-
-size_t TorqueParser::UnaryExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleUnaryExpression;
-}
-
-void TorqueParser::UnaryExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterUnaryExpression(this);
-}
-
-void TorqueParser::UnaryExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitUnaryExpression(this);
-}
-
-antlrcpp::Any TorqueParser::UnaryExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitUnaryExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::UnaryExpressionContext* TorqueParser::unaryExpression() {
- UnaryExpressionContext* _localctx =
- _tracker.createInstance<UnaryExpressionContext>(_ctx, getState());
- enterRule(_localctx, 44, TorqueParser::RuleUnaryExpression);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(401);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::T__0:
- case TorqueParser::MAX:
- case TorqueParser::MIN:
- case TorqueParser::INCREMENT:
- case TorqueParser::DECREMENT:
- case TorqueParser::STRING_LITERAL:
- case TorqueParser::IDENTIFIER:
- case TorqueParser::DECIMAL_LITERAL: {
- enterOuterAlt(_localctx, 1);
- setState(398);
- assignmentExpression();
- break;
- }
-
- case TorqueParser::PLUS:
- case TorqueParser::MINUS:
- case TorqueParser::BIT_NOT:
- case TorqueParser::NOT: {
- enterOuterAlt(_localctx, 2);
- setState(399);
- dynamic_cast<UnaryExpressionContext*>(_localctx)->op = _input->LT(1);
- _la = _input->LA(1);
- if (!(((((_la - 53) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 53)) & ((1ULL << (TorqueParser::PLUS - 53)) |
- (1ULL << (TorqueParser::MINUS - 53)) |
- (1ULL << (TorqueParser::BIT_NOT - 53)) |
- (1ULL << (TorqueParser::NOT - 53)))) !=
- 0))) {
- dynamic_cast<UnaryExpressionContext*>(_localctx)->op =
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(400);
- unaryExpression();
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- LocationExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::LocationExpressionContext::LocationExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::LocationExpressionContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::PrimaryExpressionContext*
-TorqueParser::LocationExpressionContext::primaryExpression() {
- return getRuleContext<TorqueParser::PrimaryExpressionContext>(0);
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::LocationExpressionContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-TorqueParser::LocationExpressionContext*
-TorqueParser::LocationExpressionContext::locationExpression() {
- return getRuleContext<TorqueParser::LocationExpressionContext>(0);
-}
-
-size_t TorqueParser::LocationExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleLocationExpression;
-}
-
-void TorqueParser::LocationExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterLocationExpression(this);
-}
-
-void TorqueParser::LocationExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitLocationExpression(this);
-}
-
-antlrcpp::Any TorqueParser::LocationExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitLocationExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::LocationExpressionContext* TorqueParser::locationExpression() {
- return locationExpression(0);
-}
-
-TorqueParser::LocationExpressionContext* TorqueParser::locationExpression(
- int precedence) {
- ParserRuleContext* parentContext = _ctx;
- size_t parentState = getState();
- TorqueParser::LocationExpressionContext* _localctx =
- _tracker.createInstance<LocationExpressionContext>(_ctx, parentState);
- TorqueParser::LocationExpressionContext* previousContext = _localctx;
- size_t startState = 46;
- enterRecursionRule(_localctx, 46, TorqueParser::RuleLocationExpression,
- precedence);
-
- auto onExit = finally([=] { unrollRecursionContexts(parentContext); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(414);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 32, _ctx)) {
- case 1: {
- setState(404);
- match(TorqueParser::IDENTIFIER);
- break;
- }
-
- case 2: {
- setState(405);
- primaryExpression();
- setState(406);
- match(TorqueParser::T__9);
- setState(407);
- match(TorqueParser::IDENTIFIER);
- break;
- }
-
- case 3: {
- setState(409);
- primaryExpression();
- setState(410);
- match(TorqueParser::T__10);
- setState(411);
- expression();
- setState(412);
- match(TorqueParser::T__11);
- break;
- }
- }
- _ctx->stop = _input->LT(-1);
- setState(426);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 34,
- _ctx);
- while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER) {
- if (alt == 1) {
- if (!_parseListeners.empty()) triggerExitRuleEvent();
- previousContext = _localctx;
- setState(424);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 33, _ctx)) {
- case 1: {
- _localctx = _tracker.createInstance<LocationExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState,
- RuleLocationExpression);
- setState(416);
-
- if (!(precpred(_ctx, 4)))
- throw FailedPredicateException(this, "precpred(_ctx, 4)");
- setState(417);
- match(TorqueParser::T__9);
- setState(418);
- match(TorqueParser::IDENTIFIER);
- break;
- }
-
- case 2: {
- _localctx = _tracker.createInstance<LocationExpressionContext>(
- parentContext, parentState);
- pushNewRecursionContext(_localctx, startState,
- RuleLocationExpression);
- setState(419);
-
- if (!(precpred(_ctx, 2)))
- throw FailedPredicateException(this, "precpred(_ctx, 2)");
- setState(420);
- match(TorqueParser::T__10);
- setState(421);
- expression();
- setState(422);
- match(TorqueParser::T__11);
- break;
- }
- }
- }
- setState(428);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 34, _ctx);
- }
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
- return _localctx;
-}
-
-//----------------- IncrementDecrementContext
-//------------------------------------------------------------------
-
-TorqueParser::IncrementDecrementContext::IncrementDecrementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::IncrementDecrementContext::INCREMENT() {
- return getToken(TorqueParser::INCREMENT, 0);
-}
-
-TorqueParser::LocationExpressionContext*
-TorqueParser::IncrementDecrementContext::locationExpression() {
- return getRuleContext<TorqueParser::LocationExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::IncrementDecrementContext::DECREMENT() {
- return getToken(TorqueParser::DECREMENT, 0);
-}
-
-size_t TorqueParser::IncrementDecrementContext::getRuleIndex() const {
- return TorqueParser::RuleIncrementDecrement;
-}
-
-void TorqueParser::IncrementDecrementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterIncrementDecrement(this);
-}
-
-void TorqueParser::IncrementDecrementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitIncrementDecrement(this);
-}
-
-antlrcpp::Any TorqueParser::IncrementDecrementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitIncrementDecrement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::IncrementDecrementContext* TorqueParser::incrementDecrement() {
- IncrementDecrementContext* _localctx =
- _tracker.createInstance<IncrementDecrementContext>(_ctx, getState());
- enterRule(_localctx, 48, TorqueParser::RuleIncrementDecrement);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(439);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 35, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(429);
- match(TorqueParser::INCREMENT);
- setState(430);
- locationExpression(0);
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(431);
- match(TorqueParser::DECREMENT);
- setState(432);
- locationExpression(0);
- break;
- }
-
- case 3: {
- enterOuterAlt(_localctx, 3);
- setState(433);
- locationExpression(0);
- setState(434);
- dynamic_cast<IncrementDecrementContext*>(_localctx)->op =
- match(TorqueParser::INCREMENT);
- break;
- }
-
- case 4: {
- enterOuterAlt(_localctx, 4);
- setState(436);
- locationExpression(0);
- setState(437);
- dynamic_cast<IncrementDecrementContext*>(_localctx)->op =
- match(TorqueParser::DECREMENT);
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- AssignmentContext
-//------------------------------------------------------------------
-
-TorqueParser::AssignmentContext::AssignmentContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::IncrementDecrementContext*
-TorqueParser::AssignmentContext::incrementDecrement() {
- return getRuleContext<TorqueParser::IncrementDecrementContext>(0);
-}
-
-TorqueParser::LocationExpressionContext*
-TorqueParser::AssignmentContext::locationExpression() {
- return getRuleContext<TorqueParser::LocationExpressionContext>(0);
-}
-
-TorqueParser::ExpressionContext* TorqueParser::AssignmentContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::AssignmentContext::ASSIGNMENT() {
- return getToken(TorqueParser::ASSIGNMENT, 0);
-}
-
-tree::TerminalNode* TorqueParser::AssignmentContext::ASSIGNMENT_OPERATOR() {
- return getToken(TorqueParser::ASSIGNMENT_OPERATOR, 0);
-}
-
-size_t TorqueParser::AssignmentContext::getRuleIndex() const {
- return TorqueParser::RuleAssignment;
-}
-
-void TorqueParser::AssignmentContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterAssignment(this);
-}
-
-void TorqueParser::AssignmentContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitAssignment(this);
-}
-
-antlrcpp::Any TorqueParser::AssignmentContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitAssignment(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::AssignmentContext* TorqueParser::assignment() {
- AssignmentContext* _localctx =
- _tracker.createInstance<AssignmentContext>(_ctx, getState());
- enterRule(_localctx, 50, TorqueParser::RuleAssignment);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(447);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 37, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(441);
- incrementDecrement();
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(442);
- locationExpression(0);
- setState(445);
- _errHandler->sync(this);
-
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 36, _ctx)) {
- case 1: {
- setState(443);
- _la = _input->LA(1);
- if (!(_la == TorqueParser::ASSIGNMENT
-
- || _la == TorqueParser::ASSIGNMENT_OPERATOR)) {
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(444);
- expression();
- break;
- }
- }
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- AssignmentExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::AssignmentExpressionContext::AssignmentExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::FunctionPointerExpressionContext*
-TorqueParser::AssignmentExpressionContext::functionPointerExpression() {
- return getRuleContext<TorqueParser::FunctionPointerExpressionContext>(0);
-}
-
-TorqueParser::AssignmentContext*
-TorqueParser::AssignmentExpressionContext::assignment() {
- return getRuleContext<TorqueParser::AssignmentContext>(0);
-}
-
-size_t TorqueParser::AssignmentExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleAssignmentExpression;
-}
-
-void TorqueParser::AssignmentExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterAssignmentExpression(this);
-}
-
-void TorqueParser::AssignmentExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitAssignmentExpression(this);
-}
-
-antlrcpp::Any TorqueParser::AssignmentExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitAssignmentExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::AssignmentExpressionContext*
-TorqueParser::assignmentExpression() {
- AssignmentExpressionContext* _localctx =
- _tracker.createInstance<AssignmentExpressionContext>(_ctx, getState());
- enterRule(_localctx, 52, TorqueParser::RuleAssignmentExpression);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(451);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 38, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(449);
- functionPointerExpression();
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(450);
- assignment();
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- StructExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::StructExpressionContext::StructExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::StructExpressionContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-std::vector<TorqueParser::ExpressionContext*>
-TorqueParser::StructExpressionContext::expression() {
- return getRuleContexts<TorqueParser::ExpressionContext>();
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::StructExpressionContext::expression(size_t i) {
- return getRuleContext<TorqueParser::ExpressionContext>(i);
-}
-
-size_t TorqueParser::StructExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleStructExpression;
-}
-
-void TorqueParser::StructExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterStructExpression(this);
-}
-
-void TorqueParser::StructExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitStructExpression(this);
-}
-
-antlrcpp::Any TorqueParser::StructExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitStructExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::StructExpressionContext* TorqueParser::structExpression() {
- StructExpressionContext* _localctx =
- _tracker.createInstance<StructExpressionContext>(_ctx, getState());
- enterRule(_localctx, 54, TorqueParser::RuleStructExpression);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(453);
- match(TorqueParser::IDENTIFIER);
- setState(454);
- match(TorqueParser::T__12);
- setState(463);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
- (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
- (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
- ((((_la - 73) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 73)) &
- ((1ULL << (TorqueParser::INCREMENT - 73)) |
- (1ULL << (TorqueParser::DECREMENT - 73)) |
- (1ULL << (TorqueParser::NOT - 73)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
- (1ULL << (TorqueParser::IDENTIFIER - 73)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
- setState(455);
- expression();
- setState(460);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (_la == TorqueParser::T__3) {
- setState(456);
- match(TorqueParser::T__3);
- setState(457);
- expression();
- setState(462);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
- }
- setState(465);
- match(TorqueParser::T__13);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- FunctionPointerExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::FunctionPointerExpressionContext::
- FunctionPointerExpressionContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::PrimaryExpressionContext*
-TorqueParser::FunctionPointerExpressionContext::primaryExpression() {
- return getRuleContext<TorqueParser::PrimaryExpressionContext>(0);
-}
-
-tree::TerminalNode*
-TorqueParser::FunctionPointerExpressionContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::GenericSpecializationTypeListContext* TorqueParser::
- FunctionPointerExpressionContext::genericSpecializationTypeList() {
- return getRuleContext<TorqueParser::GenericSpecializationTypeListContext>(0);
-}
-
-size_t TorqueParser::FunctionPointerExpressionContext::getRuleIndex() const {
- return TorqueParser::RuleFunctionPointerExpression;
-}
-
-void TorqueParser::FunctionPointerExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterFunctionPointerExpression(this);
-}
-
-void TorqueParser::FunctionPointerExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitFunctionPointerExpression(this);
-}
-
-antlrcpp::Any TorqueParser::FunctionPointerExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitFunctionPointerExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::FunctionPointerExpressionContext*
-TorqueParser::functionPointerExpression() {
- FunctionPointerExpressionContext* _localctx =
- _tracker.createInstance<FunctionPointerExpressionContext>(_ctx,
- getState());
- enterRule(_localctx, 56, TorqueParser::RuleFunctionPointerExpression);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(472);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 42, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(467);
- primaryExpression();
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(468);
- match(TorqueParser::IDENTIFIER);
- setState(470);
- _errHandler->sync(this);
-
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 41, _ctx)) {
- case 1: {
- setState(469);
- genericSpecializationTypeList();
- break;
- }
- }
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- PrimaryExpressionContext
-//------------------------------------------------------------------
-
-TorqueParser::PrimaryExpressionContext::PrimaryExpressionContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::HelperCallContext*
-TorqueParser::PrimaryExpressionContext::helperCall() {
- return getRuleContext<TorqueParser::HelperCallContext>(0);
-}
-
-TorqueParser::StructExpressionContext*
-TorqueParser::PrimaryExpressionContext::structExpression() {
- return getRuleContext<TorqueParser::StructExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::PrimaryExpressionContext::DECIMAL_LITERAL() {
- return getToken(TorqueParser::DECIMAL_LITERAL, 0);
-}
-
-tree::TerminalNode* TorqueParser::PrimaryExpressionContext::STRING_LITERAL() {
- return getToken(TorqueParser::STRING_LITERAL, 0);
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::PrimaryExpressionContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-size_t TorqueParser::PrimaryExpressionContext::getRuleIndex() const {
- return TorqueParser::RulePrimaryExpression;
-}
-
-void TorqueParser::PrimaryExpressionContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterPrimaryExpression(this);
-}
-
-void TorqueParser::PrimaryExpressionContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitPrimaryExpression(this);
-}
-
-antlrcpp::Any TorqueParser::PrimaryExpressionContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitPrimaryExpression(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::PrimaryExpressionContext* TorqueParser::primaryExpression() {
- PrimaryExpressionContext* _localctx =
- _tracker.createInstance<PrimaryExpressionContext>(_ctx, getState());
- enterRule(_localctx, 58, TorqueParser::RulePrimaryExpression);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(482);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 43, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(474);
- helperCall();
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(475);
- structExpression();
- break;
- }
-
- case 3: {
- enterOuterAlt(_localctx, 3);
- setState(476);
- match(TorqueParser::DECIMAL_LITERAL);
- break;
- }
-
- case 4: {
- enterOuterAlt(_localctx, 4);
- setState(477);
- match(TorqueParser::STRING_LITERAL);
- break;
- }
-
- case 5: {
- enterOuterAlt(_localctx, 5);
- setState(478);
- match(TorqueParser::T__0);
- setState(479);
- expression();
- setState(480);
- match(TorqueParser::T__1);
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ForInitializationContext
-//------------------------------------------------------------------
-
-TorqueParser::ForInitializationContext::ForInitializationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::VariableDeclarationWithInitializationContext* TorqueParser::
- ForInitializationContext::variableDeclarationWithInitialization() {
- return getRuleContext<
- TorqueParser::VariableDeclarationWithInitializationContext>(0);
-}
-
-size_t TorqueParser::ForInitializationContext::getRuleIndex() const {
- return TorqueParser::RuleForInitialization;
-}
-
-void TorqueParser::ForInitializationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterForInitialization(this);
-}
-
-void TorqueParser::ForInitializationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitForInitialization(this);
-}
-
-antlrcpp::Any TorqueParser::ForInitializationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitForInitialization(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ForInitializationContext* TorqueParser::forInitialization() {
- ForInitializationContext* _localctx =
- _tracker.createInstance<ForInitializationContext>(_ctx, getState());
- enterRule(_localctx, 60, TorqueParser::RuleForInitialization);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(485);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::LET
-
- || _la == TorqueParser::CONST) {
- setState(484);
- variableDeclarationWithInitialization();
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ForLoopContext
-//------------------------------------------------------------------
-
-TorqueParser::ForLoopContext::ForLoopContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ForLoopContext::FOR() {
- return getToken(TorqueParser::FOR, 0);
-}
-
-TorqueParser::ForInitializationContext*
-TorqueParser::ForLoopContext::forInitialization() {
- return getRuleContext<TorqueParser::ForInitializationContext>(0);
-}
-
-TorqueParser::ExpressionContext* TorqueParser::ForLoopContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-TorqueParser::AssignmentContext* TorqueParser::ForLoopContext::assignment() {
- return getRuleContext<TorqueParser::AssignmentContext>(0);
-}
-
-TorqueParser::StatementBlockContext*
-TorqueParser::ForLoopContext::statementBlock() {
- return getRuleContext<TorqueParser::StatementBlockContext>(0);
-}
-
-size_t TorqueParser::ForLoopContext::getRuleIndex() const {
- return TorqueParser::RuleForLoop;
-}
-
-void TorqueParser::ForLoopContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterForLoop(this);
-}
-
-void TorqueParser::ForLoopContext::exitRule(tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitForLoop(this);
-}
-
-antlrcpp::Any TorqueParser::ForLoopContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitForLoop(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ForLoopContext* TorqueParser::forLoop() {
- ForLoopContext* _localctx =
- _tracker.createInstance<ForLoopContext>(_ctx, getState());
- enterRule(_localctx, 62, TorqueParser::RuleForLoop);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(487);
- match(TorqueParser::FOR);
- setState(488);
- match(TorqueParser::T__0);
- setState(489);
- forInitialization();
- setState(490);
- match(TorqueParser::T__14);
- setState(491);
- expression();
- setState(492);
- match(TorqueParser::T__14);
- setState(493);
- assignment();
- setState(494);
- match(TorqueParser::T__1);
- setState(495);
- statementBlock();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- RangeSpecifierContext
-//------------------------------------------------------------------
-
-TorqueParser::RangeSpecifierContext::RangeSpecifierContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::ExpressionContext*>
-TorqueParser::RangeSpecifierContext::expression() {
- return getRuleContexts<TorqueParser::ExpressionContext>();
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::RangeSpecifierContext::expression(size_t i) {
- return getRuleContext<TorqueParser::ExpressionContext>(i);
-}
-
-size_t TorqueParser::RangeSpecifierContext::getRuleIndex() const {
- return TorqueParser::RuleRangeSpecifier;
-}
-
-void TorqueParser::RangeSpecifierContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterRangeSpecifier(this);
-}
-
-void TorqueParser::RangeSpecifierContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitRangeSpecifier(this);
-}
-
-antlrcpp::Any TorqueParser::RangeSpecifierContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitRangeSpecifier(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::RangeSpecifierContext* TorqueParser::rangeSpecifier() {
- RangeSpecifierContext* _localctx =
- _tracker.createInstance<RangeSpecifierContext>(_ctx, getState());
- enterRule(_localctx, 64, TorqueParser::RuleRangeSpecifier);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(497);
- match(TorqueParser::T__10);
- setState(499);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
- (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
- (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
- ((((_la - 73) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 73)) &
- ((1ULL << (TorqueParser::INCREMENT - 73)) |
- (1ULL << (TorqueParser::DECREMENT - 73)) |
- (1ULL << (TorqueParser::NOT - 73)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
- (1ULL << (TorqueParser::IDENTIFIER - 73)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
- setState(498);
- dynamic_cast<RangeSpecifierContext*>(_localctx)->begin = expression();
- }
- setState(501);
- match(TorqueParser::T__4);
- setState(503);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
- (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
- (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
- ((((_la - 73) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 73)) &
- ((1ULL << (TorqueParser::INCREMENT - 73)) |
- (1ULL << (TorqueParser::DECREMENT - 73)) |
- (1ULL << (TorqueParser::NOT - 73)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
- (1ULL << (TorqueParser::IDENTIFIER - 73)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
- setState(502);
- dynamic_cast<RangeSpecifierContext*>(_localctx)->end = expression();
- }
- setState(505);
- match(TorqueParser::T__11);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ForOfRangeContext
-//------------------------------------------------------------------
-
-TorqueParser::ForOfRangeContext::ForOfRangeContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::RangeSpecifierContext*
-TorqueParser::ForOfRangeContext::rangeSpecifier() {
- return getRuleContext<TorqueParser::RangeSpecifierContext>(0);
-}
-
-size_t TorqueParser::ForOfRangeContext::getRuleIndex() const {
- return TorqueParser::RuleForOfRange;
-}
-
-void TorqueParser::ForOfRangeContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterForOfRange(this);
-}
-
-void TorqueParser::ForOfRangeContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitForOfRange(this);
-}
-
-antlrcpp::Any TorqueParser::ForOfRangeContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitForOfRange(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ForOfRangeContext* TorqueParser::forOfRange() {
- ForOfRangeContext* _localctx =
- _tracker.createInstance<ForOfRangeContext>(_ctx, getState());
- enterRule(_localctx, 66, TorqueParser::RuleForOfRange);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(508);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__10) {
- setState(507);
- rangeSpecifier();
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ForOfLoopContext
-//------------------------------------------------------------------
-
-TorqueParser::ForOfLoopContext::ForOfLoopContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ForOfLoopContext::FOR() {
- return getToken(TorqueParser::FOR, 0);
-}
-
-TorqueParser::VariableDeclarationContext*
-TorqueParser::ForOfLoopContext::variableDeclaration() {
- return getRuleContext<TorqueParser::VariableDeclarationContext>(0);
-}
-
-TorqueParser::ExpressionContext* TorqueParser::ForOfLoopContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-TorqueParser::ForOfRangeContext* TorqueParser::ForOfLoopContext::forOfRange() {
- return getRuleContext<TorqueParser::ForOfRangeContext>(0);
-}
-
-TorqueParser::StatementBlockContext*
-TorqueParser::ForOfLoopContext::statementBlock() {
- return getRuleContext<TorqueParser::StatementBlockContext>(0);
-}
-
-size_t TorqueParser::ForOfLoopContext::getRuleIndex() const {
- return TorqueParser::RuleForOfLoop;
-}
-
-void TorqueParser::ForOfLoopContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterForOfLoop(this);
-}
-
-void TorqueParser::ForOfLoopContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitForOfLoop(this);
-}
-
-antlrcpp::Any TorqueParser::ForOfLoopContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitForOfLoop(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ForOfLoopContext* TorqueParser::forOfLoop() {
- ForOfLoopContext* _localctx =
- _tracker.createInstance<ForOfLoopContext>(_ctx, getState());
- enterRule(_localctx, 68, TorqueParser::RuleForOfLoop);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(510);
- match(TorqueParser::FOR);
- setState(511);
- match(TorqueParser::T__0);
- setState(512);
- variableDeclaration();
- setState(513);
- match(TorqueParser::T__15);
- setState(514);
- expression();
- setState(515);
- forOfRange();
- setState(516);
- match(TorqueParser::T__1);
- setState(517);
- statementBlock();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ArgumentContext
-//------------------------------------------------------------------
-
-TorqueParser::ArgumentContext::ArgumentContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::ExpressionContext* TorqueParser::ArgumentContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-size_t TorqueParser::ArgumentContext::getRuleIndex() const {
- return TorqueParser::RuleArgument;
-}
-
-void TorqueParser::ArgumentContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterArgument(this);
-}
-
-void TorqueParser::ArgumentContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitArgument(this);
-}
-
-antlrcpp::Any TorqueParser::ArgumentContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitArgument(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ArgumentContext* TorqueParser::argument() {
- ArgumentContext* _localctx =
- _tracker.createInstance<ArgumentContext>(_ctx, getState());
- enterRule(_localctx, 70, TorqueParser::RuleArgument);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(519);
- expression();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ArgumentListContext
-//------------------------------------------------------------------
-
-TorqueParser::ArgumentListContext::ArgumentListContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::ArgumentContext*>
-TorqueParser::ArgumentListContext::argument() {
- return getRuleContexts<TorqueParser::ArgumentContext>();
-}
-
-TorqueParser::ArgumentContext* TorqueParser::ArgumentListContext::argument(
- size_t i) {
- return getRuleContext<TorqueParser::ArgumentContext>(i);
-}
-
-size_t TorqueParser::ArgumentListContext::getRuleIndex() const {
- return TorqueParser::RuleArgumentList;
-}
-
-void TorqueParser::ArgumentListContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterArgumentList(this);
-}
-
-void TorqueParser::ArgumentListContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitArgumentList(this);
-}
-
-antlrcpp::Any TorqueParser::ArgumentListContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitArgumentList(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ArgumentListContext* TorqueParser::argumentList() {
- ArgumentListContext* _localctx =
- _tracker.createInstance<ArgumentListContext>(_ctx, getState());
- enterRule(_localctx, 72, TorqueParser::RuleArgumentList);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(521);
- match(TorqueParser::T__0);
- setState(523);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
- (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
- (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
- ((((_la - 73) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 73)) &
- ((1ULL << (TorqueParser::INCREMENT - 73)) |
- (1ULL << (TorqueParser::DECREMENT - 73)) |
- (1ULL << (TorqueParser::NOT - 73)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
- (1ULL << (TorqueParser::IDENTIFIER - 73)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
- setState(522);
- argument();
- }
- setState(529);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (_la == TorqueParser::T__3) {
- setState(525);
- match(TorqueParser::T__3);
- setState(526);
- argument();
- setState(531);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
- setState(532);
- match(TorqueParser::T__1);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- HelperCallContext
-//------------------------------------------------------------------
-
-TorqueParser::HelperCallContext::HelperCallContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::ArgumentListContext*
-TorqueParser::HelperCallContext::argumentList() {
- return getRuleContext<TorqueParser::ArgumentListContext>(0);
-}
-
-TorqueParser::OptionalOtherwiseContext*
-TorqueParser::HelperCallContext::optionalOtherwise() {
- return getRuleContext<TorqueParser::OptionalOtherwiseContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::HelperCallContext::MIN() {
- return getToken(TorqueParser::MIN, 0);
-}
-
-tree::TerminalNode* TorqueParser::HelperCallContext::MAX() {
- return getToken(TorqueParser::MAX, 0);
-}
-
-tree::TerminalNode* TorqueParser::HelperCallContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::GenericSpecializationTypeListContext*
-TorqueParser::HelperCallContext::genericSpecializationTypeList() {
- return getRuleContext<TorqueParser::GenericSpecializationTypeListContext>(0);
-}
-
-size_t TorqueParser::HelperCallContext::getRuleIndex() const {
- return TorqueParser::RuleHelperCall;
-}
-
-void TorqueParser::HelperCallContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterHelperCall(this);
-}
-
-void TorqueParser::HelperCallContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitHelperCall(this);
-}
-
-antlrcpp::Any TorqueParser::HelperCallContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitHelperCall(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::HelperCallContext* TorqueParser::helperCall() {
- HelperCallContext* _localctx =
- _tracker.createInstance<HelperCallContext>(_ctx, getState());
- enterRule(_localctx, 74, TorqueParser::RuleHelperCall);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(534);
- _la = _input->LA(1);
- if (!(((((_la - 61) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 61)) &
- ((1ULL << (TorqueParser::MAX - 61)) |
- (1ULL << (TorqueParser::MIN - 61)) |
- (1ULL << (TorqueParser::IDENTIFIER - 61)))) != 0))) {
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(536);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::LESS_THAN) {
- setState(535);
- genericSpecializationTypeList();
- }
- setState(538);
- argumentList();
- setState(539);
- optionalOtherwise();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- LabelReferenceContext
-//------------------------------------------------------------------
-
-TorqueParser::LabelReferenceContext::LabelReferenceContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::LabelReferenceContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-size_t TorqueParser::LabelReferenceContext::getRuleIndex() const {
- return TorqueParser::RuleLabelReference;
-}
-
-void TorqueParser::LabelReferenceContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterLabelReference(this);
-}
-
-void TorqueParser::LabelReferenceContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitLabelReference(this);
-}
-
-antlrcpp::Any TorqueParser::LabelReferenceContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitLabelReference(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::LabelReferenceContext* TorqueParser::labelReference() {
- LabelReferenceContext* _localctx =
- _tracker.createInstance<LabelReferenceContext>(_ctx, getState());
- enterRule(_localctx, 76, TorqueParser::RuleLabelReference);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(541);
- match(TorqueParser::IDENTIFIER);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- VariableDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::VariableDeclarationContext::VariableDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::VariableDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeContext* TorqueParser::VariableDeclarationContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::VariableDeclarationContext::LET() {
- return getToken(TorqueParser::LET, 0);
-}
-
-tree::TerminalNode* TorqueParser::VariableDeclarationContext::CONST() {
- return getToken(TorqueParser::CONST, 0);
-}
-
-size_t TorqueParser::VariableDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleVariableDeclaration;
-}
-
-void TorqueParser::VariableDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterVariableDeclaration(this);
-}
-
-void TorqueParser::VariableDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitVariableDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::VariableDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitVariableDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::VariableDeclarationContext* TorqueParser::variableDeclaration() {
- VariableDeclarationContext* _localctx =
- _tracker.createInstance<VariableDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 78, TorqueParser::RuleVariableDeclaration);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(543);
- _la = _input->LA(1);
- if (!(_la == TorqueParser::LET
-
- || _la == TorqueParser::CONST)) {
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(544);
- match(TorqueParser::IDENTIFIER);
- setState(545);
- match(TorqueParser::T__4);
- setState(546);
- type(0);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- VariableDeclarationWithInitializationContext
-//------------------------------------------------------------------
-
-TorqueParser::VariableDeclarationWithInitializationContext::
- VariableDeclarationWithInitializationContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::VariableDeclarationContext* TorqueParser::
- VariableDeclarationWithInitializationContext::variableDeclaration() {
- return getRuleContext<TorqueParser::VariableDeclarationContext>(0);
-}
-
-tree::TerminalNode*
-TorqueParser::VariableDeclarationWithInitializationContext::ASSIGNMENT() {
- return getToken(TorqueParser::ASSIGNMENT, 0);
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::VariableDeclarationWithInitializationContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-size_t
-TorqueParser::VariableDeclarationWithInitializationContext::getRuleIndex()
- const {
- return TorqueParser::RuleVariableDeclarationWithInitialization;
-}
-
-void TorqueParser::VariableDeclarationWithInitializationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterVariableDeclarationWithInitialization(this);
-}
-
-void TorqueParser::VariableDeclarationWithInitializationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitVariableDeclarationWithInitialization(this);
-}
-
-antlrcpp::Any
-TorqueParser::VariableDeclarationWithInitializationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitVariableDeclarationWithInitialization(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::VariableDeclarationWithInitializationContext*
-TorqueParser::variableDeclarationWithInitialization() {
- VariableDeclarationWithInitializationContext* _localctx =
- _tracker.createInstance<VariableDeclarationWithInitializationContext>(
- _ctx, getState());
- enterRule(_localctx, 80,
- TorqueParser::RuleVariableDeclarationWithInitialization);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(548);
- variableDeclaration();
- setState(551);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::ASSIGNMENT) {
- setState(549);
- match(TorqueParser::ASSIGNMENT);
- setState(550);
- expression();
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- HelperCallStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::HelperCallStatementContext::HelperCallStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::HelperCallContext*
-TorqueParser::HelperCallStatementContext::helperCall() {
- return getRuleContext<TorqueParser::HelperCallContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::HelperCallStatementContext::TAIL() {
- return getToken(TorqueParser::TAIL, 0);
-}
-
-size_t TorqueParser::HelperCallStatementContext::getRuleIndex() const {
- return TorqueParser::RuleHelperCallStatement;
-}
-
-void TorqueParser::HelperCallStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterHelperCallStatement(this);
-}
-
-void TorqueParser::HelperCallStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitHelperCallStatement(this);
-}
-
-antlrcpp::Any TorqueParser::HelperCallStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitHelperCallStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::HelperCallStatementContext* TorqueParser::helperCallStatement() {
- HelperCallStatementContext* _localctx =
- _tracker.createInstance<HelperCallStatementContext>(_ctx, getState());
- enterRule(_localctx, 82, TorqueParser::RuleHelperCallStatement);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(554);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::TAIL) {
- setState(553);
- match(TorqueParser::TAIL);
- }
- setState(556);
- helperCall();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ExpressionStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::ExpressionStatementContext::ExpressionStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::AssignmentContext*
-TorqueParser::ExpressionStatementContext::assignment() {
- return getRuleContext<TorqueParser::AssignmentContext>(0);
-}
-
-size_t TorqueParser::ExpressionStatementContext::getRuleIndex() const {
- return TorqueParser::RuleExpressionStatement;
-}
-
-void TorqueParser::ExpressionStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterExpressionStatement(this);
-}
-
-void TorqueParser::ExpressionStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitExpressionStatement(this);
-}
-
-antlrcpp::Any TorqueParser::ExpressionStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitExpressionStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ExpressionStatementContext* TorqueParser::expressionStatement() {
- ExpressionStatementContext* _localctx =
- _tracker.createInstance<ExpressionStatementContext>(_ctx, getState());
- enterRule(_localctx, 84, TorqueParser::RuleExpressionStatement);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(558);
- assignment();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- IfStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::IfStatementContext::IfStatementContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::IfStatementContext::IF() {
- return getToken(TorqueParser::IF, 0);
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::IfStatementContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-std::vector<TorqueParser::StatementBlockContext*>
-TorqueParser::IfStatementContext::statementBlock() {
- return getRuleContexts<TorqueParser::StatementBlockContext>();
-}
-
-TorqueParser::StatementBlockContext*
-TorqueParser::IfStatementContext::statementBlock(size_t i) {
- return getRuleContext<TorqueParser::StatementBlockContext>(i);
-}
-
-tree::TerminalNode* TorqueParser::IfStatementContext::CONSTEXPR() {
- return getToken(TorqueParser::CONSTEXPR, 0);
-}
-
-size_t TorqueParser::IfStatementContext::getRuleIndex() const {
- return TorqueParser::RuleIfStatement;
-}
-
-void TorqueParser::IfStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterIfStatement(this);
-}
-
-void TorqueParser::IfStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitIfStatement(this);
-}
-
-antlrcpp::Any TorqueParser::IfStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitIfStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::IfStatementContext* TorqueParser::ifStatement() {
- IfStatementContext* _localctx =
- _tracker.createInstance<IfStatementContext>(_ctx, getState());
- enterRule(_localctx, 86, TorqueParser::RuleIfStatement);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(560);
- match(TorqueParser::IF);
- setState(562);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::CONSTEXPR) {
- setState(561);
- match(TorqueParser::CONSTEXPR);
- }
- setState(564);
- match(TorqueParser::T__0);
- setState(565);
- expression();
- setState(566);
- match(TorqueParser::T__1);
- setState(567);
- statementBlock();
- setState(570);
- _errHandler->sync(this);
-
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 54, _ctx)) {
- case 1: {
- setState(568);
- match(TorqueParser::T__16);
- setState(569);
- statementBlock();
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- WhileLoopContext
-//------------------------------------------------------------------
-
-TorqueParser::WhileLoopContext::WhileLoopContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::WhileLoopContext::WHILE() {
- return getToken(TorqueParser::WHILE, 0);
-}
-
-TorqueParser::ExpressionContext* TorqueParser::WhileLoopContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-TorqueParser::StatementBlockContext*
-TorqueParser::WhileLoopContext::statementBlock() {
- return getRuleContext<TorqueParser::StatementBlockContext>(0);
-}
-
-size_t TorqueParser::WhileLoopContext::getRuleIndex() const {
- return TorqueParser::RuleWhileLoop;
-}
-
-void TorqueParser::WhileLoopContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterWhileLoop(this);
-}
-
-void TorqueParser::WhileLoopContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitWhileLoop(this);
-}
-
-antlrcpp::Any TorqueParser::WhileLoopContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitWhileLoop(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::WhileLoopContext* TorqueParser::whileLoop() {
- WhileLoopContext* _localctx =
- _tracker.createInstance<WhileLoopContext>(_ctx, getState());
- enterRule(_localctx, 88, TorqueParser::RuleWhileLoop);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(572);
- match(TorqueParser::WHILE);
- setState(573);
- match(TorqueParser::T__0);
- setState(574);
- expression();
- setState(575);
- match(TorqueParser::T__1);
- setState(576);
- statementBlock();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ReturnStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::ReturnStatementContext::ReturnStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ReturnStatementContext::RETURN() {
- return getToken(TorqueParser::RETURN, 0);
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::ReturnStatementContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-size_t TorqueParser::ReturnStatementContext::getRuleIndex() const {
- return TorqueParser::RuleReturnStatement;
-}
-
-void TorqueParser::ReturnStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterReturnStatement(this);
-}
-
-void TorqueParser::ReturnStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitReturnStatement(this);
-}
-
-antlrcpp::Any TorqueParser::ReturnStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitReturnStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ReturnStatementContext* TorqueParser::returnStatement() {
- ReturnStatementContext* _localctx =
- _tracker.createInstance<ReturnStatementContext>(_ctx, getState());
- enterRule(_localctx, 90, TorqueParser::RuleReturnStatement);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(578);
- match(TorqueParser::RETURN);
- setState(580);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if ((((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::PLUS) |
- (1ULL << TorqueParser::MINUS) | (1ULL << TorqueParser::BIT_NOT) |
- (1ULL << TorqueParser::MAX) | (1ULL << TorqueParser::MIN))) != 0) ||
- ((((_la - 73) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 73)) &
- ((1ULL << (TorqueParser::INCREMENT - 73)) |
- (1ULL << (TorqueParser::DECREMENT - 73)) |
- (1ULL << (TorqueParser::NOT - 73)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
- (1ULL << (TorqueParser::IDENTIFIER - 73)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
- setState(579);
- expression();
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- BreakStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::BreakStatementContext::BreakStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::BreakStatementContext::BREAK() {
- return getToken(TorqueParser::BREAK, 0);
-}
-
-size_t TorqueParser::BreakStatementContext::getRuleIndex() const {
- return TorqueParser::RuleBreakStatement;
-}
-
-void TorqueParser::BreakStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterBreakStatement(this);
-}
-
-void TorqueParser::BreakStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitBreakStatement(this);
-}
-
-antlrcpp::Any TorqueParser::BreakStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitBreakStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::BreakStatementContext* TorqueParser::breakStatement() {
- BreakStatementContext* _localctx =
- _tracker.createInstance<BreakStatementContext>(_ctx, getState());
- enterRule(_localctx, 92, TorqueParser::RuleBreakStatement);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(582);
- match(TorqueParser::BREAK);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ContinueStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::ContinueStatementContext::ContinueStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ContinueStatementContext::CONTINUE() {
- return getToken(TorqueParser::CONTINUE, 0);
-}
-
-size_t TorqueParser::ContinueStatementContext::getRuleIndex() const {
- return TorqueParser::RuleContinueStatement;
-}
-
-void TorqueParser::ContinueStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterContinueStatement(this);
-}
-
-void TorqueParser::ContinueStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitContinueStatement(this);
-}
-
-antlrcpp::Any TorqueParser::ContinueStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitContinueStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ContinueStatementContext* TorqueParser::continueStatement() {
- ContinueStatementContext* _localctx =
- _tracker.createInstance<ContinueStatementContext>(_ctx, getState());
- enterRule(_localctx, 94, TorqueParser::RuleContinueStatement);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(584);
- match(TorqueParser::CONTINUE);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- GotoStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::GotoStatementContext::GotoStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::GotoStatementContext::GOTO() {
- return getToken(TorqueParser::GOTO, 0);
-}
-
-TorqueParser::LabelReferenceContext*
-TorqueParser::GotoStatementContext::labelReference() {
- return getRuleContext<TorqueParser::LabelReferenceContext>(0);
-}
-
-TorqueParser::ArgumentListContext*
-TorqueParser::GotoStatementContext::argumentList() {
- return getRuleContext<TorqueParser::ArgumentListContext>(0);
-}
-
-size_t TorqueParser::GotoStatementContext::getRuleIndex() const {
- return TorqueParser::RuleGotoStatement;
-}
-
-void TorqueParser::GotoStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterGotoStatement(this);
-}
-
-void TorqueParser::GotoStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitGotoStatement(this);
-}
-
-antlrcpp::Any TorqueParser::GotoStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitGotoStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::GotoStatementContext* TorqueParser::gotoStatement() {
- GotoStatementContext* _localctx =
- _tracker.createInstance<GotoStatementContext>(_ctx, getState());
- enterRule(_localctx, 96, TorqueParser::RuleGotoStatement);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(586);
- match(TorqueParser::GOTO);
- setState(587);
- labelReference();
- setState(589);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__0) {
- setState(588);
- argumentList();
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- HandlerWithStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::HandlerWithStatementContext::HandlerWithStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::HandlerWithStatementContext::LABEL() {
- return getToken(TorqueParser::LABEL, 0);
-}
-
-TorqueParser::LabelDeclarationContext*
-TorqueParser::HandlerWithStatementContext::labelDeclaration() {
- return getRuleContext<TorqueParser::LabelDeclarationContext>(0);
-}
-
-TorqueParser::StatementBlockContext*
-TorqueParser::HandlerWithStatementContext::statementBlock() {
- return getRuleContext<TorqueParser::StatementBlockContext>(0);
-}
-
-size_t TorqueParser::HandlerWithStatementContext::getRuleIndex() const {
- return TorqueParser::RuleHandlerWithStatement;
-}
-
-void TorqueParser::HandlerWithStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterHandlerWithStatement(this);
-}
-
-void TorqueParser::HandlerWithStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitHandlerWithStatement(this);
-}
-
-antlrcpp::Any TorqueParser::HandlerWithStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitHandlerWithStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::HandlerWithStatementContext*
-TorqueParser::handlerWithStatement() {
- HandlerWithStatementContext* _localctx =
- _tracker.createInstance<HandlerWithStatementContext>(_ctx, getState());
- enterRule(_localctx, 98, TorqueParser::RuleHandlerWithStatement);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(591);
- match(TorqueParser::LABEL);
- setState(592);
- labelDeclaration();
- setState(593);
- statementBlock();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- TryLabelStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::TryLabelStatementContext::TryLabelStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::TryLabelStatementContext::TRY() {
- return getToken(TorqueParser::TRY, 0);
-}
-
-TorqueParser::StatementBlockContext*
-TorqueParser::TryLabelStatementContext::statementBlock() {
- return getRuleContext<TorqueParser::StatementBlockContext>(0);
-}
-
-std::vector<TorqueParser::HandlerWithStatementContext*>
-TorqueParser::TryLabelStatementContext::handlerWithStatement() {
- return getRuleContexts<TorqueParser::HandlerWithStatementContext>();
-}
-
-TorqueParser::HandlerWithStatementContext*
-TorqueParser::TryLabelStatementContext::handlerWithStatement(size_t i) {
- return getRuleContext<TorqueParser::HandlerWithStatementContext>(i);
-}
-
-size_t TorqueParser::TryLabelStatementContext::getRuleIndex() const {
- return TorqueParser::RuleTryLabelStatement;
-}
-
-void TorqueParser::TryLabelStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterTryLabelStatement(this);
-}
-
-void TorqueParser::TryLabelStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitTryLabelStatement(this);
-}
-
-antlrcpp::Any TorqueParser::TryLabelStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitTryLabelStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::TryLabelStatementContext* TorqueParser::tryLabelStatement() {
- TryLabelStatementContext* _localctx =
- _tracker.createInstance<TryLabelStatementContext>(_ctx, getState());
- enterRule(_localctx, 100, TorqueParser::RuleTryLabelStatement);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- size_t alt;
- enterOuterAlt(_localctx, 1);
- setState(595);
- match(TorqueParser::TRY);
- setState(596);
- statementBlock();
- setState(598);
- _errHandler->sync(this);
- alt = 1;
- do {
- switch (alt) {
- case 1: {
- setState(597);
- handlerWithStatement();
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
- setState(600);
- _errHandler->sync(this);
- alt = getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 57, _ctx);
- } while (alt != 2 && alt != atn::ATN::INVALID_ALT_NUMBER);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- DiagnosticStatementContext
-//------------------------------------------------------------------
-
-TorqueParser::DiagnosticStatementContext::DiagnosticStatementContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::ExpressionContext*
-TorqueParser::DiagnosticStatementContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::DiagnosticStatementContext::ASSERT_TOKEN() {
- return getToken(TorqueParser::ASSERT_TOKEN, 0);
-}
-
-tree::TerminalNode* TorqueParser::DiagnosticStatementContext::CHECK_TOKEN() {
- return getToken(TorqueParser::CHECK_TOKEN, 0);
-}
-
-tree::TerminalNode*
-TorqueParser::DiagnosticStatementContext::UNREACHABLE_TOKEN() {
- return getToken(TorqueParser::UNREACHABLE_TOKEN, 0);
-}
-
-tree::TerminalNode* TorqueParser::DiagnosticStatementContext::DEBUG_TOKEN() {
- return getToken(TorqueParser::DEBUG_TOKEN, 0);
-}
-
-size_t TorqueParser::DiagnosticStatementContext::getRuleIndex() const {
- return TorqueParser::RuleDiagnosticStatement;
-}
-
-void TorqueParser::DiagnosticStatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterDiagnosticStatement(this);
-}
-
-void TorqueParser::DiagnosticStatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitDiagnosticStatement(this);
-}
-
-antlrcpp::Any TorqueParser::DiagnosticStatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitDiagnosticStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::DiagnosticStatementContext* TorqueParser::diagnosticStatement() {
- DiagnosticStatementContext* _localctx =
- _tracker.createInstance<DiagnosticStatementContext>(_ctx, getState());
- enterRule(_localctx, 102, TorqueParser::RuleDiagnosticStatement);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(609);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::ASSERT_TOKEN:
- case TorqueParser::CHECK_TOKEN: {
- enterOuterAlt(_localctx, 1);
- setState(602);
- _la = _input->LA(1);
- if (!(_la == TorqueParser::ASSERT_TOKEN
-
- || _la == TorqueParser::CHECK_TOKEN)) {
- _errHandler->recoverInline(this);
- } else {
- _errHandler->reportMatch(this);
- consume();
- }
- setState(603);
- match(TorqueParser::T__0);
- setState(604);
- expression();
- setState(605);
- match(TorqueParser::T__1);
- break;
- }
-
- case TorqueParser::UNREACHABLE_TOKEN: {
- enterOuterAlt(_localctx, 2);
- setState(607);
- match(TorqueParser::UNREACHABLE_TOKEN);
- break;
- }
-
- case TorqueParser::DEBUG_TOKEN: {
- enterOuterAlt(_localctx, 3);
- setState(608);
- match(TorqueParser::DEBUG_TOKEN);
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- StatementContext
-//------------------------------------------------------------------
-
-TorqueParser::StatementContext::StatementContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::VariableDeclarationWithInitializationContext*
-TorqueParser::StatementContext::variableDeclarationWithInitialization() {
- return getRuleContext<
- TorqueParser::VariableDeclarationWithInitializationContext>(0);
-}
-
-TorqueParser::HelperCallStatementContext*
-TorqueParser::StatementContext::helperCallStatement() {
- return getRuleContext<TorqueParser::HelperCallStatementContext>(0);
-}
-
-TorqueParser::ExpressionStatementContext*
-TorqueParser::StatementContext::expressionStatement() {
- return getRuleContext<TorqueParser::ExpressionStatementContext>(0);
-}
-
-TorqueParser::ReturnStatementContext*
-TorqueParser::StatementContext::returnStatement() {
- return getRuleContext<TorqueParser::ReturnStatementContext>(0);
-}
-
-TorqueParser::BreakStatementContext*
-TorqueParser::StatementContext::breakStatement() {
- return getRuleContext<TorqueParser::BreakStatementContext>(0);
-}
-
-TorqueParser::ContinueStatementContext*
-TorqueParser::StatementContext::continueStatement() {
- return getRuleContext<TorqueParser::ContinueStatementContext>(0);
-}
-
-TorqueParser::GotoStatementContext*
-TorqueParser::StatementContext::gotoStatement() {
- return getRuleContext<TorqueParser::GotoStatementContext>(0);
-}
-
-TorqueParser::IfStatementContext*
-TorqueParser::StatementContext::ifStatement() {
- return getRuleContext<TorqueParser::IfStatementContext>(0);
-}
-
-TorqueParser::DiagnosticStatementContext*
-TorqueParser::StatementContext::diagnosticStatement() {
- return getRuleContext<TorqueParser::DiagnosticStatementContext>(0);
-}
-
-TorqueParser::WhileLoopContext* TorqueParser::StatementContext::whileLoop() {
- return getRuleContext<TorqueParser::WhileLoopContext>(0);
-}
-
-TorqueParser::ForOfLoopContext* TorqueParser::StatementContext::forOfLoop() {
- return getRuleContext<TorqueParser::ForOfLoopContext>(0);
-}
-
-TorqueParser::ForLoopContext* TorqueParser::StatementContext::forLoop() {
- return getRuleContext<TorqueParser::ForLoopContext>(0);
-}
-
-TorqueParser::TryLabelStatementContext*
-TorqueParser::StatementContext::tryLabelStatement() {
- return getRuleContext<TorqueParser::TryLabelStatementContext>(0);
-}
-
-size_t TorqueParser::StatementContext::getRuleIndex() const {
- return TorqueParser::RuleStatement;
-}
-
-void TorqueParser::StatementContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterStatement(this);
-}
-
-void TorqueParser::StatementContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitStatement(this);
-}
-
-antlrcpp::Any TorqueParser::StatementContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitStatement(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::StatementContext* TorqueParser::statement() {
- StatementContext* _localctx =
- _tracker.createInstance<StatementContext>(_ctx, getState());
- enterRule(_localctx, 104, TorqueParser::RuleStatement);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(640);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 59, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(611);
- variableDeclarationWithInitialization();
- setState(612);
- match(TorqueParser::T__14);
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(614);
- helperCallStatement();
- setState(615);
- match(TorqueParser::T__14);
- break;
- }
-
- case 3: {
- enterOuterAlt(_localctx, 3);
- setState(617);
- expressionStatement();
- setState(618);
- match(TorqueParser::T__14);
- break;
- }
-
- case 4: {
- enterOuterAlt(_localctx, 4);
- setState(620);
- returnStatement();
- setState(621);
- match(TorqueParser::T__14);
- break;
- }
-
- case 5: {
- enterOuterAlt(_localctx, 5);
- setState(623);
- breakStatement();
- setState(624);
- match(TorqueParser::T__14);
- break;
- }
-
- case 6: {
- enterOuterAlt(_localctx, 6);
- setState(626);
- continueStatement();
- setState(627);
- match(TorqueParser::T__14);
- break;
- }
-
- case 7: {
- enterOuterAlt(_localctx, 7);
- setState(629);
- gotoStatement();
- setState(630);
- match(TorqueParser::T__14);
- break;
- }
-
- case 8: {
- enterOuterAlt(_localctx, 8);
- setState(632);
- ifStatement();
- break;
- }
-
- case 9: {
- enterOuterAlt(_localctx, 9);
- setState(633);
- diagnosticStatement();
- setState(634);
- match(TorqueParser::T__14);
- break;
- }
-
- case 10: {
- enterOuterAlt(_localctx, 10);
- setState(636);
- whileLoop();
- break;
- }
-
- case 11: {
- enterOuterAlt(_localctx, 11);
- setState(637);
- forOfLoop();
- break;
- }
-
- case 12: {
- enterOuterAlt(_localctx, 12);
- setState(638);
- forLoop();
- break;
- }
-
- case 13: {
- enterOuterAlt(_localctx, 13);
- setState(639);
- tryLabelStatement();
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- StatementListContext
-//------------------------------------------------------------------
-
-TorqueParser::StatementListContext::StatementListContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::StatementContext*>
-TorqueParser::StatementListContext::statement() {
- return getRuleContexts<TorqueParser::StatementContext>();
-}
-
-TorqueParser::StatementContext* TorqueParser::StatementListContext::statement(
- size_t i) {
- return getRuleContext<TorqueParser::StatementContext>(i);
-}
-
-size_t TorqueParser::StatementListContext::getRuleIndex() const {
- return TorqueParser::RuleStatementList;
-}
-
-void TorqueParser::StatementListContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterStatementList(this);
-}
-
-void TorqueParser::StatementListContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitStatementList(this);
-}
-
-antlrcpp::Any TorqueParser::StatementListContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitStatementList(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::StatementListContext* TorqueParser::statementList() {
- StatementListContext* _localctx =
- _tracker.createInstance<StatementListContext>(_ctx, getState());
- enterRule(_localctx, 106, TorqueParser::RuleStatementList);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(645);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (
- (((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__0) | (1ULL << TorqueParser::IF) |
- (1ULL << TorqueParser::FOR) | (1ULL << TorqueParser::WHILE) |
- (1ULL << TorqueParser::RETURN) | (1ULL << TorqueParser::CONTINUE) |
- (1ULL << TorqueParser::BREAK) | (1ULL << TorqueParser::GOTO) |
- (1ULL << TorqueParser::TRY) | (1ULL << TorqueParser::TAIL) |
- (1ULL << TorqueParser::LET) | (1ULL << TorqueParser::CONST) |
- (1ULL << TorqueParser::ASSERT_TOKEN) |
- (1ULL << TorqueParser::CHECK_TOKEN) |
- (1ULL << TorqueParser::UNREACHABLE_TOKEN) |
- (1ULL << TorqueParser::DEBUG_TOKEN) | (1ULL << TorqueParser::MAX) |
- (1ULL << TorqueParser::MIN))) != 0) ||
- ((((_la - 73) & ~0x3fULL) == 0) &&
- ((1ULL << (_la - 73)) &
- ((1ULL << (TorqueParser::INCREMENT - 73)) |
- (1ULL << (TorqueParser::DECREMENT - 73)) |
- (1ULL << (TorqueParser::STRING_LITERAL - 73)) |
- (1ULL << (TorqueParser::IDENTIFIER - 73)) |
- (1ULL << (TorqueParser::DECIMAL_LITERAL - 73)))) != 0)) {
- setState(642);
- statement();
- setState(647);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- StatementScopeContext
-//------------------------------------------------------------------
-
-TorqueParser::StatementScopeContext::StatementScopeContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::StatementListContext*
-TorqueParser::StatementScopeContext::statementList() {
- return getRuleContext<TorqueParser::StatementListContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::StatementScopeContext::DEFERRED() {
- return getToken(TorqueParser::DEFERRED, 0);
-}
-
-size_t TorqueParser::StatementScopeContext::getRuleIndex() const {
- return TorqueParser::RuleStatementScope;
-}
-
-void TorqueParser::StatementScopeContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterStatementScope(this);
-}
-
-void TorqueParser::StatementScopeContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitStatementScope(this);
-}
-
-antlrcpp::Any TorqueParser::StatementScopeContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitStatementScope(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::StatementScopeContext* TorqueParser::statementScope() {
- StatementScopeContext* _localctx =
- _tracker.createInstance<StatementScopeContext>(_ctx, getState());
- enterRule(_localctx, 108, TorqueParser::RuleStatementScope);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(649);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::DEFERRED) {
- setState(648);
- match(TorqueParser::DEFERRED);
- }
- setState(651);
- match(TorqueParser::T__12);
- setState(652);
- statementList();
- setState(653);
- match(TorqueParser::T__13);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- StatementBlockContext
-//------------------------------------------------------------------
-
-TorqueParser::StatementBlockContext::StatementBlockContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::StatementContext*
-TorqueParser::StatementBlockContext::statement() {
- return getRuleContext<TorqueParser::StatementContext>(0);
-}
-
-TorqueParser::StatementScopeContext*
-TorqueParser::StatementBlockContext::statementScope() {
- return getRuleContext<TorqueParser::StatementScopeContext>(0);
-}
-
-size_t TorqueParser::StatementBlockContext::getRuleIndex() const {
- return TorqueParser::RuleStatementBlock;
-}
-
-void TorqueParser::StatementBlockContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterStatementBlock(this);
-}
-
-void TorqueParser::StatementBlockContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitStatementBlock(this);
-}
-
-antlrcpp::Any TorqueParser::StatementBlockContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitStatementBlock(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::StatementBlockContext* TorqueParser::statementBlock() {
- StatementBlockContext* _localctx =
- _tracker.createInstance<StatementBlockContext>(_ctx, getState());
- enterRule(_localctx, 110, TorqueParser::RuleStatementBlock);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(657);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::T__0:
- case TorqueParser::IF:
- case TorqueParser::FOR:
- case TorqueParser::WHILE:
- case TorqueParser::RETURN:
- case TorqueParser::CONTINUE:
- case TorqueParser::BREAK:
- case TorqueParser::GOTO:
- case TorqueParser::TRY:
- case TorqueParser::TAIL:
- case TorqueParser::LET:
- case TorqueParser::CONST:
- case TorqueParser::ASSERT_TOKEN:
- case TorqueParser::CHECK_TOKEN:
- case TorqueParser::UNREACHABLE_TOKEN:
- case TorqueParser::DEBUG_TOKEN:
- case TorqueParser::MAX:
- case TorqueParser::MIN:
- case TorqueParser::INCREMENT:
- case TorqueParser::DECREMENT:
- case TorqueParser::STRING_LITERAL:
- case TorqueParser::IDENTIFIER:
- case TorqueParser::DECIMAL_LITERAL: {
- enterOuterAlt(_localctx, 1);
- setState(655);
- statement();
- break;
- }
-
- case TorqueParser::T__12:
- case TorqueParser::DEFERRED: {
- enterOuterAlt(_localctx, 2);
- setState(656);
- statementScope();
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- HelperBodyContext
-//------------------------------------------------------------------
-
-TorqueParser::HelperBodyContext::HelperBodyContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::StatementScopeContext*
-TorqueParser::HelperBodyContext::statementScope() {
- return getRuleContext<TorqueParser::StatementScopeContext>(0);
-}
-
-size_t TorqueParser::HelperBodyContext::getRuleIndex() const {
- return TorqueParser::RuleHelperBody;
-}
-
-void TorqueParser::HelperBodyContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterHelperBody(this);
-}
-
-void TorqueParser::HelperBodyContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitHelperBody(this);
-}
-
-antlrcpp::Any TorqueParser::HelperBodyContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitHelperBody(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::HelperBodyContext* TorqueParser::helperBody() {
- HelperBodyContext* _localctx =
- _tracker.createInstance<HelperBodyContext>(_ctx, getState());
- enterRule(_localctx, 112, TorqueParser::RuleHelperBody);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(659);
- statementScope();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- FieldDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::FieldDeclarationContext::FieldDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::FieldDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeContext* TorqueParser::FieldDeclarationContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
-size_t TorqueParser::FieldDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleFieldDeclaration;
-}
-
-void TorqueParser::FieldDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterFieldDeclaration(this);
-}
-
-void TorqueParser::FieldDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitFieldDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::FieldDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitFieldDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::FieldDeclarationContext* TorqueParser::fieldDeclaration() {
- FieldDeclarationContext* _localctx =
- _tracker.createInstance<FieldDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 114, TorqueParser::RuleFieldDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(661);
- match(TorqueParser::IDENTIFIER);
- setState(662);
- match(TorqueParser::T__4);
- setState(663);
- type(0);
- setState(664);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- FieldListDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::FieldListDeclarationContext::FieldListDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::FieldDeclarationContext*>
-TorqueParser::FieldListDeclarationContext::fieldDeclaration() {
- return getRuleContexts<TorqueParser::FieldDeclarationContext>();
-}
-
-TorqueParser::FieldDeclarationContext*
-TorqueParser::FieldListDeclarationContext::fieldDeclaration(size_t i) {
- return getRuleContext<TorqueParser::FieldDeclarationContext>(i);
-}
-
-size_t TorqueParser::FieldListDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleFieldListDeclaration;
-}
-
-void TorqueParser::FieldListDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterFieldListDeclaration(this);
-}
-
-void TorqueParser::FieldListDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitFieldListDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::FieldListDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitFieldListDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::FieldListDeclarationContext*
-TorqueParser::fieldListDeclaration() {
- FieldListDeclarationContext* _localctx =
- _tracker.createInstance<FieldListDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 116, TorqueParser::RuleFieldListDeclaration);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(669);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (_la == TorqueParser::IDENTIFIER) {
- setState(666);
- fieldDeclaration();
- setState(671);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ExtendsDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::ExtendsDeclarationContext::ExtendsDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ExtendsDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-size_t TorqueParser::ExtendsDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleExtendsDeclaration;
-}
-
-void TorqueParser::ExtendsDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterExtendsDeclaration(this);
-}
-
-void TorqueParser::ExtendsDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitExtendsDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::ExtendsDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitExtendsDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ExtendsDeclarationContext* TorqueParser::extendsDeclaration() {
- ExtendsDeclarationContext* _localctx =
- _tracker.createInstance<ExtendsDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 118, TorqueParser::RuleExtendsDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(672);
- match(TorqueParser::T__17);
- setState(673);
- match(TorqueParser::IDENTIFIER);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- GeneratesDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::GeneratesDeclarationContext::GeneratesDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode*
-TorqueParser::GeneratesDeclarationContext::STRING_LITERAL() {
- return getToken(TorqueParser::STRING_LITERAL, 0);
-}
-
-size_t TorqueParser::GeneratesDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleGeneratesDeclaration;
-}
-
-void TorqueParser::GeneratesDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterGeneratesDeclaration(this);
-}
-
-void TorqueParser::GeneratesDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitGeneratesDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::GeneratesDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitGeneratesDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::GeneratesDeclarationContext*
-TorqueParser::generatesDeclaration() {
- GeneratesDeclarationContext* _localctx =
- _tracker.createInstance<GeneratesDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 120, TorqueParser::RuleGeneratesDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(675);
- match(TorqueParser::T__18);
- setState(676);
- match(TorqueParser::STRING_LITERAL);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ConstexprDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::ConstexprDeclarationContext::ConstexprDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode*
-TorqueParser::ConstexprDeclarationContext::STRING_LITERAL() {
- return getToken(TorqueParser::STRING_LITERAL, 0);
-}
-
-size_t TorqueParser::ConstexprDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleConstexprDeclaration;
-}
-
-void TorqueParser::ConstexprDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterConstexprDeclaration(this);
-}
-
-void TorqueParser::ConstexprDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitConstexprDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::ConstexprDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitConstexprDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ConstexprDeclarationContext*
-TorqueParser::constexprDeclaration() {
- ConstexprDeclarationContext* _localctx =
- _tracker.createInstance<ConstexprDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 122, TorqueParser::RuleConstexprDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(678);
- match(TorqueParser::CONSTEXPR);
- setState(679);
- match(TorqueParser::STRING_LITERAL);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- TypeDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::TypeDeclarationContext::TypeDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::TypeDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::ExtendsDeclarationContext*
-TorqueParser::TypeDeclarationContext::extendsDeclaration() {
- return getRuleContext<TorqueParser::ExtendsDeclarationContext>(0);
-}
-
-TorqueParser::GeneratesDeclarationContext*
-TorqueParser::TypeDeclarationContext::generatesDeclaration() {
- return getRuleContext<TorqueParser::GeneratesDeclarationContext>(0);
-}
-
-TorqueParser::ConstexprDeclarationContext*
-TorqueParser::TypeDeclarationContext::constexprDeclaration() {
- return getRuleContext<TorqueParser::ConstexprDeclarationContext>(0);
-}
-
-size_t TorqueParser::TypeDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleTypeDeclaration;
-}
-
-void TorqueParser::TypeDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterTypeDeclaration(this);
-}
-
-void TorqueParser::TypeDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitTypeDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::TypeDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitTypeDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::TypeDeclarationContext* TorqueParser::typeDeclaration() {
- TypeDeclarationContext* _localctx =
- _tracker.createInstance<TypeDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 124, TorqueParser::RuleTypeDeclaration);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(681);
- match(TorqueParser::T__5);
- setState(682);
- match(TorqueParser::IDENTIFIER);
- setState(684);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__17) {
- setState(683);
- extendsDeclaration();
- }
- setState(687);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__18) {
- setState(686);
- generatesDeclaration();
- }
- setState(690);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::CONSTEXPR) {
- setState(689);
- constexprDeclaration();
- }
- setState(692);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- TypeAliasDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::TypeAliasDeclarationContext::TypeAliasDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::TypeAliasDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeContext* TorqueParser::TypeAliasDeclarationContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
-size_t TorqueParser::TypeAliasDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleTypeAliasDeclaration;
-}
-
-void TorqueParser::TypeAliasDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterTypeAliasDeclaration(this);
-}
-
-void TorqueParser::TypeAliasDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitTypeAliasDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::TypeAliasDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitTypeAliasDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::TypeAliasDeclarationContext*
-TorqueParser::typeAliasDeclaration() {
- TypeAliasDeclarationContext* _localctx =
- _tracker.createInstance<TypeAliasDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 126, TorqueParser::RuleTypeAliasDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(694);
- match(TorqueParser::T__5);
- setState(695);
- match(TorqueParser::IDENTIFIER);
- setState(696);
- match(TorqueParser::ASSIGNMENT);
- setState(697);
- type(0);
- setState(698);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ExternalBuiltinContext
-//------------------------------------------------------------------
-
-TorqueParser::ExternalBuiltinContext::ExternalBuiltinContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ExternalBuiltinContext::EXTERN() {
- return getToken(TorqueParser::EXTERN, 0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalBuiltinContext::BUILTIN() {
- return getToken(TorqueParser::BUILTIN, 0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalBuiltinContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::OptionalGenericTypeListContext*
-TorqueParser::ExternalBuiltinContext::optionalGenericTypeList() {
- return getRuleContext<TorqueParser::OptionalGenericTypeListContext>(0);
-}
-
-TorqueParser::TypeListContext*
-TorqueParser::ExternalBuiltinContext::typeList() {
- return getRuleContext<TorqueParser::TypeListContext>(0);
-}
-
-TorqueParser::OptionalTypeContext*
-TorqueParser::ExternalBuiltinContext::optionalType() {
- return getRuleContext<TorqueParser::OptionalTypeContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalBuiltinContext::JAVASCRIPT() {
- return getToken(TorqueParser::JAVASCRIPT, 0);
-}
-
-size_t TorqueParser::ExternalBuiltinContext::getRuleIndex() const {
- return TorqueParser::RuleExternalBuiltin;
-}
-
-void TorqueParser::ExternalBuiltinContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterExternalBuiltin(this);
-}
-
-void TorqueParser::ExternalBuiltinContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitExternalBuiltin(this);
-}
-
-antlrcpp::Any TorqueParser::ExternalBuiltinContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitExternalBuiltin(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ExternalBuiltinContext* TorqueParser::externalBuiltin() {
- ExternalBuiltinContext* _localctx =
- _tracker.createInstance<ExternalBuiltinContext>(_ctx, getState());
- enterRule(_localctx, 128, TorqueParser::RuleExternalBuiltin);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(700);
- match(TorqueParser::EXTERN);
- setState(702);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::JAVASCRIPT) {
- setState(701);
- match(TorqueParser::JAVASCRIPT);
- }
- setState(704);
- match(TorqueParser::BUILTIN);
- setState(705);
- match(TorqueParser::IDENTIFIER);
- setState(706);
- optionalGenericTypeList();
- setState(707);
- match(TorqueParser::T__0);
- setState(708);
- typeList();
- setState(709);
- match(TorqueParser::T__1);
- setState(710);
- optionalType();
- setState(711);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ExternalMacroContext
-//------------------------------------------------------------------
-
-TorqueParser::ExternalMacroContext::ExternalMacroContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ExternalMacroContext::EXTERN() {
- return getToken(TorqueParser::EXTERN, 0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalMacroContext::MACRO() {
- return getToken(TorqueParser::MACRO, 0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalMacroContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::OptionalGenericTypeListContext*
-TorqueParser::ExternalMacroContext::optionalGenericTypeList() {
- return getRuleContext<TorqueParser::OptionalGenericTypeListContext>(0);
-}
-
-TorqueParser::TypeListMaybeVarArgsContext*
-TorqueParser::ExternalMacroContext::typeListMaybeVarArgs() {
- return getRuleContext<TorqueParser::TypeListMaybeVarArgsContext>(0);
-}
-
-TorqueParser::OptionalTypeContext*
-TorqueParser::ExternalMacroContext::optionalType() {
- return getRuleContext<TorqueParser::OptionalTypeContext>(0);
-}
-
-TorqueParser::OptionalLabelListContext*
-TorqueParser::ExternalMacroContext::optionalLabelList() {
- return getRuleContext<TorqueParser::OptionalLabelListContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalMacroContext::STRING_LITERAL() {
- return getToken(TorqueParser::STRING_LITERAL, 0);
-}
-
-
-size_t TorqueParser::ExternalMacroContext::getRuleIndex() const {
- return TorqueParser::RuleExternalMacro;
-}
-
-void TorqueParser::ExternalMacroContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterExternalMacro(this);
-}
-
-void TorqueParser::ExternalMacroContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitExternalMacro(this);
-}
-
-antlrcpp::Any TorqueParser::ExternalMacroContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitExternalMacro(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ExternalMacroContext* TorqueParser::externalMacro() {
- ExternalMacroContext* _localctx =
- _tracker.createInstance<ExternalMacroContext>(_ctx, getState());
- enterRule(_localctx, 130, TorqueParser::RuleExternalMacro);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(713);
- match(TorqueParser::EXTERN);
- setState(716);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__19) {
- setState(714);
- match(TorqueParser::T__19);
- setState(715);
- match(TorqueParser::STRING_LITERAL);
- }
- setState(718);
- match(TorqueParser::MACRO);
- setState(719);
- match(TorqueParser::IDENTIFIER);
- setState(720);
- optionalGenericTypeList();
- setState(721);
- typeListMaybeVarArgs();
- setState(722);
- optionalType();
- setState(723);
- optionalLabelList();
- setState(724);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ExternalRuntimeContext
-//------------------------------------------------------------------
-
-TorqueParser::ExternalRuntimeContext::ExternalRuntimeContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ExternalRuntimeContext::EXTERN() {
- return getToken(TorqueParser::EXTERN, 0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalRuntimeContext::RUNTIME() {
- return getToken(TorqueParser::RUNTIME, 0);
-}
-
-tree::TerminalNode* TorqueParser::ExternalRuntimeContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeListMaybeVarArgsContext*
-TorqueParser::ExternalRuntimeContext::typeListMaybeVarArgs() {
- return getRuleContext<TorqueParser::TypeListMaybeVarArgsContext>(0);
-}
-
-TorqueParser::OptionalTypeContext*
-TorqueParser::ExternalRuntimeContext::optionalType() {
- return getRuleContext<TorqueParser::OptionalTypeContext>(0);
-}
-
-size_t TorqueParser::ExternalRuntimeContext::getRuleIndex() const {
- return TorqueParser::RuleExternalRuntime;
-}
-
-void TorqueParser::ExternalRuntimeContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterExternalRuntime(this);
-}
-
-void TorqueParser::ExternalRuntimeContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitExternalRuntime(this);
-}
-
-antlrcpp::Any TorqueParser::ExternalRuntimeContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitExternalRuntime(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ExternalRuntimeContext* TorqueParser::externalRuntime() {
- ExternalRuntimeContext* _localctx =
- _tracker.createInstance<ExternalRuntimeContext>(_ctx, getState());
- enterRule(_localctx, 132, TorqueParser::RuleExternalRuntime);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(726);
- match(TorqueParser::EXTERN);
- setState(727);
- match(TorqueParser::RUNTIME);
- setState(728);
- match(TorqueParser::IDENTIFIER);
- setState(729);
- typeListMaybeVarArgs();
- setState(730);
- optionalType();
- setState(731);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- BuiltinDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::BuiltinDeclarationContext::BuiltinDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::BuiltinDeclarationContext::BUILTIN() {
- return getToken(TorqueParser::BUILTIN, 0);
-}
-
-tree::TerminalNode* TorqueParser::BuiltinDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::OptionalGenericTypeListContext*
-TorqueParser::BuiltinDeclarationContext::optionalGenericTypeList() {
- return getRuleContext<TorqueParser::OptionalGenericTypeListContext>(0);
-}
-
-TorqueParser::ParameterListContext*
-TorqueParser::BuiltinDeclarationContext::parameterList() {
- return getRuleContext<TorqueParser::ParameterListContext>(0);
-}
-
-TorqueParser::OptionalTypeContext*
-TorqueParser::BuiltinDeclarationContext::optionalType() {
- return getRuleContext<TorqueParser::OptionalTypeContext>(0);
-}
-
-TorqueParser::HelperBodyContext*
-TorqueParser::BuiltinDeclarationContext::helperBody() {
- return getRuleContext<TorqueParser::HelperBodyContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::BuiltinDeclarationContext::JAVASCRIPT() {
- return getToken(TorqueParser::JAVASCRIPT, 0);
-}
-
-size_t TorqueParser::BuiltinDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleBuiltinDeclaration;
-}
-
-void TorqueParser::BuiltinDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterBuiltinDeclaration(this);
-}
-
-void TorqueParser::BuiltinDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitBuiltinDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::BuiltinDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitBuiltinDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::BuiltinDeclarationContext* TorqueParser::builtinDeclaration() {
- BuiltinDeclarationContext* _localctx =
- _tracker.createInstance<BuiltinDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 134, TorqueParser::RuleBuiltinDeclaration);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(734);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::JAVASCRIPT) {
- setState(733);
- match(TorqueParser::JAVASCRIPT);
- }
- setState(736);
- match(TorqueParser::BUILTIN);
- setState(737);
- match(TorqueParser::IDENTIFIER);
- setState(738);
- optionalGenericTypeList();
- setState(739);
- parameterList();
- setState(740);
- optionalType();
- setState(743);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::T__12:
- case TorqueParser::DEFERRED: {
- setState(741);
- helperBody();
- break;
- }
-
- case TorqueParser::T__14: {
- setState(742);
- match(TorqueParser::T__14);
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- GenericSpecializationContext
-//------------------------------------------------------------------
-
-TorqueParser::GenericSpecializationContext::GenericSpecializationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::GenericSpecializationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::GenericSpecializationTypeListContext*
-TorqueParser::GenericSpecializationContext::genericSpecializationTypeList() {
- return getRuleContext<TorqueParser::GenericSpecializationTypeListContext>(0);
-}
-
-TorqueParser::ParameterListContext*
-TorqueParser::GenericSpecializationContext::parameterList() {
- return getRuleContext<TorqueParser::ParameterListContext>(0);
-}
-
-TorqueParser::OptionalTypeContext*
-TorqueParser::GenericSpecializationContext::optionalType() {
- return getRuleContext<TorqueParser::OptionalTypeContext>(0);
-}
-
-TorqueParser::OptionalLabelListContext*
-TorqueParser::GenericSpecializationContext::optionalLabelList() {
- return getRuleContext<TorqueParser::OptionalLabelListContext>(0);
-}
-
-TorqueParser::HelperBodyContext*
-TorqueParser::GenericSpecializationContext::helperBody() {
- return getRuleContext<TorqueParser::HelperBodyContext>(0);
-}
-
-size_t TorqueParser::GenericSpecializationContext::getRuleIndex() const {
- return TorqueParser::RuleGenericSpecialization;
-}
-
-void TorqueParser::GenericSpecializationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterGenericSpecialization(this);
-}
-
-void TorqueParser::GenericSpecializationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitGenericSpecialization(this);
-}
-
-antlrcpp::Any TorqueParser::GenericSpecializationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitGenericSpecialization(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::GenericSpecializationContext*
-TorqueParser::genericSpecialization() {
- GenericSpecializationContext* _localctx =
- _tracker.createInstance<GenericSpecializationContext>(_ctx, getState());
- enterRule(_localctx, 136, TorqueParser::RuleGenericSpecialization);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(745);
- match(TorqueParser::IDENTIFIER);
- setState(746);
- genericSpecializationTypeList();
- setState(747);
- parameterList();
- setState(748);
- optionalType();
- setState(749);
- optionalLabelList();
- setState(750);
- helperBody();
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- MacroDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::MacroDeclarationContext::MacroDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::MacroDeclarationContext::MACRO() {
- return getToken(TorqueParser::MACRO, 0);
-}
-
-tree::TerminalNode* TorqueParser::MacroDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::OptionalGenericTypeListContext*
-TorqueParser::MacroDeclarationContext::optionalGenericTypeList() {
- return getRuleContext<TorqueParser::OptionalGenericTypeListContext>(0);
-}
-
-TorqueParser::ParameterListContext*
-TorqueParser::MacroDeclarationContext::parameterList() {
- return getRuleContext<TorqueParser::ParameterListContext>(0);
-}
-
-TorqueParser::OptionalTypeContext*
-TorqueParser::MacroDeclarationContext::optionalType() {
- return getRuleContext<TorqueParser::OptionalTypeContext>(0);
-}
-
-TorqueParser::OptionalLabelListContext*
-TorqueParser::MacroDeclarationContext::optionalLabelList() {
- return getRuleContext<TorqueParser::OptionalLabelListContext>(0);
-}
-
-TorqueParser::HelperBodyContext*
-TorqueParser::MacroDeclarationContext::helperBody() {
- return getRuleContext<TorqueParser::HelperBodyContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::MacroDeclarationContext::STRING_LITERAL() {
- return getToken(TorqueParser::STRING_LITERAL, 0);
-}
-
-size_t TorqueParser::MacroDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleMacroDeclaration;
-}
-
-void TorqueParser::MacroDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterMacroDeclaration(this);
-}
-
-void TorqueParser::MacroDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitMacroDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::MacroDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitMacroDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::MacroDeclarationContext* TorqueParser::macroDeclaration() {
- MacroDeclarationContext* _localctx =
- _tracker.createInstance<MacroDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 138, TorqueParser::RuleMacroDeclaration);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(754);
- _errHandler->sync(this);
-
- _la = _input->LA(1);
- if (_la == TorqueParser::T__19) {
- setState(752);
- match(TorqueParser::T__19);
- setState(753);
- match(TorqueParser::STRING_LITERAL);
- }
- setState(756);
- match(TorqueParser::MACRO);
- setState(757);
- match(TorqueParser::IDENTIFIER);
- setState(758);
- optionalGenericTypeList();
- setState(759);
- parameterList();
- setState(760);
- optionalType();
- setState(761);
- optionalLabelList();
- setState(764);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::T__12:
- case TorqueParser::DEFERRED: {
- setState(762);
- helperBody();
- break;
- }
-
- case TorqueParser::T__14: {
- setState(763);
- match(TorqueParser::T__14);
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ExternConstDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::ExternConstDeclarationContext::ExternConstDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ExternConstDeclarationContext::CONST() {
- return getToken(TorqueParser::CONST, 0);
-}
-
-tree::TerminalNode* TorqueParser::ExternConstDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeContext* TorqueParser::ExternConstDeclarationContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
-TorqueParser::GeneratesDeclarationContext*
-TorqueParser::ExternConstDeclarationContext::generatesDeclaration() {
- return getRuleContext<TorqueParser::GeneratesDeclarationContext>(0);
-}
-
-size_t TorqueParser::ExternConstDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleExternConstDeclaration;
-}
-
-void TorqueParser::ExternConstDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->enterExternConstDeclaration(this);
-}
-
-void TorqueParser::ExternConstDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr)
- parserListener->exitExternConstDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::ExternConstDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitExternConstDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ExternConstDeclarationContext*
-TorqueParser::externConstDeclaration() {
- ExternConstDeclarationContext* _localctx =
- _tracker.createInstance<ExternConstDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 140, TorqueParser::RuleExternConstDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(766);
- match(TorqueParser::CONST);
- setState(767);
- match(TorqueParser::IDENTIFIER);
- setState(768);
- match(TorqueParser::T__4);
- setState(769);
- type(0);
- setState(770);
- generatesDeclaration();
- setState(771);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ConstDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::ConstDeclarationContext::ConstDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ConstDeclarationContext::CONST() {
- return getToken(TorqueParser::CONST, 0);
-}
-
-tree::TerminalNode* TorqueParser::ConstDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::TypeContext* TorqueParser::ConstDeclarationContext::type() {
- return getRuleContext<TorqueParser::TypeContext>(0);
-}
-
-tree::TerminalNode* TorqueParser::ConstDeclarationContext::ASSIGNMENT() {
- return getToken(TorqueParser::ASSIGNMENT, 0);
-}
-
-TorqueParser::ExpressionContext*
-TorqueParser::ConstDeclarationContext::expression() {
- return getRuleContext<TorqueParser::ExpressionContext>(0);
-}
-
-size_t TorqueParser::ConstDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleConstDeclaration;
-}
-
-void TorqueParser::ConstDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterConstDeclaration(this);
-}
-
-void TorqueParser::ConstDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitConstDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::ConstDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitConstDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ConstDeclarationContext* TorqueParser::constDeclaration() {
- ConstDeclarationContext* _localctx =
- _tracker.createInstance<ConstDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 142, TorqueParser::RuleConstDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(773);
- match(TorqueParser::CONST);
- setState(774);
- match(TorqueParser::IDENTIFIER);
- setState(775);
- match(TorqueParser::T__4);
- setState(776);
- type(0);
- setState(777);
- match(TorqueParser::ASSIGNMENT);
- setState(778);
- expression();
- setState(779);
- match(TorqueParser::T__14);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- StructDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::StructDeclarationContext::StructDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::StructDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-TorqueParser::FieldListDeclarationContext*
-TorqueParser::StructDeclarationContext::fieldListDeclaration() {
- return getRuleContext<TorqueParser::FieldListDeclarationContext>(0);
-}
-
-size_t TorqueParser::StructDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleStructDeclaration;
-}
-
-void TorqueParser::StructDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterStructDeclaration(this);
-}
-
-void TorqueParser::StructDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitStructDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::StructDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitStructDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::StructDeclarationContext* TorqueParser::structDeclaration() {
- StructDeclarationContext* _localctx =
- _tracker.createInstance<StructDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 144, TorqueParser::RuleStructDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(781);
- match(TorqueParser::T__20);
- setState(782);
- match(TorqueParser::IDENTIFIER);
- setState(783);
- match(TorqueParser::T__12);
- setState(784);
- fieldListDeclaration();
- setState(785);
- match(TorqueParser::T__13);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- DeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::DeclarationContext::DeclarationContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-TorqueParser::StructDeclarationContext*
-TorqueParser::DeclarationContext::structDeclaration() {
- return getRuleContext<TorqueParser::StructDeclarationContext>(0);
-}
-
-TorqueParser::TypeDeclarationContext*
-TorqueParser::DeclarationContext::typeDeclaration() {
- return getRuleContext<TorqueParser::TypeDeclarationContext>(0);
-}
-
-TorqueParser::TypeAliasDeclarationContext*
-TorqueParser::DeclarationContext::typeAliasDeclaration() {
- return getRuleContext<TorqueParser::TypeAliasDeclarationContext>(0);
-}
-
-TorqueParser::BuiltinDeclarationContext*
-TorqueParser::DeclarationContext::builtinDeclaration() {
- return getRuleContext<TorqueParser::BuiltinDeclarationContext>(0);
-}
-
-TorqueParser::GenericSpecializationContext*
-TorqueParser::DeclarationContext::genericSpecialization() {
- return getRuleContext<TorqueParser::GenericSpecializationContext>(0);
-}
-
-TorqueParser::MacroDeclarationContext*
-TorqueParser::DeclarationContext::macroDeclaration() {
- return getRuleContext<TorqueParser::MacroDeclarationContext>(0);
-}
-
-TorqueParser::ExternalMacroContext*
-TorqueParser::DeclarationContext::externalMacro() {
- return getRuleContext<TorqueParser::ExternalMacroContext>(0);
-}
-
-TorqueParser::ExternalBuiltinContext*
-TorqueParser::DeclarationContext::externalBuiltin() {
- return getRuleContext<TorqueParser::ExternalBuiltinContext>(0);
-}
-
-TorqueParser::ExternalRuntimeContext*
-TorqueParser::DeclarationContext::externalRuntime() {
- return getRuleContext<TorqueParser::ExternalRuntimeContext>(0);
-}
-
-TorqueParser::ExternConstDeclarationContext*
-TorqueParser::DeclarationContext::externConstDeclaration() {
- return getRuleContext<TorqueParser::ExternConstDeclarationContext>(0);
-}
-
-TorqueParser::ConstDeclarationContext*
-TorqueParser::DeclarationContext::constDeclaration() {
- return getRuleContext<TorqueParser::ConstDeclarationContext>(0);
-}
-
-size_t TorqueParser::DeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleDeclaration;
-}
-
-void TorqueParser::DeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterDeclaration(this);
-}
-
-void TorqueParser::DeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::DeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::DeclarationContext* TorqueParser::declaration() {
- DeclarationContext* _localctx =
- _tracker.createInstance<DeclarationContext>(_ctx, getState());
- enterRule(_localctx, 146, TorqueParser::RuleDeclaration);
-
- auto onExit = finally([=] { exitRule(); });
- try {
- setState(798);
- _errHandler->sync(this);
- switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
- _input, 73, _ctx)) {
- case 1: {
- enterOuterAlt(_localctx, 1);
- setState(787);
- structDeclaration();
- break;
- }
-
- case 2: {
- enterOuterAlt(_localctx, 2);
- setState(788);
- typeDeclaration();
- break;
- }
-
- case 3: {
- enterOuterAlt(_localctx, 3);
- setState(789);
- typeAliasDeclaration();
- break;
- }
-
- case 4: {
- enterOuterAlt(_localctx, 4);
- setState(790);
- builtinDeclaration();
- break;
- }
-
- case 5: {
- enterOuterAlt(_localctx, 5);
- setState(791);
- genericSpecialization();
- break;
- }
-
- case 6: {
- enterOuterAlt(_localctx, 6);
- setState(792);
- macroDeclaration();
- break;
- }
-
- case 7: {
- enterOuterAlt(_localctx, 7);
- setState(793);
- externalMacro();
- break;
- }
-
- case 8: {
- enterOuterAlt(_localctx, 8);
- setState(794);
- externalBuiltin();
- break;
- }
-
- case 9: {
- enterOuterAlt(_localctx, 9);
- setState(795);
- externalRuntime();
- break;
- }
-
- case 10: {
- enterOuterAlt(_localctx, 10);
- setState(796);
- externConstDeclaration();
- break;
- }
-
- case 11: {
- enterOuterAlt(_localctx, 11);
- setState(797);
- constDeclaration();
- break;
- }
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- ModuleDeclarationContext
-//------------------------------------------------------------------
-
-TorqueParser::ModuleDeclarationContext::ModuleDeclarationContext(
- ParserRuleContext* parent, size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-tree::TerminalNode* TorqueParser::ModuleDeclarationContext::MODULE() {
- return getToken(TorqueParser::MODULE, 0);
-}
-
-tree::TerminalNode* TorqueParser::ModuleDeclarationContext::IDENTIFIER() {
- return getToken(TorqueParser::IDENTIFIER, 0);
-}
-
-std::vector<TorqueParser::DeclarationContext*>
-TorqueParser::ModuleDeclarationContext::declaration() {
- return getRuleContexts<TorqueParser::DeclarationContext>();
-}
-
-TorqueParser::DeclarationContext*
-TorqueParser::ModuleDeclarationContext::declaration(size_t i) {
- return getRuleContext<TorqueParser::DeclarationContext>(i);
-}
-
-size_t TorqueParser::ModuleDeclarationContext::getRuleIndex() const {
- return TorqueParser::RuleModuleDeclaration;
-}
-
-void TorqueParser::ModuleDeclarationContext::enterRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterModuleDeclaration(this);
-}
-
-void TorqueParser::ModuleDeclarationContext::exitRule(
- tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitModuleDeclaration(this);
-}
-
-antlrcpp::Any TorqueParser::ModuleDeclarationContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitModuleDeclaration(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::ModuleDeclarationContext* TorqueParser::moduleDeclaration() {
- ModuleDeclarationContext* _localctx =
- _tracker.createInstance<ModuleDeclarationContext>(_ctx, getState());
- enterRule(_localctx, 148, TorqueParser::RuleModuleDeclaration);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(800);
- match(TorqueParser::MODULE);
- setState(801);
- match(TorqueParser::IDENTIFIER);
- setState(802);
- match(TorqueParser::T__12);
- setState(806);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (
- (((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__5) | (1ULL << TorqueParser::T__19) |
- (1ULL << TorqueParser::T__20) | (1ULL << TorqueParser::MACRO) |
- (1ULL << TorqueParser::BUILTIN) |
- (1ULL << TorqueParser::JAVASCRIPT) | (1ULL << TorqueParser::CONST) |
- (1ULL << TorqueParser::EXTERN))) != 0) ||
- _la == TorqueParser::IDENTIFIER) {
- setState(803);
- declaration();
- setState(808);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
- setState(809);
- match(TorqueParser::T__13);
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-//----------------- FileContext
-//------------------------------------------------------------------
-
-TorqueParser::FileContext::FileContext(ParserRuleContext* parent,
- size_t invokingState)
- : ParserRuleContext(parent, invokingState) {}
-
-std::vector<TorqueParser::ModuleDeclarationContext*>
-TorqueParser::FileContext::moduleDeclaration() {
- return getRuleContexts<TorqueParser::ModuleDeclarationContext>();
-}
-
-TorqueParser::ModuleDeclarationContext*
-TorqueParser::FileContext::moduleDeclaration(size_t i) {
- return getRuleContext<TorqueParser::ModuleDeclarationContext>(i);
-}
-
-std::vector<TorqueParser::DeclarationContext*>
-TorqueParser::FileContext::declaration() {
- return getRuleContexts<TorqueParser::DeclarationContext>();
-}
-
-TorqueParser::DeclarationContext* TorqueParser::FileContext::declaration(
- size_t i) {
- return getRuleContext<TorqueParser::DeclarationContext>(i);
-}
-
-size_t TorqueParser::FileContext::getRuleIndex() const {
- return TorqueParser::RuleFile;
-}
-
-void TorqueParser::FileContext::enterRule(tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->enterFile(this);
-}
-
-void TorqueParser::FileContext::exitRule(tree::ParseTreeListener* listener) {
- auto parserListener = dynamic_cast<TorqueListener*>(listener);
- if (parserListener != nullptr) parserListener->exitFile(this);
-}
-
-antlrcpp::Any TorqueParser::FileContext::accept(
- tree::ParseTreeVisitor* visitor) {
- if (auto parserVisitor = dynamic_cast<TorqueVisitor*>(visitor))
- return parserVisitor->visitFile(this);
- else
- return visitor->visitChildren(this);
-}
-
-TorqueParser::FileContext* TorqueParser::file() {
- FileContext* _localctx =
- _tracker.createInstance<FileContext>(_ctx, getState());
- enterRule(_localctx, 150, TorqueParser::RuleFile);
- size_t _la = 0;
-
- auto onExit = finally([=] { exitRule(); });
- try {
- enterOuterAlt(_localctx, 1);
- setState(815);
- _errHandler->sync(this);
- _la = _input->LA(1);
- while (
- (((_la & ~0x3fULL) == 0) &&
- ((1ULL << _la) &
- ((1ULL << TorqueParser::T__5) | (1ULL << TorqueParser::T__19) |
- (1ULL << TorqueParser::T__20) | (1ULL << TorqueParser::MACRO) |
- (1ULL << TorqueParser::BUILTIN) | (1ULL << TorqueParser::MODULE) |
- (1ULL << TorqueParser::JAVASCRIPT) | (1ULL << TorqueParser::CONST) |
- (1ULL << TorqueParser::EXTERN))) != 0) ||
- _la == TorqueParser::IDENTIFIER) {
- setState(813);
- _errHandler->sync(this);
- switch (_input->LA(1)) {
- case TorqueParser::MODULE: {
- setState(811);
- moduleDeclaration();
- break;
- }
-
- case TorqueParser::T__5:
- case TorqueParser::T__19:
- case TorqueParser::T__20:
- case TorqueParser::MACRO:
- case TorqueParser::BUILTIN:
- case TorqueParser::JAVASCRIPT:
- case TorqueParser::CONST:
- case TorqueParser::EXTERN:
- case TorqueParser::IDENTIFIER: {
- setState(812);
- declaration();
- break;
- }
-
- default:
- throw NoViableAltException(this);
- }
- setState(817);
- _errHandler->sync(this);
- _la = _input->LA(1);
- }
-
- } catch (RecognitionException& e) {
- _errHandler->reportError(this, e);
- _localctx->exception = std::current_exception();
- _errHandler->recover(this, _localctx->exception);
- }
-
- return _localctx;
-}
-
-bool TorqueParser::sempred(RuleContext* context, size_t ruleIndex,
- size_t predicateIndex) {
- switch (ruleIndex) {
- case 0:
- return typeSempred(dynamic_cast<TypeContext*>(context), predicateIndex);
- case 13:
- return conditionalExpressionSempred(
- dynamic_cast<ConditionalExpressionContext*>(context), predicateIndex);
- case 14:
- return logicalORExpressionSempred(
- dynamic_cast<LogicalORExpressionContext*>(context), predicateIndex);
- case 15:
- return logicalANDExpressionSempred(
- dynamic_cast<LogicalANDExpressionContext*>(context), predicateIndex);
- case 16:
- return bitwiseExpressionSempred(
- dynamic_cast<BitwiseExpressionContext*>(context), predicateIndex);
- case 17:
- return equalityExpressionSempred(
- dynamic_cast<EqualityExpressionContext*>(context), predicateIndex);
- case 18:
- return relationalExpressionSempred(
- dynamic_cast<RelationalExpressionContext*>(context), predicateIndex);
- case 19:
- return shiftExpressionSempred(
- dynamic_cast<ShiftExpressionContext*>(context), predicateIndex);
- case 20:
- return additiveExpressionSempred(
- dynamic_cast<AdditiveExpressionContext*>(context), predicateIndex);
- case 21:
- return multiplicativeExpressionSempred(
- dynamic_cast<MultiplicativeExpressionContext*>(context),
- predicateIndex);
- case 23:
- return locationExpressionSempred(
- dynamic_cast<LocationExpressionContext*>(context), predicateIndex);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::typeSempred(TypeContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 0:
- return precpred(_ctx, 2);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::conditionalExpressionSempred(
- ConditionalExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 1:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::logicalORExpressionSempred(
- LogicalORExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 2:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::logicalANDExpressionSempred(
- LogicalANDExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 3:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::bitwiseExpressionSempred(BitwiseExpressionContext* _localctx,
- size_t predicateIndex) {
- switch (predicateIndex) {
- case 4:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::equalityExpressionSempred(
- EqualityExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 5:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::relationalExpressionSempred(
- RelationalExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 6:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::shiftExpressionSempred(ShiftExpressionContext* _localctx,
- size_t predicateIndex) {
- switch (predicateIndex) {
- case 7:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::additiveExpressionSempred(
- AdditiveExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 8:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::multiplicativeExpressionSempred(
- MultiplicativeExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 9:
- return precpred(_ctx, 1);
-
- default:
- break;
- }
- return true;
-}
-
-bool TorqueParser::locationExpressionSempred(
- LocationExpressionContext* _localctx, size_t predicateIndex) {
- switch (predicateIndex) {
- case 10:
- return precpred(_ctx, 4);
- case 11:
- return precpred(_ctx, 2);
-
- default:
- break;
- }
- return true;
-}
-
-// Static vars and initialization.
-std::vector<dfa::DFA> TorqueParser::_decisionToDFA;
-atn::PredictionContextCache TorqueParser::_sharedContextCache;
-
-// We own the ATN which in turn owns the ATN states.
-atn::ATN TorqueParser::_atn;
-std::vector<uint16_t> TorqueParser::_serializedATN;
-
-std::vector<std::string> TorqueParser::_ruleNames = {
- "type",
- "typeList",
- "genericSpecializationTypeList",
- "optionalGenericTypeList",
- "typeListMaybeVarArgs",
- "labelParameter",
- "optionalType",
- "optionalLabelList",
- "optionalOtherwise",
- "parameter",
- "parameterList",
- "labelDeclaration",
- "expression",
- "conditionalExpression",
- "logicalORExpression",
- "logicalANDExpression",
- "bitwiseExpression",
- "equalityExpression",
- "relationalExpression",
- "shiftExpression",
- "additiveExpression",
- "multiplicativeExpression",
- "unaryExpression",
- "locationExpression",
- "incrementDecrement",
- "assignment",
- "assignmentExpression",
- "structExpression",
- "functionPointerExpression",
- "primaryExpression",
- "forInitialization",
- "forLoop",
- "rangeSpecifier",
- "forOfRange",
- "forOfLoop",
- "argument",
- "argumentList",
- "helperCall",
- "labelReference",
- "variableDeclaration",
- "variableDeclarationWithInitialization",
- "helperCallStatement",
- "expressionStatement",
- "ifStatement",
- "whileLoop",
- "returnStatement",
- "breakStatement",
- "continueStatement",
- "gotoStatement",
- "handlerWithStatement",
- "tryLabelStatement",
- "diagnosticStatement",
- "statement",
- "statementList",
- "statementScope",
- "statementBlock",
- "helperBody",
- "fieldDeclaration",
- "fieldListDeclaration",
- "extendsDeclaration",
- "generatesDeclaration",
- "constexprDeclaration",
- "typeDeclaration",
- "typeAliasDeclaration",
- "externalBuiltin",
- "externalMacro",
- "externalRuntime",
- "builtinDeclaration",
- "genericSpecialization",
- "macroDeclaration",
- "externConstDeclaration",
- "constDeclaration",
- "structDeclaration",
- "declaration",
- "moduleDeclaration",
- "file"};
-
-std::vector<std::string> TorqueParser::_literalNames = {"",
- "'('",
- "')'",
- "'=>'",
- "','",
- "':'",
- "'type'",
- "'?'",
- "'||'",
- "'&&'",
- "'.'",
- "'['",
- "']'",
- "'{'",
- "'}'",
- "';'",
- "'of'",
- "'else'",
- "'extends'",
- "'generates'",
- "'operator'",
- "'struct'",
- "'macro'",
- "'builtin'",
- "'runtime'",
- "'module'",
- "'javascript'",
- "'deferred'",
- "'if'",
- "'for'",
- "'while'",
- "'return'",
- "'constexpr'",
- "'continue'",
- "'break'",
- "'goto'",
- "'otherwise'",
- "'try'",
- "'label'",
- "'labels'",
- "'tail'",
- "'isnt'",
- "'is'",
- "'let'",
- "'const'",
- "'extern'",
- "'assert'",
- "'check'",
- "'unreachable'",
- "'debug'",
- "'='",
- "",
- "'=='",
- "'+'",
- "'-'",
- "'*'",
- "'/'",
- "'%'",
- "'|'",
- "'&'",
- "'~'",
- "'max'",
- "'min'",
- "'!='",
- "'<'",
- "'<='",
- "'>'",
- "'>='",
- "'<<'",
- "'>>'",
- "'>>>'",
- "'...'",
- "",
- "'++'",
- "'--'",
- "'!'"};
-
-std::vector<std::string> TorqueParser::_symbolicNames = {
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "MACRO",
- "BUILTIN",
- "RUNTIME",
- "MODULE",
- "JAVASCRIPT",
- "DEFERRED",
- "IF",
- "FOR",
- "WHILE",
- "RETURN",
- "CONSTEXPR",
- "CONTINUE",
- "BREAK",
- "GOTO",
- "OTHERWISE",
- "TRY",
- "LABEL",
- "LABELS",
- "TAIL",
- "ISNT",
- "IS",
- "LET",
- "CONST",
- "EXTERN",
- "ASSERT_TOKEN",
- "CHECK_TOKEN",
- "UNREACHABLE_TOKEN",
- "DEBUG_TOKEN",
- "ASSIGNMENT",
- "ASSIGNMENT_OPERATOR",
- "EQUAL",
- "PLUS",
- "MINUS",
- "MULTIPLY",
- "DIVIDE",
- "MODULO",
- "BIT_OR",
- "BIT_AND",
- "BIT_NOT",
- "MAX",
- "MIN",
- "NOT_EQUAL",
- "LESS_THAN",
- "LESS_THAN_EQUAL",
- "GREATER_THAN",
- "GREATER_THAN_EQUAL",
- "SHIFT_LEFT",
- "SHIFT_RIGHT",
- "SHIFT_RIGHT_ARITHMETIC",
- "VARARGS",
- "EQUALITY_OPERATOR",
- "INCREMENT",
- "DECREMENT",
- "NOT",
- "STRING_LITERAL",
- "IDENTIFIER",
- "WS",
- "BLOCK_COMMENT",
- "LINE_COMMENT",
- "DECIMAL_LITERAL"};
-
-dfa::Vocabulary TorqueParser::_vocabulary(_literalNames, _symbolicNames);
-
-std::vector<std::string> TorqueParser::_tokenNames;
-
-TorqueParser::Initializer::Initializer() {
- for (size_t i = 0; i < _symbolicNames.size(); ++i) {
- std::string name = _vocabulary.getLiteralName(i);
- if (name.empty()) {
- name = _vocabulary.getSymbolicName(i);
- }
-
- if (name.empty()) {
- _tokenNames.push_back("<INVALID>");
- } else {
- _tokenNames.push_back(name);
- }
- }
-
- _serializedATN = {
- 0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964,
- 0x3, 0x53, 0x335, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3,
- 0x9, 0x3, 0x4, 0x4, 0x9, 0x4, 0x4, 0x5, 0x9,
- 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7, 0x9, 0x7,
- 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9, 0x9, 0x4,
- 0xa, 0x9, 0xa, 0x4, 0xb, 0x9, 0xb, 0x4, 0xc,
- 0x9, 0xc, 0x4, 0xd, 0x9, 0xd, 0x4, 0xe, 0x9,
- 0xe, 0x4, 0xf, 0x9, 0xf, 0x4, 0x10, 0x9, 0x10,
- 0x4, 0x11, 0x9, 0x11, 0x4, 0x12, 0x9, 0x12, 0x4,
- 0x13, 0x9, 0x13, 0x4, 0x14, 0x9, 0x14, 0x4, 0x15,
- 0x9, 0x15, 0x4, 0x16, 0x9, 0x16, 0x4, 0x17, 0x9,
- 0x17, 0x4, 0x18, 0x9, 0x18, 0x4, 0x19, 0x9, 0x19,
- 0x4, 0x1a, 0x9, 0x1a, 0x4, 0x1b, 0x9, 0x1b, 0x4,
- 0x1c, 0x9, 0x1c, 0x4, 0x1d, 0x9, 0x1d, 0x4, 0x1e,
- 0x9, 0x1e, 0x4, 0x1f, 0x9, 0x1f, 0x4, 0x20, 0x9,
- 0x20, 0x4, 0x21, 0x9, 0x21, 0x4, 0x22, 0x9, 0x22,
- 0x4, 0x23, 0x9, 0x23, 0x4, 0x24, 0x9, 0x24, 0x4,
- 0x25, 0x9, 0x25, 0x4, 0x26, 0x9, 0x26, 0x4, 0x27,
- 0x9, 0x27, 0x4, 0x28, 0x9, 0x28, 0x4, 0x29, 0x9,
- 0x29, 0x4, 0x2a, 0x9, 0x2a, 0x4, 0x2b, 0x9, 0x2b,
- 0x4, 0x2c, 0x9, 0x2c, 0x4, 0x2d, 0x9, 0x2d, 0x4,
- 0x2e, 0x9, 0x2e, 0x4, 0x2f, 0x9, 0x2f, 0x4, 0x30,
- 0x9, 0x30, 0x4, 0x31, 0x9, 0x31, 0x4, 0x32, 0x9,
- 0x32, 0x4, 0x33, 0x9, 0x33, 0x4, 0x34, 0x9, 0x34,
- 0x4, 0x35, 0x9, 0x35, 0x4, 0x36, 0x9, 0x36, 0x4,
- 0x37, 0x9, 0x37, 0x4, 0x38, 0x9, 0x38, 0x4, 0x39,
- 0x9, 0x39, 0x4, 0x3a, 0x9, 0x3a, 0x4, 0x3b, 0x9,
- 0x3b, 0x4, 0x3c, 0x9, 0x3c, 0x4, 0x3d, 0x9, 0x3d,
- 0x4, 0x3e, 0x9, 0x3e, 0x4, 0x3f, 0x9, 0x3f, 0x4,
- 0x40, 0x9, 0x40, 0x4, 0x41, 0x9, 0x41, 0x4, 0x42,
- 0x9, 0x42, 0x4, 0x43, 0x9, 0x43, 0x4, 0x44, 0x9,
- 0x44, 0x4, 0x45, 0x9, 0x45, 0x4, 0x46, 0x9, 0x46,
- 0x4, 0x47, 0x9, 0x47, 0x4, 0x48, 0x9, 0x48, 0x4,
- 0x49, 0x9, 0x49, 0x4, 0x4a, 0x9, 0x4a, 0x4, 0x4b,
- 0x9, 0x4b, 0x4, 0x4c, 0x9, 0x4c, 0x4, 0x4d, 0x9,
- 0x4d, 0x3, 0x2, 0x3, 0x2, 0x5, 0x2, 0x9d, 0xa,
- 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2,
- 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3,
- 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x5, 0x2,
- 0xab, 0xa, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2,
- 0x7, 0x2, 0xb0, 0xa, 0x2, 0xc, 0x2, 0xe, 0x2,
- 0xb3, 0xb, 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3,
- 0x7, 0x3, 0xb8, 0xa, 0x3, 0xc, 0x3, 0xe, 0x3,
- 0xbb, 0xb, 0x3, 0x5, 0x3, 0xbd, 0xa, 0x3, 0x3,
- 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x5,
- 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3,
- 0x5, 0x3, 0x5, 0x3, 0x5, 0x7, 0x5, 0xcb, 0xa,
- 0x5, 0xc, 0x5, 0xe, 0x5, 0xce, 0xb, 0x5, 0x3,
- 0x5, 0x5, 0x5, 0xd1, 0xa, 0x5, 0x3, 0x6, 0x3,
- 0x6, 0x5, 0x6, 0xd5, 0xa, 0x6, 0x3, 0x6, 0x3,
- 0x6, 0x7, 0x6, 0xd9, 0xa, 0x6, 0xc, 0x6, 0xe,
- 0x6, 0xdc, 0xb, 0x6, 0x3, 0x6, 0x3, 0x6, 0x5,
- 0x6, 0xe0, 0xa, 0x6, 0x3, 0x6, 0x3, 0x6, 0x3,
- 0x6, 0x3, 0x6, 0x5, 0x6, 0xe6, 0xa, 0x6, 0x3,
- 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7, 0x3, 0x7,
- 0x5, 0x7, 0xed, 0xa, 0x7, 0x3, 0x8, 0x3, 0x8,
- 0x5, 0x8, 0xf1, 0xa, 0x8, 0x3, 0x9, 0x3, 0x9,
- 0x3, 0x9, 0x3, 0x9, 0x7, 0x9, 0xf7, 0xa, 0x9,
- 0xc, 0x9, 0xe, 0x9, 0xfa, 0xb, 0x9, 0x5, 0x9,
- 0xfc, 0xa, 0x9, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa,
- 0x3, 0xa, 0x7, 0xa, 0x102, 0xa, 0xa, 0xc, 0xa,
- 0xe, 0xa, 0x105, 0xb, 0xa, 0x5, 0xa, 0x107, 0xa,
- 0xa, 0x3, 0xb, 0x3, 0xb, 0x3, 0xb, 0x5, 0xb,
- 0x10c, 0xa, 0xb, 0x3, 0xc, 0x3, 0xc, 0x5, 0xc,
- 0x110, 0xa, 0xc, 0x3, 0xc, 0x3, 0xc, 0x7, 0xc,
- 0x114, 0xa, 0xc, 0xc, 0xc, 0xe, 0xc, 0x117, 0xb,
- 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc,
- 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3, 0xc, 0x3,
- 0xc, 0x3, 0xc, 0x5, 0xc, 0x123, 0xa, 0xc, 0x3,
- 0xd, 0x3, 0xd, 0x5, 0xd, 0x127, 0xa, 0xd, 0x3,
- 0xe, 0x3, 0xe, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf,
- 0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3,
- 0xf, 0x3, 0xf, 0x7, 0xf, 0x134, 0xa, 0xf, 0xc,
- 0xf, 0xe, 0xf, 0x137, 0xb, 0xf, 0x3, 0x10, 0x3,
- 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10,
- 0x7, 0x10, 0x13f, 0xa, 0x10, 0xc, 0x10, 0xe, 0x10,
- 0x142, 0xb, 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x11,
- 0x3, 0x11, 0x3, 0x11, 0x3, 0x11, 0x7, 0x11, 0x14a,
- 0xa, 0x11, 0xc, 0x11, 0xe, 0x11, 0x14d, 0xb, 0x11,
- 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3, 0x12, 0x3,
- 0x12, 0x3, 0x12, 0x7, 0x12, 0x155, 0xa, 0x12, 0xc,
- 0x12, 0xe, 0x12, 0x158, 0xb, 0x12, 0x3, 0x13, 0x3,
- 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13, 0x3, 0x13,
- 0x7, 0x13, 0x160, 0xa, 0x13, 0xc, 0x13, 0xe, 0x13,
- 0x163, 0xb, 0x13, 0x3, 0x14, 0x3, 0x14, 0x3, 0x14,
- 0x3, 0x14, 0x3, 0x14, 0x3, 0x14, 0x7, 0x14, 0x16b,
- 0xa, 0x14, 0xc, 0x14, 0xe, 0x14, 0x16e, 0xb, 0x14,
- 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3, 0x15, 0x3,
- 0x15, 0x3, 0x15, 0x7, 0x15, 0x176, 0xa, 0x15, 0xc,
- 0x15, 0xe, 0x15, 0x179, 0xb, 0x15, 0x3, 0x16, 0x3,
- 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16, 0x3, 0x16,
- 0x7, 0x16, 0x181, 0xa, 0x16, 0xc, 0x16, 0xe, 0x16,
- 0x184, 0xb, 0x16, 0x3, 0x17, 0x3, 0x17, 0x3, 0x17,
- 0x3, 0x17, 0x3, 0x17, 0x3, 0x17, 0x7, 0x17, 0x18c,
- 0xa, 0x17, 0xc, 0x17, 0xe, 0x17, 0x18f, 0xb, 0x17,
- 0x3, 0x18, 0x3, 0x18, 0x3, 0x18, 0x5, 0x18, 0x194,
- 0xa, 0x18, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3,
- 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19,
- 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x5, 0x19, 0x1a1,
- 0xa, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3,
- 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19, 0x3, 0x19,
- 0x7, 0x19, 0x1ab, 0xa, 0x19, 0xc, 0x19, 0xe, 0x19,
- 0x1ae, 0xb, 0x19, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a,
- 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x3,
- 0x1a, 0x3, 0x1a, 0x3, 0x1a, 0x5, 0x1a, 0x1ba, 0xa,
- 0x1a, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1b,
- 0x5, 0x1b, 0x1c0, 0xa, 0x1b, 0x5, 0x1b, 0x1c2, 0xa,
- 0x1b, 0x3, 0x1c, 0x3, 0x1c, 0x5, 0x1c, 0x1c6, 0xa,
- 0x1c, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1d,
- 0x3, 0x1d, 0x7, 0x1d, 0x1cd, 0xa, 0x1d, 0xc, 0x1d,
- 0xe, 0x1d, 0x1d0, 0xb, 0x1d, 0x5, 0x1d, 0x1d2, 0xa,
- 0x1d, 0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1e, 0x3, 0x1e,
- 0x3, 0x1e, 0x5, 0x1e, 0x1d9, 0xa, 0x1e, 0x5, 0x1e,
- 0x1db, 0xa, 0x1e, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f,
- 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3, 0x1f, 0x3,
- 0x1f, 0x5, 0x1f, 0x1e5, 0xa, 0x1f, 0x3, 0x20, 0x5,
- 0x20, 0x1e8, 0xa, 0x20, 0x3, 0x21, 0x3, 0x21, 0x3,
- 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21,
- 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x22, 0x3,
- 0x22, 0x5, 0x22, 0x1f6, 0xa, 0x22, 0x3, 0x22, 0x3,
- 0x22, 0x5, 0x22, 0x1fa, 0xa, 0x22, 0x3, 0x22, 0x3,
- 0x22, 0x3, 0x23, 0x5, 0x23, 0x1ff, 0xa, 0x23, 0x3,
- 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24,
- 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3,
- 0x25, 0x3, 0x25, 0x3, 0x26, 0x3, 0x26, 0x5, 0x26,
- 0x20e, 0xa, 0x26, 0x3, 0x26, 0x3, 0x26, 0x7, 0x26,
- 0x212, 0xa, 0x26, 0xc, 0x26, 0xe, 0x26, 0x215, 0xb,
- 0x26, 0x3, 0x26, 0x3, 0x26, 0x3, 0x27, 0x3, 0x27,
- 0x5, 0x27, 0x21b, 0xa, 0x27, 0x3, 0x27, 0x3, 0x27,
- 0x3, 0x27, 0x3, 0x28, 0x3, 0x28, 0x3, 0x29, 0x3,
- 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x29, 0x3, 0x2a,
- 0x3, 0x2a, 0x3, 0x2a, 0x5, 0x2a, 0x22a, 0xa, 0x2a,
- 0x3, 0x2b, 0x5, 0x2b, 0x22d, 0xa, 0x2b, 0x3, 0x2b,
- 0x3, 0x2b, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3,
- 0x2d, 0x5, 0x2d, 0x235, 0xa, 0x2d, 0x3, 0x2d, 0x3,
- 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2d,
- 0x5, 0x2d, 0x23d, 0xa, 0x2d, 0x3, 0x2e, 0x3, 0x2e,
- 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3,
- 0x2f, 0x3, 0x2f, 0x5, 0x2f, 0x247, 0xa, 0x2f, 0x3,
- 0x30, 0x3, 0x30, 0x3, 0x31, 0x3, 0x31, 0x3, 0x32,
- 0x3, 0x32, 0x3, 0x32, 0x5, 0x32, 0x250, 0xa, 0x32,
- 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3, 0x33, 0x3,
- 0x34, 0x3, 0x34, 0x3, 0x34, 0x6, 0x34, 0x259, 0xa,
- 0x34, 0xd, 0x34, 0xe, 0x34, 0x25a, 0x3, 0x35, 0x3,
- 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x35, 0x3, 0x35,
- 0x3, 0x35, 0x5, 0x35, 0x264, 0xa, 0x35, 0x3, 0x36,
- 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
- 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
- 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
- 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
- 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3,
- 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36, 0x3, 0x36,
- 0x3, 0x36, 0x5, 0x36, 0x283, 0xa, 0x36, 0x3, 0x37,
- 0x7, 0x37, 0x286, 0xa, 0x37, 0xc, 0x37, 0xe, 0x37,
- 0x289, 0xb, 0x37, 0x3, 0x38, 0x5, 0x38, 0x28c, 0xa,
- 0x38, 0x3, 0x38, 0x3, 0x38, 0x3, 0x38, 0x3, 0x38,
- 0x3, 0x39, 0x3, 0x39, 0x5, 0x39, 0x294, 0xa, 0x39,
- 0x3, 0x3a, 0x3, 0x3a, 0x3, 0x3b, 0x3, 0x3b, 0x3,
- 0x3b, 0x3, 0x3b, 0x3, 0x3b, 0x3, 0x3c, 0x7, 0x3c,
- 0x29e, 0xa, 0x3c, 0xc, 0x3c, 0xe, 0x3c, 0x2a1, 0xb,
- 0x3c, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3e,
- 0x3, 0x3e, 0x3, 0x3e, 0x3, 0x3f, 0x3, 0x3f, 0x3,
- 0x3f, 0x3, 0x40, 0x3, 0x40, 0x3, 0x40, 0x5, 0x40,
- 0x2af, 0xa, 0x40, 0x3, 0x40, 0x5, 0x40, 0x2b2, 0xa,
- 0x40, 0x3, 0x40, 0x5, 0x40, 0x2b5, 0xa, 0x40, 0x3,
- 0x40, 0x3, 0x40, 0x3, 0x41, 0x3, 0x41, 0x3, 0x41,
- 0x3, 0x41, 0x3, 0x41, 0x3, 0x41, 0x3, 0x42, 0x3,
- 0x42, 0x5, 0x42, 0x2c1, 0xa, 0x42, 0x3, 0x42, 0x3,
- 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x42,
- 0x3, 0x42, 0x3, 0x42, 0x3, 0x42, 0x3, 0x43, 0x3,
- 0x43, 0x3, 0x43, 0x5, 0x43, 0x2cf, 0xa, 0x43, 0x3,
- 0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x43,
- 0x3, 0x43, 0x3, 0x43, 0x3, 0x43, 0x3, 0x44, 0x3,
- 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44, 0x3, 0x44,
- 0x3, 0x44, 0x3, 0x45, 0x5, 0x45, 0x2e1, 0xa, 0x45,
- 0x3, 0x45, 0x3, 0x45, 0x3, 0x45, 0x3, 0x45, 0x3,
- 0x45, 0x3, 0x45, 0x3, 0x45, 0x5, 0x45, 0x2ea, 0xa,
- 0x45, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x46,
- 0x3, 0x46, 0x3, 0x46, 0x3, 0x46, 0x3, 0x47, 0x3,
- 0x47, 0x5, 0x47, 0x2f5, 0xa, 0x47, 0x3, 0x47, 0x3,
- 0x47, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47, 0x3, 0x47,
- 0x3, 0x47, 0x3, 0x47, 0x5, 0x47, 0x2ff, 0xa, 0x47,
- 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x48, 0x3,
- 0x48, 0x3, 0x48, 0x3, 0x48, 0x3, 0x49, 0x3, 0x49,
- 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3, 0x49, 0x3,
- 0x49, 0x3, 0x49, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a,
- 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4a, 0x3, 0x4b, 0x3,
- 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b,
- 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3, 0x4b, 0x3,
- 0x4b, 0x5, 0x4b, 0x321, 0xa, 0x4b, 0x3, 0x4c, 0x3,
- 0x4c, 0x3, 0x4c, 0x3, 0x4c, 0x7, 0x4c, 0x327, 0xa,
- 0x4c, 0xc, 0x4c, 0xe, 0x4c, 0x32a, 0xb, 0x4c, 0x3,
- 0x4c, 0x3, 0x4c, 0x3, 0x4d, 0x3, 0x4d, 0x7, 0x4d,
- 0x330, 0xa, 0x4d, 0xc, 0x4d, 0xe, 0x4d, 0x333, 0xb,
- 0x4d, 0x3, 0x4d, 0x2, 0xd, 0x2, 0x1c, 0x1e, 0x20,
- 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x30, 0x4e, 0x2,
- 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14,
- 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26,
- 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38,
- 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a,
- 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c,
- 0x5e, 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
- 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 0x80,
- 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92,
- 0x94, 0x96, 0x98, 0x2, 0xd, 0x3, 0x2, 0x3c, 0x3d,
- 0x4, 0x2, 0x36, 0x36, 0x41, 0x41, 0x3, 0x2, 0x42,
- 0x45, 0x3, 0x2, 0x46, 0x48, 0x3, 0x2, 0x37, 0x38,
- 0x3, 0x2, 0x39, 0x3b, 0x5, 0x2, 0x37, 0x38, 0x3e,
- 0x3e, 0x4d, 0x4d, 0x3, 0x2, 0x34, 0x35, 0x4, 0x2,
- 0x3f, 0x40, 0x4f, 0x4f, 0x3, 0x2, 0x2d, 0x2e, 0x3,
- 0x2, 0x30, 0x31, 0x2, 0x351, 0x2, 0xaa, 0x3, 0x2,
- 0x2, 0x2, 0x4, 0xbc, 0x3, 0x2, 0x2, 0x2, 0x6,
- 0xbe, 0x3, 0x2, 0x2, 0x2, 0x8, 0xd0, 0x3, 0x2,
- 0x2, 0x2, 0xa, 0xe5, 0x3, 0x2, 0x2, 0x2, 0xc,
- 0xe7, 0x3, 0x2, 0x2, 0x2, 0xe, 0xf0, 0x3, 0x2,
- 0x2, 0x2, 0x10, 0xfb, 0x3, 0x2, 0x2, 0x2, 0x12,
- 0x106, 0x3, 0x2, 0x2, 0x2, 0x14, 0x108, 0x3, 0x2,
- 0x2, 0x2, 0x16, 0x122, 0x3, 0x2, 0x2, 0x2, 0x18,
- 0x124, 0x3, 0x2, 0x2, 0x2, 0x1a, 0x128, 0x3, 0x2,
- 0x2, 0x2, 0x1c, 0x12a, 0x3, 0x2, 0x2, 0x2, 0x1e,
- 0x138, 0x3, 0x2, 0x2, 0x2, 0x20, 0x143, 0x3, 0x2,
- 0x2, 0x2, 0x22, 0x14e, 0x3, 0x2, 0x2, 0x2, 0x24,
- 0x159, 0x3, 0x2, 0x2, 0x2, 0x26, 0x164, 0x3, 0x2,
- 0x2, 0x2, 0x28, 0x16f, 0x3, 0x2, 0x2, 0x2, 0x2a,
- 0x17a, 0x3, 0x2, 0x2, 0x2, 0x2c, 0x185, 0x3, 0x2,
- 0x2, 0x2, 0x2e, 0x193, 0x3, 0x2, 0x2, 0x2, 0x30,
- 0x1a0, 0x3, 0x2, 0x2, 0x2, 0x32, 0x1b9, 0x3, 0x2,
- 0x2, 0x2, 0x34, 0x1c1, 0x3, 0x2, 0x2, 0x2, 0x36,
- 0x1c5, 0x3, 0x2, 0x2, 0x2, 0x38, 0x1c7, 0x3, 0x2,
- 0x2, 0x2, 0x3a, 0x1da, 0x3, 0x2, 0x2, 0x2, 0x3c,
- 0x1e4, 0x3, 0x2, 0x2, 0x2, 0x3e, 0x1e7, 0x3, 0x2,
- 0x2, 0x2, 0x40, 0x1e9, 0x3, 0x2, 0x2, 0x2, 0x42,
- 0x1f3, 0x3, 0x2, 0x2, 0x2, 0x44, 0x1fe, 0x3, 0x2,
- 0x2, 0x2, 0x46, 0x200, 0x3, 0x2, 0x2, 0x2, 0x48,
- 0x209, 0x3, 0x2, 0x2, 0x2, 0x4a, 0x20b, 0x3, 0x2,
- 0x2, 0x2, 0x4c, 0x218, 0x3, 0x2, 0x2, 0x2, 0x4e,
- 0x21f, 0x3, 0x2, 0x2, 0x2, 0x50, 0x221, 0x3, 0x2,
- 0x2, 0x2, 0x52, 0x226, 0x3, 0x2, 0x2, 0x2, 0x54,
- 0x22c, 0x3, 0x2, 0x2, 0x2, 0x56, 0x230, 0x3, 0x2,
- 0x2, 0x2, 0x58, 0x232, 0x3, 0x2, 0x2, 0x2, 0x5a,
- 0x23e, 0x3, 0x2, 0x2, 0x2, 0x5c, 0x244, 0x3, 0x2,
- 0x2, 0x2, 0x5e, 0x248, 0x3, 0x2, 0x2, 0x2, 0x60,
- 0x24a, 0x3, 0x2, 0x2, 0x2, 0x62, 0x24c, 0x3, 0x2,
- 0x2, 0x2, 0x64, 0x251, 0x3, 0x2, 0x2, 0x2, 0x66,
- 0x255, 0x3, 0x2, 0x2, 0x2, 0x68, 0x263, 0x3, 0x2,
- 0x2, 0x2, 0x6a, 0x282, 0x3, 0x2, 0x2, 0x2, 0x6c,
- 0x287, 0x3, 0x2, 0x2, 0x2, 0x6e, 0x28b, 0x3, 0x2,
- 0x2, 0x2, 0x70, 0x293, 0x3, 0x2, 0x2, 0x2, 0x72,
- 0x295, 0x3, 0x2, 0x2, 0x2, 0x74, 0x297, 0x3, 0x2,
- 0x2, 0x2, 0x76, 0x29f, 0x3, 0x2, 0x2, 0x2, 0x78,
- 0x2a2, 0x3, 0x2, 0x2, 0x2, 0x7a, 0x2a5, 0x3, 0x2,
- 0x2, 0x2, 0x7c, 0x2a8, 0x3, 0x2, 0x2, 0x2, 0x7e,
- 0x2ab, 0x3, 0x2, 0x2, 0x2, 0x80, 0x2b8, 0x3, 0x2,
- 0x2, 0x2, 0x82, 0x2be, 0x3, 0x2, 0x2, 0x2, 0x84,
- 0x2cb, 0x3, 0x2, 0x2, 0x2, 0x86, 0x2d8, 0x3, 0x2,
- 0x2, 0x2, 0x88, 0x2e0, 0x3, 0x2, 0x2, 0x2, 0x8a,
- 0x2eb, 0x3, 0x2, 0x2, 0x2, 0x8c, 0x2f4, 0x3, 0x2,
- 0x2, 0x2, 0x8e, 0x300, 0x3, 0x2, 0x2, 0x2, 0x90,
- 0x307, 0x3, 0x2, 0x2, 0x2, 0x92, 0x30f, 0x3, 0x2,
- 0x2, 0x2, 0x94, 0x320, 0x3, 0x2, 0x2, 0x2, 0x96,
- 0x322, 0x3, 0x2, 0x2, 0x2, 0x98, 0x331, 0x3, 0x2,
- 0x2, 0x2, 0x9a, 0x9c, 0x8, 0x2, 0x1, 0x2, 0x9b,
- 0x9d, 0x7, 0x22, 0x2, 0x2, 0x9c, 0x9b, 0x3, 0x2,
- 0x2, 0x2, 0x9c, 0x9d, 0x3, 0x2, 0x2, 0x2, 0x9d,
- 0x9e, 0x3, 0x2, 0x2, 0x2, 0x9e, 0xab, 0x7, 0x4f,
- 0x2, 0x2, 0x9f, 0xa0, 0x7, 0x19, 0x2, 0x2, 0xa0,
- 0xa1, 0x7, 0x3, 0x2, 0x2, 0xa1, 0xa2, 0x5, 0x4,
- 0x3, 0x2, 0xa2, 0xa3, 0x7, 0x4, 0x2, 0x2, 0xa3,
- 0xa4, 0x7, 0x5, 0x2, 0x2, 0xa4, 0xa5, 0x5, 0x2,
- 0x2, 0x5, 0xa5, 0xab, 0x3, 0x2, 0x2, 0x2, 0xa6,
- 0xa7, 0x7, 0x3, 0x2, 0x2, 0xa7, 0xa8, 0x5, 0x2,
- 0x2, 0x2, 0xa8, 0xa9, 0x7, 0x4, 0x2, 0x2, 0xa9,
- 0xab, 0x3, 0x2, 0x2, 0x2, 0xaa, 0x9a, 0x3, 0x2,
- 0x2, 0x2, 0xaa, 0x9f, 0x3, 0x2, 0x2, 0x2, 0xaa,
- 0xa6, 0x3, 0x2, 0x2, 0x2, 0xab, 0xb1, 0x3, 0x2,
- 0x2, 0x2, 0xac, 0xad, 0xc, 0x4, 0x2, 0x2, 0xad,
- 0xae, 0x7, 0x3c, 0x2, 0x2, 0xae, 0xb0, 0x5, 0x2,
- 0x2, 0x5, 0xaf, 0xac, 0x3, 0x2, 0x2, 0x2, 0xb0,
- 0xb3, 0x3, 0x2, 0x2, 0x2, 0xb1, 0xaf, 0x3, 0x2,
- 0x2, 0x2, 0xb1, 0xb2, 0x3, 0x2, 0x2, 0x2, 0xb2,
- 0x3, 0x3, 0x2, 0x2, 0x2, 0xb3, 0xb1, 0x3, 0x2,
- 0x2, 0x2, 0xb4, 0xb9, 0x5, 0x2, 0x2, 0x2, 0xb5,
- 0xb6, 0x7, 0x6, 0x2, 0x2, 0xb6, 0xb8, 0x5, 0x2,
- 0x2, 0x2, 0xb7, 0xb5, 0x3, 0x2, 0x2, 0x2, 0xb8,
- 0xbb, 0x3, 0x2, 0x2, 0x2, 0xb9, 0xb7, 0x3, 0x2,
- 0x2, 0x2, 0xb9, 0xba, 0x3, 0x2, 0x2, 0x2, 0xba,
- 0xbd, 0x3, 0x2, 0x2, 0x2, 0xbb, 0xb9, 0x3, 0x2,
- 0x2, 0x2, 0xbc, 0xb4, 0x3, 0x2, 0x2, 0x2, 0xbc,
- 0xbd, 0x3, 0x2, 0x2, 0x2, 0xbd, 0x5, 0x3, 0x2,
- 0x2, 0x2, 0xbe, 0xbf, 0x7, 0x42, 0x2, 0x2, 0xbf,
- 0xc0, 0x5, 0x4, 0x3, 0x2, 0xc0, 0xc1, 0x7, 0x44,
- 0x2, 0x2, 0xc1, 0x7, 0x3, 0x2, 0x2, 0x2, 0xc2,
- 0xc3, 0x7, 0x42, 0x2, 0x2, 0xc3, 0xc4, 0x7, 0x4f,
- 0x2, 0x2, 0xc4, 0xc5, 0x7, 0x7, 0x2, 0x2, 0xc5,
- 0xcc, 0x7, 0x8, 0x2, 0x2, 0xc6, 0xc7, 0x7, 0x6,
- 0x2, 0x2, 0xc7, 0xc8, 0x7, 0x4f, 0x2, 0x2, 0xc8,
- 0xc9, 0x7, 0x7, 0x2, 0x2, 0xc9, 0xcb, 0x7, 0x8,
- 0x2, 0x2, 0xca, 0xc6, 0x3, 0x2, 0x2, 0x2, 0xcb,
- 0xce, 0x3, 0x2, 0x2, 0x2, 0xcc, 0xca, 0x3, 0x2,
- 0x2, 0x2, 0xcc, 0xcd, 0x3, 0x2, 0x2, 0x2, 0xcd,
- 0xcf, 0x3, 0x2, 0x2, 0x2, 0xce, 0xcc, 0x3, 0x2,
- 0x2, 0x2, 0xcf, 0xd1, 0x7, 0x44, 0x2, 0x2, 0xd0,
- 0xc2, 0x3, 0x2, 0x2, 0x2, 0xd0, 0xd1, 0x3, 0x2,
- 0x2, 0x2, 0xd1, 0x9, 0x3, 0x2, 0x2, 0x2, 0xd2,
- 0xd4, 0x7, 0x3, 0x2, 0x2, 0xd3, 0xd5, 0x5, 0x2,
- 0x2, 0x2, 0xd4, 0xd3, 0x3, 0x2, 0x2, 0x2, 0xd4,
- 0xd5, 0x3, 0x2, 0x2, 0x2, 0xd5, 0xda, 0x3, 0x2,
- 0x2, 0x2, 0xd6, 0xd7, 0x7, 0x6, 0x2, 0x2, 0xd7,
- 0xd9, 0x5, 0x2, 0x2, 0x2, 0xd8, 0xd6, 0x3, 0x2,
- 0x2, 0x2, 0xd9, 0xdc, 0x3, 0x2, 0x2, 0x2, 0xda,
- 0xd8, 0x3, 0x2, 0x2, 0x2, 0xda, 0xdb, 0x3, 0x2,
- 0x2, 0x2, 0xdb, 0xdf, 0x3, 0x2, 0x2, 0x2, 0xdc,
- 0xda, 0x3, 0x2, 0x2, 0x2, 0xdd, 0xde, 0x7, 0x6,
- 0x2, 0x2, 0xde, 0xe0, 0x7, 0x49, 0x2, 0x2, 0xdf,
- 0xdd, 0x3, 0x2, 0x2, 0x2, 0xdf, 0xe0, 0x3, 0x2,
- 0x2, 0x2, 0xe0, 0xe1, 0x3, 0x2, 0x2, 0x2, 0xe1,
- 0xe6, 0x7, 0x4, 0x2, 0x2, 0xe2, 0xe3, 0x7, 0x3,
- 0x2, 0x2, 0xe3, 0xe4, 0x7, 0x49, 0x2, 0x2, 0xe4,
- 0xe6, 0x7, 0x4, 0x2, 0x2, 0xe5, 0xd2, 0x3, 0x2,
- 0x2, 0x2, 0xe5, 0xe2, 0x3, 0x2, 0x2, 0x2, 0xe6,
- 0xb, 0x3, 0x2, 0x2, 0x2, 0xe7, 0xec, 0x7, 0x4f,
- 0x2, 0x2, 0xe8, 0xe9, 0x7, 0x3, 0x2, 0x2, 0xe9,
- 0xea, 0x5, 0x4, 0x3, 0x2, 0xea, 0xeb, 0x7, 0x4,
- 0x2, 0x2, 0xeb, 0xed, 0x3, 0x2, 0x2, 0x2, 0xec,
- 0xe8, 0x3, 0x2, 0x2, 0x2, 0xec, 0xed, 0x3, 0x2,
- 0x2, 0x2, 0xed, 0xd, 0x3, 0x2, 0x2, 0x2, 0xee,
- 0xef, 0x7, 0x7, 0x2, 0x2, 0xef, 0xf1, 0x5, 0x2,
- 0x2, 0x2, 0xf0, 0xee, 0x3, 0x2, 0x2, 0x2, 0xf0,
- 0xf1, 0x3, 0x2, 0x2, 0x2, 0xf1, 0xf, 0x3, 0x2,
- 0x2, 0x2, 0xf2, 0xf3, 0x7, 0x29, 0x2, 0x2, 0xf3,
- 0xf8, 0x5, 0xc, 0x7, 0x2, 0xf4, 0xf5, 0x7, 0x6,
- 0x2, 0x2, 0xf5, 0xf7, 0x5, 0xc, 0x7, 0x2, 0xf6,
- 0xf4, 0x3, 0x2, 0x2, 0x2, 0xf7, 0xfa, 0x3, 0x2,
- 0x2, 0x2, 0xf8, 0xf6, 0x3, 0x2, 0x2, 0x2, 0xf8,
- 0xf9, 0x3, 0x2, 0x2, 0x2, 0xf9, 0xfc, 0x3, 0x2,
- 0x2, 0x2, 0xfa, 0xf8, 0x3, 0x2, 0x2, 0x2, 0xfb,
- 0xf2, 0x3, 0x2, 0x2, 0x2, 0xfb, 0xfc, 0x3, 0x2,
- 0x2, 0x2, 0xfc, 0x11, 0x3, 0x2, 0x2, 0x2, 0xfd,
- 0xfe, 0x7, 0x26, 0x2, 0x2, 0xfe, 0x103, 0x7, 0x4f,
- 0x2, 0x2, 0xff, 0x100, 0x7, 0x6, 0x2, 0x2, 0x100,
- 0x102, 0x7, 0x4f, 0x2, 0x2, 0x101, 0xff, 0x3, 0x2,
- 0x2, 0x2, 0x102, 0x105, 0x3, 0x2, 0x2, 0x2, 0x103,
- 0x101, 0x3, 0x2, 0x2, 0x2, 0x103, 0x104, 0x3, 0x2,
- 0x2, 0x2, 0x104, 0x107, 0x3, 0x2, 0x2, 0x2, 0x105,
- 0x103, 0x3, 0x2, 0x2, 0x2, 0x106, 0xfd, 0x3, 0x2,
- 0x2, 0x2, 0x106, 0x107, 0x3, 0x2, 0x2, 0x2, 0x107,
- 0x13, 0x3, 0x2, 0x2, 0x2, 0x108, 0x109, 0x7, 0x4f,
- 0x2, 0x2, 0x109, 0x10b, 0x7, 0x7, 0x2, 0x2, 0x10a,
- 0x10c, 0x5, 0x2, 0x2, 0x2, 0x10b, 0x10a, 0x3, 0x2,
- 0x2, 0x2, 0x10b, 0x10c, 0x3, 0x2, 0x2, 0x2, 0x10c,
- 0x15, 0x3, 0x2, 0x2, 0x2, 0x10d, 0x10f, 0x7, 0x3,
- 0x2, 0x2, 0x10e, 0x110, 0x5, 0x14, 0xb, 0x2, 0x10f,
- 0x10e, 0x3, 0x2, 0x2, 0x2, 0x10f, 0x110, 0x3, 0x2,
- 0x2, 0x2, 0x110, 0x115, 0x3, 0x2, 0x2, 0x2, 0x111,
- 0x112, 0x7, 0x6, 0x2, 0x2, 0x112, 0x114, 0x5, 0x14,
- 0xb, 0x2, 0x113, 0x111, 0x3, 0x2, 0x2, 0x2, 0x114,
- 0x117, 0x3, 0x2, 0x2, 0x2, 0x115, 0x113, 0x3, 0x2,
- 0x2, 0x2, 0x115, 0x116, 0x3, 0x2, 0x2, 0x2, 0x116,
- 0x118, 0x3, 0x2, 0x2, 0x2, 0x117, 0x115, 0x3, 0x2,
- 0x2, 0x2, 0x118, 0x123, 0x7, 0x4, 0x2, 0x2, 0x119,
- 0x11a, 0x7, 0x3, 0x2, 0x2, 0x11a, 0x11b, 0x5, 0x14,
- 0xb, 0x2, 0x11b, 0x11c, 0x7, 0x6, 0x2, 0x2, 0x11c,
- 0x11d, 0x5, 0x14, 0xb, 0x2, 0x11d, 0x11e, 0x7, 0x6,
- 0x2, 0x2, 0x11e, 0x11f, 0x7, 0x49, 0x2, 0x2, 0x11f,
- 0x120, 0x7, 0x4f, 0x2, 0x2, 0x120, 0x121, 0x7, 0x4,
- 0x2, 0x2, 0x121, 0x123, 0x3, 0x2, 0x2, 0x2, 0x122,
- 0x10d, 0x3, 0x2, 0x2, 0x2, 0x122, 0x119, 0x3, 0x2,
- 0x2, 0x2, 0x123, 0x17, 0x3, 0x2, 0x2, 0x2, 0x124,
- 0x126, 0x7, 0x4f, 0x2, 0x2, 0x125, 0x127, 0x5, 0x16,
- 0xc, 0x2, 0x126, 0x125, 0x3, 0x2, 0x2, 0x2, 0x126,
- 0x127, 0x3, 0x2, 0x2, 0x2, 0x127, 0x19, 0x3, 0x2,
- 0x2, 0x2, 0x128, 0x129, 0x5, 0x1c, 0xf, 0x2, 0x129,
- 0x1b, 0x3, 0x2, 0x2, 0x2, 0x12a, 0x12b, 0x8, 0xf,
- 0x1, 0x2, 0x12b, 0x12c, 0x5, 0x1e, 0x10, 0x2, 0x12c,
- 0x135, 0x3, 0x2, 0x2, 0x2, 0x12d, 0x12e, 0xc, 0x3,
- 0x2, 0x2, 0x12e, 0x12f, 0x7, 0x9, 0x2, 0x2, 0x12f,
- 0x130, 0x5, 0x1e, 0x10, 0x2, 0x130, 0x131, 0x7, 0x7,
- 0x2, 0x2, 0x131, 0x132, 0x5, 0x1e, 0x10, 0x2, 0x132,
- 0x134, 0x3, 0x2, 0x2, 0x2, 0x133, 0x12d, 0x3, 0x2,
- 0x2, 0x2, 0x134, 0x137, 0x3, 0x2, 0x2, 0x2, 0x135,
- 0x133, 0x3, 0x2, 0x2, 0x2, 0x135, 0x136, 0x3, 0x2,
- 0x2, 0x2, 0x136, 0x1d, 0x3, 0x2, 0x2, 0x2, 0x137,
- 0x135, 0x3, 0x2, 0x2, 0x2, 0x138, 0x139, 0x8, 0x10,
- 0x1, 0x2, 0x139, 0x13a, 0x5, 0x20, 0x11, 0x2, 0x13a,
- 0x140, 0x3, 0x2, 0x2, 0x2, 0x13b, 0x13c, 0xc, 0x3,
- 0x2, 0x2, 0x13c, 0x13d, 0x7, 0xa, 0x2, 0x2, 0x13d,
- 0x13f, 0x5, 0x20, 0x11, 0x2, 0x13e, 0x13b, 0x3, 0x2,
- 0x2, 0x2, 0x13f, 0x142, 0x3, 0x2, 0x2, 0x2, 0x140,
- 0x13e, 0x3, 0x2, 0x2, 0x2, 0x140, 0x141, 0x3, 0x2,
- 0x2, 0x2, 0x141, 0x1f, 0x3, 0x2, 0x2, 0x2, 0x142,
- 0x140, 0x3, 0x2, 0x2, 0x2, 0x143, 0x144, 0x8, 0x11,
- 0x1, 0x2, 0x144, 0x145, 0x5, 0x22, 0x12, 0x2, 0x145,
- 0x14b, 0x3, 0x2, 0x2, 0x2, 0x146, 0x147, 0xc, 0x3,
- 0x2, 0x2, 0x147, 0x148, 0x7, 0xb, 0x2, 0x2, 0x148,
- 0x14a, 0x5, 0x22, 0x12, 0x2, 0x149, 0x146, 0x3, 0x2,
- 0x2, 0x2, 0x14a, 0x14d, 0x3, 0x2, 0x2, 0x2, 0x14b,
- 0x149, 0x3, 0x2, 0x2, 0x2, 0x14b, 0x14c, 0x3, 0x2,
- 0x2, 0x2, 0x14c, 0x21, 0x3, 0x2, 0x2, 0x2, 0x14d,
- 0x14b, 0x3, 0x2, 0x2, 0x2, 0x14e, 0x14f, 0x8, 0x12,
- 0x1, 0x2, 0x14f, 0x150, 0x5, 0x24, 0x13, 0x2, 0x150,
- 0x156, 0x3, 0x2, 0x2, 0x2, 0x151, 0x152, 0xc, 0x3,
- 0x2, 0x2, 0x152, 0x153, 0x9, 0x2, 0x2, 0x2, 0x153,
- 0x155, 0x5, 0x24, 0x13, 0x2, 0x154, 0x151, 0x3, 0x2,
- 0x2, 0x2, 0x155, 0x158, 0x3, 0x2, 0x2, 0x2, 0x156,
- 0x154, 0x3, 0x2, 0x2, 0x2, 0x156, 0x157, 0x3, 0x2,
- 0x2, 0x2, 0x157, 0x23, 0x3, 0x2, 0x2, 0x2, 0x158,
- 0x156, 0x3, 0x2, 0x2, 0x2, 0x159, 0x15a, 0x8, 0x13,
- 0x1, 0x2, 0x15a, 0x15b, 0x5, 0x26, 0x14, 0x2, 0x15b,
- 0x161, 0x3, 0x2, 0x2, 0x2, 0x15c, 0x15d, 0xc, 0x3,
- 0x2, 0x2, 0x15d, 0x15e, 0x9, 0x3, 0x2, 0x2, 0x15e,
- 0x160, 0x5, 0x26, 0x14, 0x2, 0x15f, 0x15c, 0x3, 0x2,
- 0x2, 0x2, 0x160, 0x163, 0x3, 0x2, 0x2, 0x2, 0x161,
- 0x15f, 0x3, 0x2, 0x2, 0x2, 0x161, 0x162, 0x3, 0x2,
- 0x2, 0x2, 0x162, 0x25, 0x3, 0x2, 0x2, 0x2, 0x163,
- 0x161, 0x3, 0x2, 0x2, 0x2, 0x164, 0x165, 0x8, 0x14,
- 0x1, 0x2, 0x165, 0x166, 0x5, 0x28, 0x15, 0x2, 0x166,
- 0x16c, 0x3, 0x2, 0x2, 0x2, 0x167, 0x168, 0xc, 0x3,
- 0x2, 0x2, 0x168, 0x169, 0x9, 0x4, 0x2, 0x2, 0x169,
- 0x16b, 0x5, 0x28, 0x15, 0x2, 0x16a, 0x167, 0x3, 0x2,
- 0x2, 0x2, 0x16b, 0x16e, 0x3, 0x2, 0x2, 0x2, 0x16c,
- 0x16a, 0x3, 0x2, 0x2, 0x2, 0x16c, 0x16d, 0x3, 0x2,
- 0x2, 0x2, 0x16d, 0x27, 0x3, 0x2, 0x2, 0x2, 0x16e,
- 0x16c, 0x3, 0x2, 0x2, 0x2, 0x16f, 0x170, 0x8, 0x15,
- 0x1, 0x2, 0x170, 0x171, 0x5, 0x2a, 0x16, 0x2, 0x171,
- 0x177, 0x3, 0x2, 0x2, 0x2, 0x172, 0x173, 0xc, 0x3,
- 0x2, 0x2, 0x173, 0x174, 0x9, 0x5, 0x2, 0x2, 0x174,
- 0x176, 0x5, 0x2a, 0x16, 0x2, 0x175, 0x172, 0x3, 0x2,
- 0x2, 0x2, 0x176, 0x179, 0x3, 0x2, 0x2, 0x2, 0x177,
- 0x175, 0x3, 0x2, 0x2, 0x2, 0x177, 0x178, 0x3, 0x2,
- 0x2, 0x2, 0x178, 0x29, 0x3, 0x2, 0x2, 0x2, 0x179,
- 0x177, 0x3, 0x2, 0x2, 0x2, 0x17a, 0x17b, 0x8, 0x16,
- 0x1, 0x2, 0x17b, 0x17c, 0x5, 0x2c, 0x17, 0x2, 0x17c,
- 0x182, 0x3, 0x2, 0x2, 0x2, 0x17d, 0x17e, 0xc, 0x3,
- 0x2, 0x2, 0x17e, 0x17f, 0x9, 0x6, 0x2, 0x2, 0x17f,
- 0x181, 0x5, 0x2c, 0x17, 0x2, 0x180, 0x17d, 0x3, 0x2,
- 0x2, 0x2, 0x181, 0x184, 0x3, 0x2, 0x2, 0x2, 0x182,
- 0x180, 0x3, 0x2, 0x2, 0x2, 0x182, 0x183, 0x3, 0x2,
- 0x2, 0x2, 0x183, 0x2b, 0x3, 0x2, 0x2, 0x2, 0x184,
- 0x182, 0x3, 0x2, 0x2, 0x2, 0x185, 0x186, 0x8, 0x17,
- 0x1, 0x2, 0x186, 0x187, 0x5, 0x2e, 0x18, 0x2, 0x187,
- 0x18d, 0x3, 0x2, 0x2, 0x2, 0x188, 0x189, 0xc, 0x3,
- 0x2, 0x2, 0x189, 0x18a, 0x9, 0x7, 0x2, 0x2, 0x18a,
- 0x18c, 0x5, 0x2e, 0x18, 0x2, 0x18b, 0x188, 0x3, 0x2,
- 0x2, 0x2, 0x18c, 0x18f, 0x3, 0x2, 0x2, 0x2, 0x18d,
- 0x18b, 0x3, 0x2, 0x2, 0x2, 0x18d, 0x18e, 0x3, 0x2,
- 0x2, 0x2, 0x18e, 0x2d, 0x3, 0x2, 0x2, 0x2, 0x18f,
- 0x18d, 0x3, 0x2, 0x2, 0x2, 0x190, 0x194, 0x5, 0x36,
- 0x1c, 0x2, 0x191, 0x192, 0x9, 0x8, 0x2, 0x2, 0x192,
- 0x194, 0x5, 0x2e, 0x18, 0x2, 0x193, 0x190, 0x3, 0x2,
- 0x2, 0x2, 0x193, 0x191, 0x3, 0x2, 0x2, 0x2, 0x194,
- 0x2f, 0x3, 0x2, 0x2, 0x2, 0x195, 0x196, 0x8, 0x19,
- 0x1, 0x2, 0x196, 0x1a1, 0x7, 0x4f, 0x2, 0x2, 0x197,
- 0x198, 0x5, 0x3c, 0x1f, 0x2, 0x198, 0x199, 0x7, 0xc,
- 0x2, 0x2, 0x199, 0x19a, 0x7, 0x4f, 0x2, 0x2, 0x19a,
- 0x1a1, 0x3, 0x2, 0x2, 0x2, 0x19b, 0x19c, 0x5, 0x3c,
- 0x1f, 0x2, 0x19c, 0x19d, 0x7, 0xd, 0x2, 0x2, 0x19d,
- 0x19e, 0x5, 0x1a, 0xe, 0x2, 0x19e, 0x19f, 0x7, 0xe,
- 0x2, 0x2, 0x19f, 0x1a1, 0x3, 0x2, 0x2, 0x2, 0x1a0,
- 0x195, 0x3, 0x2, 0x2, 0x2, 0x1a0, 0x197, 0x3, 0x2,
- 0x2, 0x2, 0x1a0, 0x19b, 0x3, 0x2, 0x2, 0x2, 0x1a1,
- 0x1ac, 0x3, 0x2, 0x2, 0x2, 0x1a2, 0x1a3, 0xc, 0x6,
- 0x2, 0x2, 0x1a3, 0x1a4, 0x7, 0xc, 0x2, 0x2, 0x1a4,
- 0x1ab, 0x7, 0x4f, 0x2, 0x2, 0x1a5, 0x1a6, 0xc, 0x4,
- 0x2, 0x2, 0x1a6, 0x1a7, 0x7, 0xd, 0x2, 0x2, 0x1a7,
- 0x1a8, 0x5, 0x1a, 0xe, 0x2, 0x1a8, 0x1a9, 0x7, 0xe,
- 0x2, 0x2, 0x1a9, 0x1ab, 0x3, 0x2, 0x2, 0x2, 0x1aa,
- 0x1a2, 0x3, 0x2, 0x2, 0x2, 0x1aa, 0x1a5, 0x3, 0x2,
- 0x2, 0x2, 0x1ab, 0x1ae, 0x3, 0x2, 0x2, 0x2, 0x1ac,
- 0x1aa, 0x3, 0x2, 0x2, 0x2, 0x1ac, 0x1ad, 0x3, 0x2,
- 0x2, 0x2, 0x1ad, 0x31, 0x3, 0x2, 0x2, 0x2, 0x1ae,
- 0x1ac, 0x3, 0x2, 0x2, 0x2, 0x1af, 0x1b0, 0x7, 0x4b,
- 0x2, 0x2, 0x1b0, 0x1ba, 0x5, 0x30, 0x19, 0x2, 0x1b1,
- 0x1b2, 0x7, 0x4c, 0x2, 0x2, 0x1b2, 0x1ba, 0x5, 0x30,
- 0x19, 0x2, 0x1b3, 0x1b4, 0x5, 0x30, 0x19, 0x2, 0x1b4,
- 0x1b5, 0x7, 0x4b, 0x2, 0x2, 0x1b5, 0x1ba, 0x3, 0x2,
- 0x2, 0x2, 0x1b6, 0x1b7, 0x5, 0x30, 0x19, 0x2, 0x1b7,
- 0x1b8, 0x7, 0x4c, 0x2, 0x2, 0x1b8, 0x1ba, 0x3, 0x2,
- 0x2, 0x2, 0x1b9, 0x1af, 0x3, 0x2, 0x2, 0x2, 0x1b9,
- 0x1b1, 0x3, 0x2, 0x2, 0x2, 0x1b9, 0x1b3, 0x3, 0x2,
- 0x2, 0x2, 0x1b9, 0x1b6, 0x3, 0x2, 0x2, 0x2, 0x1ba,
- 0x33, 0x3, 0x2, 0x2, 0x2, 0x1bb, 0x1c2, 0x5, 0x32,
- 0x1a, 0x2, 0x1bc, 0x1bf, 0x5, 0x30, 0x19, 0x2, 0x1bd,
- 0x1be, 0x9, 0x9, 0x2, 0x2, 0x1be, 0x1c0, 0x5, 0x1a,
- 0xe, 0x2, 0x1bf, 0x1bd, 0x3, 0x2, 0x2, 0x2, 0x1bf,
- 0x1c0, 0x3, 0x2, 0x2, 0x2, 0x1c0, 0x1c2, 0x3, 0x2,
- 0x2, 0x2, 0x1c1, 0x1bb, 0x3, 0x2, 0x2, 0x2, 0x1c1,
- 0x1bc, 0x3, 0x2, 0x2, 0x2, 0x1c2, 0x35, 0x3, 0x2,
- 0x2, 0x2, 0x1c3, 0x1c6, 0x5, 0x3a, 0x1e, 0x2, 0x1c4,
- 0x1c6, 0x5, 0x34, 0x1b, 0x2, 0x1c5, 0x1c3, 0x3, 0x2,
- 0x2, 0x2, 0x1c5, 0x1c4, 0x3, 0x2, 0x2, 0x2, 0x1c6,
- 0x37, 0x3, 0x2, 0x2, 0x2, 0x1c7, 0x1c8, 0x7, 0x4f,
- 0x2, 0x2, 0x1c8, 0x1d1, 0x7, 0xf, 0x2, 0x2, 0x1c9,
- 0x1ce, 0x5, 0x1a, 0xe, 0x2, 0x1ca, 0x1cb, 0x7, 0x6,
- 0x2, 0x2, 0x1cb, 0x1cd, 0x5, 0x1a, 0xe, 0x2, 0x1cc,
- 0x1ca, 0x3, 0x2, 0x2, 0x2, 0x1cd, 0x1d0, 0x3, 0x2,
- 0x2, 0x2, 0x1ce, 0x1cc, 0x3, 0x2, 0x2, 0x2, 0x1ce,
- 0x1cf, 0x3, 0x2, 0x2, 0x2, 0x1cf, 0x1d2, 0x3, 0x2,
- 0x2, 0x2, 0x1d0, 0x1ce, 0x3, 0x2, 0x2, 0x2, 0x1d1,
- 0x1c9, 0x3, 0x2, 0x2, 0x2, 0x1d1, 0x1d2, 0x3, 0x2,
- 0x2, 0x2, 0x1d2, 0x1d3, 0x3, 0x2, 0x2, 0x2, 0x1d3,
- 0x1d4, 0x7, 0x10, 0x2, 0x2, 0x1d4, 0x39, 0x3, 0x2,
- 0x2, 0x2, 0x1d5, 0x1db, 0x5, 0x3c, 0x1f, 0x2, 0x1d6,
- 0x1d8, 0x7, 0x4f, 0x2, 0x2, 0x1d7, 0x1d9, 0x5, 0x6,
- 0x4, 0x2, 0x1d8, 0x1d7, 0x3, 0x2, 0x2, 0x2, 0x1d8,
- 0x1d9, 0x3, 0x2, 0x2, 0x2, 0x1d9, 0x1db, 0x3, 0x2,
- 0x2, 0x2, 0x1da, 0x1d5, 0x3, 0x2, 0x2, 0x2, 0x1da,
- 0x1d6, 0x3, 0x2, 0x2, 0x2, 0x1db, 0x3b, 0x3, 0x2,
- 0x2, 0x2, 0x1dc, 0x1e5, 0x5, 0x4c, 0x27, 0x2, 0x1dd,
- 0x1e5, 0x5, 0x38, 0x1d, 0x2, 0x1de, 0x1e5, 0x7, 0x53,
- 0x2, 0x2, 0x1df, 0x1e5, 0x7, 0x4e, 0x2, 0x2, 0x1e0,
- 0x1e1, 0x7, 0x3, 0x2, 0x2, 0x1e1, 0x1e2, 0x5, 0x1a,
- 0xe, 0x2, 0x1e2, 0x1e3, 0x7, 0x4, 0x2, 0x2, 0x1e3,
- 0x1e5, 0x3, 0x2, 0x2, 0x2, 0x1e4, 0x1dc, 0x3, 0x2,
- 0x2, 0x2, 0x1e4, 0x1dd, 0x3, 0x2, 0x2, 0x2, 0x1e4,
- 0x1de, 0x3, 0x2, 0x2, 0x2, 0x1e4, 0x1df, 0x3, 0x2,
- 0x2, 0x2, 0x1e4, 0x1e0, 0x3, 0x2, 0x2, 0x2, 0x1e5,
- 0x3d, 0x3, 0x2, 0x2, 0x2, 0x1e6, 0x1e8, 0x5, 0x52,
- 0x2a, 0x2, 0x1e7, 0x1e6, 0x3, 0x2, 0x2, 0x2, 0x1e7,
- 0x1e8, 0x3, 0x2, 0x2, 0x2, 0x1e8, 0x3f, 0x3, 0x2,
- 0x2, 0x2, 0x1e9, 0x1ea, 0x7, 0x1f, 0x2, 0x2, 0x1ea,
- 0x1eb, 0x7, 0x3, 0x2, 0x2, 0x1eb, 0x1ec, 0x5, 0x3e,
- 0x20, 0x2, 0x1ec, 0x1ed, 0x7, 0x11, 0x2, 0x2, 0x1ed,
- 0x1ee, 0x5, 0x1a, 0xe, 0x2, 0x1ee, 0x1ef, 0x7, 0x11,
- 0x2, 0x2, 0x1ef, 0x1f0, 0x5, 0x34, 0x1b, 0x2, 0x1f0,
- 0x1f1, 0x7, 0x4, 0x2, 0x2, 0x1f1, 0x1f2, 0x5, 0x70,
- 0x39, 0x2, 0x1f2, 0x41, 0x3, 0x2, 0x2, 0x2, 0x1f3,
- 0x1f5, 0x7, 0xd, 0x2, 0x2, 0x1f4, 0x1f6, 0x5, 0x1a,
- 0xe, 0x2, 0x1f5, 0x1f4, 0x3, 0x2, 0x2, 0x2, 0x1f5,
- 0x1f6, 0x3, 0x2, 0x2, 0x2, 0x1f6, 0x1f7, 0x3, 0x2,
- 0x2, 0x2, 0x1f7, 0x1f9, 0x7, 0x7, 0x2, 0x2, 0x1f8,
- 0x1fa, 0x5, 0x1a, 0xe, 0x2, 0x1f9, 0x1f8, 0x3, 0x2,
- 0x2, 0x2, 0x1f9, 0x1fa, 0x3, 0x2, 0x2, 0x2, 0x1fa,
- 0x1fb, 0x3, 0x2, 0x2, 0x2, 0x1fb, 0x1fc, 0x7, 0xe,
- 0x2, 0x2, 0x1fc, 0x43, 0x3, 0x2, 0x2, 0x2, 0x1fd,
- 0x1ff, 0x5, 0x42, 0x22, 0x2, 0x1fe, 0x1fd, 0x3, 0x2,
- 0x2, 0x2, 0x1fe, 0x1ff, 0x3, 0x2, 0x2, 0x2, 0x1ff,
- 0x45, 0x3, 0x2, 0x2, 0x2, 0x200, 0x201, 0x7, 0x1f,
- 0x2, 0x2, 0x201, 0x202, 0x7, 0x3, 0x2, 0x2, 0x202,
- 0x203, 0x5, 0x50, 0x29, 0x2, 0x203, 0x204, 0x7, 0x12,
- 0x2, 0x2, 0x204, 0x205, 0x5, 0x1a, 0xe, 0x2, 0x205,
- 0x206, 0x5, 0x44, 0x23, 0x2, 0x206, 0x207, 0x7, 0x4,
- 0x2, 0x2, 0x207, 0x208, 0x5, 0x70, 0x39, 0x2, 0x208,
- 0x47, 0x3, 0x2, 0x2, 0x2, 0x209, 0x20a, 0x5, 0x1a,
- 0xe, 0x2, 0x20a, 0x49, 0x3, 0x2, 0x2, 0x2, 0x20b,
- 0x20d, 0x7, 0x3, 0x2, 0x2, 0x20c, 0x20e, 0x5, 0x48,
- 0x25, 0x2, 0x20d, 0x20c, 0x3, 0x2, 0x2, 0x2, 0x20d,
- 0x20e, 0x3, 0x2, 0x2, 0x2, 0x20e, 0x213, 0x3, 0x2,
- 0x2, 0x2, 0x20f, 0x210, 0x7, 0x6, 0x2, 0x2, 0x210,
- 0x212, 0x5, 0x48, 0x25, 0x2, 0x211, 0x20f, 0x3, 0x2,
- 0x2, 0x2, 0x212, 0x215, 0x3, 0x2, 0x2, 0x2, 0x213,
- 0x211, 0x3, 0x2, 0x2, 0x2, 0x213, 0x214, 0x3, 0x2,
- 0x2, 0x2, 0x214, 0x216, 0x3, 0x2, 0x2, 0x2, 0x215,
- 0x213, 0x3, 0x2, 0x2, 0x2, 0x216, 0x217, 0x7, 0x4,
- 0x2, 0x2, 0x217, 0x4b, 0x3, 0x2, 0x2, 0x2, 0x218,
- 0x21a, 0x9, 0xa, 0x2, 0x2, 0x219, 0x21b, 0x5, 0x6,
- 0x4, 0x2, 0x21a, 0x219, 0x3, 0x2, 0x2, 0x2, 0x21a,
- 0x21b, 0x3, 0x2, 0x2, 0x2, 0x21b, 0x21c, 0x3, 0x2,
- 0x2, 0x2, 0x21c, 0x21d, 0x5, 0x4a, 0x26, 0x2, 0x21d,
- 0x21e, 0x5, 0x12, 0xa, 0x2, 0x21e, 0x4d, 0x3, 0x2,
- 0x2, 0x2, 0x21f, 0x220, 0x7, 0x4f, 0x2, 0x2, 0x220,
- 0x4f, 0x3, 0x2, 0x2, 0x2, 0x221, 0x222, 0x9, 0xb,
- 0x2, 0x2, 0x222, 0x223, 0x7, 0x4f, 0x2, 0x2, 0x223,
- 0x224, 0x7, 0x7, 0x2, 0x2, 0x224, 0x225, 0x5, 0x2,
- 0x2, 0x2, 0x225, 0x51, 0x3, 0x2, 0x2, 0x2, 0x226,
- 0x229, 0x5, 0x50, 0x29, 0x2, 0x227, 0x228, 0x7, 0x34,
- 0x2, 0x2, 0x228, 0x22a, 0x5, 0x1a, 0xe, 0x2, 0x229,
- 0x227, 0x3, 0x2, 0x2, 0x2, 0x229, 0x22a, 0x3, 0x2,
- 0x2, 0x2, 0x22a, 0x53, 0x3, 0x2, 0x2, 0x2, 0x22b,
- 0x22d, 0x7, 0x2a, 0x2, 0x2, 0x22c, 0x22b, 0x3, 0x2,
- 0x2, 0x2, 0x22c, 0x22d, 0x3, 0x2, 0x2, 0x2, 0x22d,
- 0x22e, 0x3, 0x2, 0x2, 0x2, 0x22e, 0x22f, 0x5, 0x4c,
- 0x27, 0x2, 0x22f, 0x55, 0x3, 0x2, 0x2, 0x2, 0x230,
- 0x231, 0x5, 0x34, 0x1b, 0x2, 0x231, 0x57, 0x3, 0x2,
- 0x2, 0x2, 0x232, 0x234, 0x7, 0x1e, 0x2, 0x2, 0x233,
- 0x235, 0x7, 0x22, 0x2, 0x2, 0x234, 0x233, 0x3, 0x2,
- 0x2, 0x2, 0x234, 0x235, 0x3, 0x2, 0x2, 0x2, 0x235,
- 0x236, 0x3, 0x2, 0x2, 0x2, 0x236, 0x237, 0x7, 0x3,
- 0x2, 0x2, 0x237, 0x238, 0x5, 0x1a, 0xe, 0x2, 0x238,
- 0x239, 0x7, 0x4, 0x2, 0x2, 0x239, 0x23c, 0x5, 0x70,
- 0x39, 0x2, 0x23a, 0x23b, 0x7, 0x13, 0x2, 0x2, 0x23b,
- 0x23d, 0x5, 0x70, 0x39, 0x2, 0x23c, 0x23a, 0x3, 0x2,
- 0x2, 0x2, 0x23c, 0x23d, 0x3, 0x2, 0x2, 0x2, 0x23d,
- 0x59, 0x3, 0x2, 0x2, 0x2, 0x23e, 0x23f, 0x7, 0x20,
- 0x2, 0x2, 0x23f, 0x240, 0x7, 0x3, 0x2, 0x2, 0x240,
- 0x241, 0x5, 0x1a, 0xe, 0x2, 0x241, 0x242, 0x7, 0x4,
- 0x2, 0x2, 0x242, 0x243, 0x5, 0x70, 0x39, 0x2, 0x243,
- 0x5b, 0x3, 0x2, 0x2, 0x2, 0x244, 0x246, 0x7, 0x21,
- 0x2, 0x2, 0x245, 0x247, 0x5, 0x1a, 0xe, 0x2, 0x246,
- 0x245, 0x3, 0x2, 0x2, 0x2, 0x246, 0x247, 0x3, 0x2,
- 0x2, 0x2, 0x247, 0x5d, 0x3, 0x2, 0x2, 0x2, 0x248,
- 0x249, 0x7, 0x24, 0x2, 0x2, 0x249, 0x5f, 0x3, 0x2,
- 0x2, 0x2, 0x24a, 0x24b, 0x7, 0x23, 0x2, 0x2, 0x24b,
- 0x61, 0x3, 0x2, 0x2, 0x2, 0x24c, 0x24d, 0x7, 0x25,
- 0x2, 0x2, 0x24d, 0x24f, 0x5, 0x4e, 0x28, 0x2, 0x24e,
- 0x250, 0x5, 0x4a, 0x26, 0x2, 0x24f, 0x24e, 0x3, 0x2,
- 0x2, 0x2, 0x24f, 0x250, 0x3, 0x2, 0x2, 0x2, 0x250,
- 0x63, 0x3, 0x2, 0x2, 0x2, 0x251, 0x252, 0x7, 0x28,
- 0x2, 0x2, 0x252, 0x253, 0x5, 0x18, 0xd, 0x2, 0x253,
- 0x254, 0x5, 0x70, 0x39, 0x2, 0x254, 0x65, 0x3, 0x2,
- 0x2, 0x2, 0x255, 0x256, 0x7, 0x27, 0x2, 0x2, 0x256,
- 0x258, 0x5, 0x70, 0x39, 0x2, 0x257, 0x259, 0x5, 0x64,
- 0x33, 0x2, 0x258, 0x257, 0x3, 0x2, 0x2, 0x2, 0x259,
- 0x25a, 0x3, 0x2, 0x2, 0x2, 0x25a, 0x258, 0x3, 0x2,
- 0x2, 0x2, 0x25a, 0x25b, 0x3, 0x2, 0x2, 0x2, 0x25b,
- 0x67, 0x3, 0x2, 0x2, 0x2, 0x25c, 0x25d, 0x9, 0xc,
- 0x2, 0x2, 0x25d, 0x25e, 0x7, 0x3, 0x2, 0x2, 0x25e,
- 0x25f, 0x5, 0x1a, 0xe, 0x2, 0x25f, 0x260, 0x7, 0x4,
- 0x2, 0x2, 0x260, 0x264, 0x3, 0x2, 0x2, 0x2, 0x261,
- 0x264, 0x7, 0x32, 0x2, 0x2, 0x262, 0x264, 0x7, 0x33,
- 0x2, 0x2, 0x263, 0x25c, 0x3, 0x2, 0x2, 0x2, 0x263,
- 0x261, 0x3, 0x2, 0x2, 0x2, 0x263, 0x262, 0x3, 0x2,
- 0x2, 0x2, 0x264, 0x69, 0x3, 0x2, 0x2, 0x2, 0x265,
- 0x266, 0x5, 0x52, 0x2a, 0x2, 0x266, 0x267, 0x7, 0x11,
- 0x2, 0x2, 0x267, 0x283, 0x3, 0x2, 0x2, 0x2, 0x268,
- 0x269, 0x5, 0x54, 0x2b, 0x2, 0x269, 0x26a, 0x7, 0x11,
- 0x2, 0x2, 0x26a, 0x283, 0x3, 0x2, 0x2, 0x2, 0x26b,
- 0x26c, 0x5, 0x56, 0x2c, 0x2, 0x26c, 0x26d, 0x7, 0x11,
- 0x2, 0x2, 0x26d, 0x283, 0x3, 0x2, 0x2, 0x2, 0x26e,
- 0x26f, 0x5, 0x5c, 0x2f, 0x2, 0x26f, 0x270, 0x7, 0x11,
- 0x2, 0x2, 0x270, 0x283, 0x3, 0x2, 0x2, 0x2, 0x271,
- 0x272, 0x5, 0x5e, 0x30, 0x2, 0x272, 0x273, 0x7, 0x11,
- 0x2, 0x2, 0x273, 0x283, 0x3, 0x2, 0x2, 0x2, 0x274,
- 0x275, 0x5, 0x60, 0x31, 0x2, 0x275, 0x276, 0x7, 0x11,
- 0x2, 0x2, 0x276, 0x283, 0x3, 0x2, 0x2, 0x2, 0x277,
- 0x278, 0x5, 0x62, 0x32, 0x2, 0x278, 0x279, 0x7, 0x11,
- 0x2, 0x2, 0x279, 0x283, 0x3, 0x2, 0x2, 0x2, 0x27a,
- 0x283, 0x5, 0x58, 0x2d, 0x2, 0x27b, 0x27c, 0x5, 0x68,
- 0x35, 0x2, 0x27c, 0x27d, 0x7, 0x11, 0x2, 0x2, 0x27d,
- 0x283, 0x3, 0x2, 0x2, 0x2, 0x27e, 0x283, 0x5, 0x5a,
- 0x2e, 0x2, 0x27f, 0x283, 0x5, 0x46, 0x24, 0x2, 0x280,
- 0x283, 0x5, 0x40, 0x21, 0x2, 0x281, 0x283, 0x5, 0x66,
- 0x34, 0x2, 0x282, 0x265, 0x3, 0x2, 0x2, 0x2, 0x282,
- 0x268, 0x3, 0x2, 0x2, 0x2, 0x282, 0x26b, 0x3, 0x2,
- 0x2, 0x2, 0x282, 0x26e, 0x3, 0x2, 0x2, 0x2, 0x282,
- 0x271, 0x3, 0x2, 0x2, 0x2, 0x282, 0x274, 0x3, 0x2,
- 0x2, 0x2, 0x282, 0x277, 0x3, 0x2, 0x2, 0x2, 0x282,
- 0x27a, 0x3, 0x2, 0x2, 0x2, 0x282, 0x27b, 0x3, 0x2,
- 0x2, 0x2, 0x282, 0x27e, 0x3, 0x2, 0x2, 0x2, 0x282,
- 0x27f, 0x3, 0x2, 0x2, 0x2, 0x282, 0x280, 0x3, 0x2,
- 0x2, 0x2, 0x282, 0x281, 0x3, 0x2, 0x2, 0x2, 0x283,
- 0x6b, 0x3, 0x2, 0x2, 0x2, 0x284, 0x286, 0x5, 0x6a,
- 0x36, 0x2, 0x285, 0x284, 0x3, 0x2, 0x2, 0x2, 0x286,
- 0x289, 0x3, 0x2, 0x2, 0x2, 0x287, 0x285, 0x3, 0x2,
- 0x2, 0x2, 0x287, 0x288, 0x3, 0x2, 0x2, 0x2, 0x288,
- 0x6d, 0x3, 0x2, 0x2, 0x2, 0x289, 0x287, 0x3, 0x2,
- 0x2, 0x2, 0x28a, 0x28c, 0x7, 0x1d, 0x2, 0x2, 0x28b,
- 0x28a, 0x3, 0x2, 0x2, 0x2, 0x28b, 0x28c, 0x3, 0x2,
- 0x2, 0x2, 0x28c, 0x28d, 0x3, 0x2, 0x2, 0x2, 0x28d,
- 0x28e, 0x7, 0xf, 0x2, 0x2, 0x28e, 0x28f, 0x5, 0x6c,
- 0x37, 0x2, 0x28f, 0x290, 0x7, 0x10, 0x2, 0x2, 0x290,
- 0x6f, 0x3, 0x2, 0x2, 0x2, 0x291, 0x294, 0x5, 0x6a,
- 0x36, 0x2, 0x292, 0x294, 0x5, 0x6e, 0x38, 0x2, 0x293,
- 0x291, 0x3, 0x2, 0x2, 0x2, 0x293, 0x292, 0x3, 0x2,
- 0x2, 0x2, 0x294, 0x71, 0x3, 0x2, 0x2, 0x2, 0x295,
- 0x296, 0x5, 0x6e, 0x38, 0x2, 0x296, 0x73, 0x3, 0x2,
- 0x2, 0x2, 0x297, 0x298, 0x7, 0x4f, 0x2, 0x2, 0x298,
- 0x299, 0x7, 0x7, 0x2, 0x2, 0x299, 0x29a, 0x5, 0x2,
- 0x2, 0x2, 0x29a, 0x29b, 0x7, 0x11, 0x2, 0x2, 0x29b,
- 0x75, 0x3, 0x2, 0x2, 0x2, 0x29c, 0x29e, 0x5, 0x74,
- 0x3b, 0x2, 0x29d, 0x29c, 0x3, 0x2, 0x2, 0x2, 0x29e,
- 0x2a1, 0x3, 0x2, 0x2, 0x2, 0x29f, 0x29d, 0x3, 0x2,
- 0x2, 0x2, 0x29f, 0x2a0, 0x3, 0x2, 0x2, 0x2, 0x2a0,
- 0x77, 0x3, 0x2, 0x2, 0x2, 0x2a1, 0x29f, 0x3, 0x2,
- 0x2, 0x2, 0x2a2, 0x2a3, 0x7, 0x14, 0x2, 0x2, 0x2a3,
- 0x2a4, 0x7, 0x4f, 0x2, 0x2, 0x2a4, 0x79, 0x3, 0x2,
- 0x2, 0x2, 0x2a5, 0x2a6, 0x7, 0x15, 0x2, 0x2, 0x2a6,
- 0x2a7, 0x7, 0x4e, 0x2, 0x2, 0x2a7, 0x7b, 0x3, 0x2,
- 0x2, 0x2, 0x2a8, 0x2a9, 0x7, 0x22, 0x2, 0x2, 0x2a9,
- 0x2aa, 0x7, 0x4e, 0x2, 0x2, 0x2aa, 0x7d, 0x3, 0x2,
- 0x2, 0x2, 0x2ab, 0x2ac, 0x7, 0x8, 0x2, 0x2, 0x2ac,
- 0x2ae, 0x7, 0x4f, 0x2, 0x2, 0x2ad, 0x2af, 0x5, 0x78,
- 0x3d, 0x2, 0x2ae, 0x2ad, 0x3, 0x2, 0x2, 0x2, 0x2ae,
- 0x2af, 0x3, 0x2, 0x2, 0x2, 0x2af, 0x2b1, 0x3, 0x2,
- 0x2, 0x2, 0x2b0, 0x2b2, 0x5, 0x7a, 0x3e, 0x2, 0x2b1,
- 0x2b0, 0x3, 0x2, 0x2, 0x2, 0x2b1, 0x2b2, 0x3, 0x2,
- 0x2, 0x2, 0x2b2, 0x2b4, 0x3, 0x2, 0x2, 0x2, 0x2b3,
- 0x2b5, 0x5, 0x7c, 0x3f, 0x2, 0x2b4, 0x2b3, 0x3, 0x2,
- 0x2, 0x2, 0x2b4, 0x2b5, 0x3, 0x2, 0x2, 0x2, 0x2b5,
- 0x2b6, 0x3, 0x2, 0x2, 0x2, 0x2b6, 0x2b7, 0x7, 0x11,
- 0x2, 0x2, 0x2b7, 0x7f, 0x3, 0x2, 0x2, 0x2, 0x2b8,
- 0x2b9, 0x7, 0x8, 0x2, 0x2, 0x2b9, 0x2ba, 0x7, 0x4f,
- 0x2, 0x2, 0x2ba, 0x2bb, 0x7, 0x34, 0x2, 0x2, 0x2bb,
- 0x2bc, 0x5, 0x2, 0x2, 0x2, 0x2bc, 0x2bd, 0x7, 0x11,
- 0x2, 0x2, 0x2bd, 0x81, 0x3, 0x2, 0x2, 0x2, 0x2be,
- 0x2c0, 0x7, 0x2f, 0x2, 0x2, 0x2bf, 0x2c1, 0x7, 0x1c,
- 0x2, 0x2, 0x2c0, 0x2bf, 0x3, 0x2, 0x2, 0x2, 0x2c0,
- 0x2c1, 0x3, 0x2, 0x2, 0x2, 0x2c1, 0x2c2, 0x3, 0x2,
- 0x2, 0x2, 0x2c2, 0x2c3, 0x7, 0x19, 0x2, 0x2, 0x2c3,
- 0x2c4, 0x7, 0x4f, 0x2, 0x2, 0x2c4, 0x2c5, 0x5, 0x8,
- 0x5, 0x2, 0x2c5, 0x2c6, 0x7, 0x3, 0x2, 0x2, 0x2c6,
- 0x2c7, 0x5, 0x4, 0x3, 0x2, 0x2c7, 0x2c8, 0x7, 0x4,
- 0x2, 0x2, 0x2c8, 0x2c9, 0x5, 0xe, 0x8, 0x2, 0x2c9,
- 0x2ca, 0x7, 0x11, 0x2, 0x2, 0x2ca, 0x83, 0x3, 0x2,
- 0x2, 0x2, 0x2cb, 0x2ce, 0x7, 0x2f, 0x2, 0x2, 0x2cc,
- 0x2cd, 0x7, 0x16, 0x2, 0x2, 0x2cd, 0x2cf, 0x7, 0x4e,
- 0x2, 0x2, 0x2ce, 0x2cc, 0x3, 0x2, 0x2, 0x2, 0x2ce,
- 0x2cf, 0x3, 0x2, 0x2, 0x2, 0x2cf, 0x2d0, 0x3, 0x2,
- 0x2, 0x2, 0x2d0, 0x2d1, 0x7, 0x18, 0x2, 0x2, 0x2d1,
- 0x2d2, 0x7, 0x4f, 0x2, 0x2, 0x2d2, 0x2d3, 0x5, 0x8,
- 0x5, 0x2, 0x2d3, 0x2d4, 0x5, 0xa, 0x6, 0x2, 0x2d4,
- 0x2d5, 0x5, 0xe, 0x8, 0x2, 0x2d5, 0x2d6, 0x5, 0x10,
- 0x9, 0x2, 0x2d6, 0x2d7, 0x7, 0x11, 0x2, 0x2, 0x2d7,
- 0x85, 0x3, 0x2, 0x2, 0x2, 0x2d8, 0x2d9, 0x7, 0x2f,
- 0x2, 0x2, 0x2d9, 0x2da, 0x7, 0x1a, 0x2, 0x2, 0x2da,
- 0x2db, 0x7, 0x4f, 0x2, 0x2, 0x2db, 0x2dc, 0x5, 0xa,
- 0x6, 0x2, 0x2dc, 0x2dd, 0x5, 0xe, 0x8, 0x2, 0x2dd,
- 0x2de, 0x7, 0x11, 0x2, 0x2, 0x2de, 0x87, 0x3, 0x2,
- 0x2, 0x2, 0x2df, 0x2e1, 0x7, 0x1c, 0x2, 0x2, 0x2e0,
- 0x2df, 0x3, 0x2, 0x2, 0x2, 0x2e0, 0x2e1, 0x3, 0x2,
- 0x2, 0x2, 0x2e1, 0x2e2, 0x3, 0x2, 0x2, 0x2, 0x2e2,
- 0x2e3, 0x7, 0x19, 0x2, 0x2, 0x2e3, 0x2e4, 0x7, 0x4f,
- 0x2, 0x2, 0x2e4, 0x2e5, 0x5, 0x8, 0x5, 0x2, 0x2e5,
- 0x2e6, 0x5, 0x16, 0xc, 0x2, 0x2e6, 0x2e9, 0x5, 0xe,
- 0x8, 0x2, 0x2e7, 0x2ea, 0x5, 0x72, 0x3a, 0x2, 0x2e8,
- 0x2ea, 0x7, 0x11, 0x2, 0x2, 0x2e9, 0x2e7, 0x3, 0x2,
- 0x2, 0x2, 0x2e9, 0x2e8, 0x3, 0x2, 0x2, 0x2, 0x2ea,
- 0x89, 0x3, 0x2, 0x2, 0x2, 0x2eb, 0x2ec, 0x7, 0x4f,
- 0x2, 0x2, 0x2ec, 0x2ed, 0x5, 0x6, 0x4, 0x2, 0x2ed,
- 0x2ee, 0x5, 0x16, 0xc, 0x2, 0x2ee, 0x2ef, 0x5, 0xe,
- 0x8, 0x2, 0x2ef, 0x2f0, 0x5, 0x10, 0x9, 0x2, 0x2f0,
- 0x2f1, 0x5, 0x72, 0x3a, 0x2, 0x2f1, 0x8b, 0x3, 0x2,
- 0x2, 0x2, 0x2f2, 0x2f3, 0x7, 0x16, 0x2, 0x2, 0x2f3,
- 0x2f5, 0x7, 0x4e, 0x2, 0x2, 0x2f4, 0x2f2, 0x3, 0x2,
- 0x2, 0x2, 0x2f4, 0x2f5, 0x3, 0x2, 0x2, 0x2, 0x2f5,
- 0x2f6, 0x3, 0x2, 0x2, 0x2, 0x2f6, 0x2f7, 0x7, 0x18,
- 0x2, 0x2, 0x2f7, 0x2f8, 0x7, 0x4f, 0x2, 0x2, 0x2f8,
- 0x2f9, 0x5, 0x8, 0x5, 0x2, 0x2f9, 0x2fa, 0x5, 0x16,
- 0xc, 0x2, 0x2fa, 0x2fb, 0x5, 0xe, 0x8, 0x2, 0x2fb,
- 0x2fe, 0x5, 0x10, 0x9, 0x2, 0x2fc, 0x2ff, 0x5, 0x72,
- 0x3a, 0x2, 0x2fd, 0x2ff, 0x7, 0x11, 0x2, 0x2, 0x2fe,
- 0x2fc, 0x3, 0x2, 0x2, 0x2, 0x2fe, 0x2fd, 0x3, 0x2,
- 0x2, 0x2, 0x2ff, 0x8d, 0x3, 0x2, 0x2, 0x2, 0x300,
- 0x301, 0x7, 0x2e, 0x2, 0x2, 0x301, 0x302, 0x7, 0x4f,
- 0x2, 0x2, 0x302, 0x303, 0x7, 0x7, 0x2, 0x2, 0x303,
- 0x304, 0x5, 0x2, 0x2, 0x2, 0x304, 0x305, 0x5, 0x7a,
- 0x3e, 0x2, 0x305, 0x306, 0x7, 0x11, 0x2, 0x2, 0x306,
- 0x8f, 0x3, 0x2, 0x2, 0x2, 0x307, 0x308, 0x7, 0x2e,
- 0x2, 0x2, 0x308, 0x309, 0x7, 0x4f, 0x2, 0x2, 0x309,
- 0x30a, 0x7, 0x7, 0x2, 0x2, 0x30a, 0x30b, 0x5, 0x2,
- 0x2, 0x2, 0x30b, 0x30c, 0x7, 0x34, 0x2, 0x2, 0x30c,
- 0x30d, 0x5, 0x1a, 0xe, 0x2, 0x30d, 0x30e, 0x7, 0x11,
- 0x2, 0x2, 0x30e, 0x91, 0x3, 0x2, 0x2, 0x2, 0x30f,
- 0x310, 0x7, 0x17, 0x2, 0x2, 0x310, 0x311, 0x7, 0x4f,
- 0x2, 0x2, 0x311, 0x312, 0x7, 0xf, 0x2, 0x2, 0x312,
- 0x313, 0x5, 0x76, 0x3c, 0x2, 0x313, 0x314, 0x7, 0x10,
- 0x2, 0x2, 0x314, 0x93, 0x3, 0x2, 0x2, 0x2, 0x315,
- 0x321, 0x5, 0x92, 0x4a, 0x2, 0x316, 0x321, 0x5, 0x7e,
- 0x40, 0x2, 0x317, 0x321, 0x5, 0x80, 0x41, 0x2, 0x318,
- 0x321, 0x5, 0x88, 0x45, 0x2, 0x319, 0x321, 0x5, 0x8a,
- 0x46, 0x2, 0x31a, 0x321, 0x5, 0x8c, 0x47, 0x2, 0x31b,
- 0x321, 0x5, 0x84, 0x43, 0x2, 0x31c, 0x321, 0x5, 0x82,
- 0x42, 0x2, 0x31d, 0x321, 0x5, 0x86, 0x44, 0x2, 0x31e,
- 0x321, 0x5, 0x8e, 0x48, 0x2, 0x31f, 0x321, 0x5, 0x90,
- 0x49, 0x2, 0x320, 0x315, 0x3, 0x2, 0x2, 0x2, 0x320,
- 0x316, 0x3, 0x2, 0x2, 0x2, 0x320, 0x317, 0x3, 0x2,
- 0x2, 0x2, 0x320, 0x318, 0x3, 0x2, 0x2, 0x2, 0x320,
- 0x319, 0x3, 0x2, 0x2, 0x2, 0x320, 0x31a, 0x3, 0x2,
- 0x2, 0x2, 0x320, 0x31b, 0x3, 0x2, 0x2, 0x2, 0x320,
- 0x31c, 0x3, 0x2, 0x2, 0x2, 0x320, 0x31d, 0x3, 0x2,
- 0x2, 0x2, 0x320, 0x31e, 0x3, 0x2, 0x2, 0x2, 0x320,
- 0x31f, 0x3, 0x2, 0x2, 0x2, 0x321, 0x95, 0x3, 0x2,
- 0x2, 0x2, 0x322, 0x323, 0x7, 0x1b, 0x2, 0x2, 0x323,
- 0x324, 0x7, 0x4f, 0x2, 0x2, 0x324, 0x328, 0x7, 0xf,
- 0x2, 0x2, 0x325, 0x327, 0x5, 0x94, 0x4b, 0x2, 0x326,
- 0x325, 0x3, 0x2, 0x2, 0x2, 0x327, 0x32a, 0x3, 0x2,
- 0x2, 0x2, 0x328, 0x326, 0x3, 0x2, 0x2, 0x2, 0x328,
- 0x329, 0x3, 0x2, 0x2, 0x2, 0x329, 0x32b, 0x3, 0x2,
- 0x2, 0x2, 0x32a, 0x328, 0x3, 0x2, 0x2, 0x2, 0x32b,
- 0x32c, 0x7, 0x10, 0x2, 0x2, 0x32c, 0x97, 0x3, 0x2,
- 0x2, 0x2, 0x32d, 0x330, 0x5, 0x96, 0x4c, 0x2, 0x32e,
- 0x330, 0x5, 0x94, 0x4b, 0x2, 0x32f, 0x32d, 0x3, 0x2,
- 0x2, 0x2, 0x32f, 0x32e, 0x3, 0x2, 0x2, 0x2, 0x330,
- 0x333, 0x3, 0x2, 0x2, 0x2, 0x331, 0x32f, 0x3, 0x2,
- 0x2, 0x2, 0x331, 0x332, 0x3, 0x2, 0x2, 0x2, 0x332,
- 0x99, 0x3, 0x2, 0x2, 0x2, 0x333, 0x331, 0x3, 0x2,
- 0x2, 0x2, 0x4f, 0x9c, 0xaa, 0xb1, 0xb9, 0xbc, 0xcc,
- 0xd0, 0xd4, 0xda, 0xdf, 0xe5, 0xec, 0xf0, 0xf8, 0xfb,
- 0x103, 0x106, 0x10b, 0x10f, 0x115, 0x122, 0x126, 0x135, 0x140,
- 0x14b, 0x156, 0x161, 0x16c, 0x177, 0x182, 0x18d, 0x193, 0x1a0,
- 0x1aa, 0x1ac, 0x1b9, 0x1bf, 0x1c1, 0x1c5, 0x1ce, 0x1d1, 0x1d8,
- 0x1da, 0x1e4, 0x1e7, 0x1f5, 0x1f9, 0x1fe, 0x20d, 0x213, 0x21a,
- 0x229, 0x22c, 0x234, 0x23c, 0x246, 0x24f, 0x25a, 0x263, 0x282,
- 0x287, 0x28b, 0x293, 0x29f, 0x2ae, 0x2b1, 0x2b4, 0x2c0, 0x2ce,
- 0x2e0, 0x2e9, 0x2f4, 0x2fe, 0x320, 0x328, 0x32f, 0x331,
- };
-
- atn::ATNDeserializer deserializer;
- _atn = deserializer.deserialize(_serializedATN);
-
- size_t count = _atn.getNumberOfDecisions();
- _decisionToDFA.reserve(count);
- for (size_t i = 0; i < count; i++) {
- _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
- }
-}
-
-TorqueParser::Initializer TorqueParser::_init;
diff --git a/deps/v8/src/torque/TorqueParser.h b/deps/v8/src/torque/TorqueParser.h
deleted file mode 100644
index bca847835f..0000000000
--- a/deps/v8/src/torque/TorqueParser.h
+++ /dev/null
@@ -1,1635 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_TORQUE_TORQUEPARSER_H_
-#define V8_TORQUE_TORQUEPARSER_H_
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#pragma once
-
-#include "./antlr4-runtime.h"
-
-class TorqueParser : public antlr4::Parser {
- public:
- enum {
- T__0 = 1,
- T__1 = 2,
- T__2 = 3,
- T__3 = 4,
- T__4 = 5,
- T__5 = 6,
- T__6 = 7,
- T__7 = 8,
- T__8 = 9,
- T__9 = 10,
- T__10 = 11,
- T__11 = 12,
- T__12 = 13,
- T__13 = 14,
- T__14 = 15,
- T__15 = 16,
- T__16 = 17,
- T__17 = 18,
- T__18 = 19,
- T__19 = 20,
- T__20 = 21,
- MACRO = 22,
- BUILTIN = 23,
- RUNTIME = 24,
- MODULE = 25,
- JAVASCRIPT = 26,
- DEFERRED = 27,
- IF = 28,
- FOR = 29,
- WHILE = 30,
- RETURN = 31,
- CONSTEXPR = 32,
- CONTINUE = 33,
- BREAK = 34,
- GOTO = 35,
- OTHERWISE = 36,
- TRY = 37,
- LABEL = 38,
- LABELS = 39,
- TAIL = 40,
- ISNT = 41,
- IS = 42,
- LET = 43,
- CONST = 44,
- EXTERN = 45,
- ASSERT_TOKEN = 46,
- CHECK_TOKEN = 47,
- UNREACHABLE_TOKEN = 48,
- DEBUG_TOKEN = 49,
- ASSIGNMENT = 50,
- ASSIGNMENT_OPERATOR = 51,
- EQUAL = 52,
- PLUS = 53,
- MINUS = 54,
- MULTIPLY = 55,
- DIVIDE = 56,
- MODULO = 57,
- BIT_OR = 58,
- BIT_AND = 59,
- BIT_NOT = 60,
- MAX = 61,
- MIN = 62,
- NOT_EQUAL = 63,
- LESS_THAN = 64,
- LESS_THAN_EQUAL = 65,
- GREATER_THAN = 66,
- GREATER_THAN_EQUAL = 67,
- SHIFT_LEFT = 68,
- SHIFT_RIGHT = 69,
- SHIFT_RIGHT_ARITHMETIC = 70,
- VARARGS = 71,
- EQUALITY_OPERATOR = 72,
- INCREMENT = 73,
- DECREMENT = 74,
- NOT = 75,
- STRING_LITERAL = 76,
- IDENTIFIER = 77,
- WS = 78,
- BLOCK_COMMENT = 79,
- LINE_COMMENT = 80,
- DECIMAL_LITERAL = 81
- };
-
- enum {
- RuleType = 0,
- RuleTypeList = 1,
- RuleGenericSpecializationTypeList = 2,
- RuleOptionalGenericTypeList = 3,
- RuleTypeListMaybeVarArgs = 4,
- RuleLabelParameter = 5,
- RuleOptionalType = 6,
- RuleOptionalLabelList = 7,
- RuleOptionalOtherwise = 8,
- RuleParameter = 9,
- RuleParameterList = 10,
- RuleLabelDeclaration = 11,
- RuleExpression = 12,
- RuleConditionalExpression = 13,
- RuleLogicalORExpression = 14,
- RuleLogicalANDExpression = 15,
- RuleBitwiseExpression = 16,
- RuleEqualityExpression = 17,
- RuleRelationalExpression = 18,
- RuleShiftExpression = 19,
- RuleAdditiveExpression = 20,
- RuleMultiplicativeExpression = 21,
- RuleUnaryExpression = 22,
- RuleLocationExpression = 23,
- RuleIncrementDecrement = 24,
- RuleAssignment = 25,
- RuleAssignmentExpression = 26,
- RuleStructExpression = 27,
- RuleFunctionPointerExpression = 28,
- RulePrimaryExpression = 29,
- RuleForInitialization = 30,
- RuleForLoop = 31,
- RuleRangeSpecifier = 32,
- RuleForOfRange = 33,
- RuleForOfLoop = 34,
- RuleArgument = 35,
- RuleArgumentList = 36,
- RuleHelperCall = 37,
- RuleLabelReference = 38,
- RuleVariableDeclaration = 39,
- RuleVariableDeclarationWithInitialization = 40,
- RuleHelperCallStatement = 41,
- RuleExpressionStatement = 42,
- RuleIfStatement = 43,
- RuleWhileLoop = 44,
- RuleReturnStatement = 45,
- RuleBreakStatement = 46,
- RuleContinueStatement = 47,
- RuleGotoStatement = 48,
- RuleHandlerWithStatement = 49,
- RuleTryLabelStatement = 50,
- RuleDiagnosticStatement = 51,
- RuleStatement = 52,
- RuleStatementList = 53,
- RuleStatementScope = 54,
- RuleStatementBlock = 55,
- RuleHelperBody = 56,
- RuleFieldDeclaration = 57,
- RuleFieldListDeclaration = 58,
- RuleExtendsDeclaration = 59,
- RuleGeneratesDeclaration = 60,
- RuleConstexprDeclaration = 61,
- RuleTypeDeclaration = 62,
- RuleTypeAliasDeclaration = 63,
- RuleExternalBuiltin = 64,
- RuleExternalMacro = 65,
- RuleExternalRuntime = 66,
- RuleBuiltinDeclaration = 67,
- RuleGenericSpecialization = 68,
- RuleMacroDeclaration = 69,
- RuleExternConstDeclaration = 70,
- RuleConstDeclaration = 71,
- RuleStructDeclaration = 72,
- RuleDeclaration = 73,
- RuleModuleDeclaration = 74,
- RuleFile = 75
- };
-
- explicit TorqueParser(antlr4::TokenStream* input);
- ~TorqueParser();
-
- std::string getGrammarFileName() const override;
- const antlr4::atn::ATN& getATN() const override { return _atn; };
- const std::vector<std::string>& getTokenNames() const override {
- return _tokenNames;
- }; // deprecated: use vocabulary instead.
- const std::vector<std::string>& getRuleNames() const override;
- antlr4::dfa::Vocabulary& getVocabulary() const override;
-
- class TypeContext;
- class TypeListContext;
- class GenericSpecializationTypeListContext;
- class OptionalGenericTypeListContext;
- class TypeListMaybeVarArgsContext;
- class LabelParameterContext;
- class OptionalTypeContext;
- class OptionalLabelListContext;
- class OptionalOtherwiseContext;
- class ParameterContext;
- class ParameterListContext;
- class LabelDeclarationContext;
- class ExpressionContext;
- class ConditionalExpressionContext;
- class LogicalORExpressionContext;
- class LogicalANDExpressionContext;
- class BitwiseExpressionContext;
- class EqualityExpressionContext;
- class RelationalExpressionContext;
- class ShiftExpressionContext;
- class AdditiveExpressionContext;
- class MultiplicativeExpressionContext;
- class UnaryExpressionContext;
- class LocationExpressionContext;
- class IncrementDecrementContext;
- class AssignmentContext;
- class AssignmentExpressionContext;
- class StructExpressionContext;
- class FunctionPointerExpressionContext;
- class PrimaryExpressionContext;
- class ForInitializationContext;
- class ForLoopContext;
- class RangeSpecifierContext;
- class ForOfRangeContext;
- class ForOfLoopContext;
- class ArgumentContext;
- class ArgumentListContext;
- class HelperCallContext;
- class LabelReferenceContext;
- class VariableDeclarationContext;
- class VariableDeclarationWithInitializationContext;
- class HelperCallStatementContext;
- class ExpressionStatementContext;
- class IfStatementContext;
- class WhileLoopContext;
- class ReturnStatementContext;
- class BreakStatementContext;
- class ContinueStatementContext;
- class GotoStatementContext;
- class HandlerWithStatementContext;
- class TryLabelStatementContext;
- class DiagnosticStatementContext;
- class StatementContext;
- class StatementListContext;
- class StatementScopeContext;
- class StatementBlockContext;
- class HelperBodyContext;
- class FieldDeclarationContext;
- class FieldListDeclarationContext;
- class ExtendsDeclarationContext;
- class GeneratesDeclarationContext;
- class ConstexprDeclarationContext;
- class TypeDeclarationContext;
- class TypeAliasDeclarationContext;
- class ExternalBuiltinContext;
- class ExternalMacroContext;
- class ExternalRuntimeContext;
- class BuiltinDeclarationContext;
- class GenericSpecializationContext;
- class MacroDeclarationContext;
- class ExternConstDeclarationContext;
- class ConstDeclarationContext;
- class StructDeclarationContext;
- class DeclarationContext;
- class ModuleDeclarationContext;
- class FileContext;
-
- class TypeContext : public antlr4::ParserRuleContext {
- public:
- TypeContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- antlr4::tree::TerminalNode* CONSTEXPR();
- antlr4::tree::TerminalNode* BUILTIN();
- TypeListContext* typeList();
- std::vector<TypeContext*> type();
- TypeContext* type(size_t i);
- antlr4::tree::TerminalNode* BIT_OR();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- TypeContext* type();
- TypeContext* type(int precedence);
- class TypeListContext : public antlr4::ParserRuleContext {
- public:
- TypeListContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<TypeContext*> type();
- TypeContext* type(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- TypeListContext* typeList();
-
- class GenericSpecializationTypeListContext
- : public antlr4::ParserRuleContext {
- public:
- GenericSpecializationTypeListContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- TypeListContext* typeList();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- GenericSpecializationTypeListContext* genericSpecializationTypeList();
-
- class OptionalGenericTypeListContext : public antlr4::ParserRuleContext {
- public:
- OptionalGenericTypeListContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<antlr4::tree::TerminalNode*> IDENTIFIER();
- antlr4::tree::TerminalNode* IDENTIFIER(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- OptionalGenericTypeListContext* optionalGenericTypeList();
-
- class TypeListMaybeVarArgsContext : public antlr4::ParserRuleContext {
- public:
- TypeListMaybeVarArgsContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<TypeContext*> type();
- TypeContext* type(size_t i);
- antlr4::tree::TerminalNode* VARARGS();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- TypeListMaybeVarArgsContext* typeListMaybeVarArgs();
-
- class LabelParameterContext : public antlr4::ParserRuleContext {
- public:
- LabelParameterContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeListContext* typeList();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- LabelParameterContext* labelParameter();
-
- class OptionalTypeContext : public antlr4::ParserRuleContext {
- public:
- OptionalTypeContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- TypeContext* type();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- OptionalTypeContext* optionalType();
-
- class OptionalLabelListContext : public antlr4::ParserRuleContext {
- public:
- OptionalLabelListContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* LABELS();
- std::vector<LabelParameterContext*> labelParameter();
- LabelParameterContext* labelParameter(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- OptionalLabelListContext* optionalLabelList();
-
- class OptionalOtherwiseContext : public antlr4::ParserRuleContext {
- public:
- OptionalOtherwiseContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* OTHERWISE();
- std::vector<antlr4::tree::TerminalNode*> IDENTIFIER();
- antlr4::tree::TerminalNode* IDENTIFIER(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- OptionalOtherwiseContext* optionalOtherwise();
-
- class ParameterContext : public antlr4::ParserRuleContext {
- public:
- ParameterContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeContext* type();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ParameterContext* parameter();
-
- class ParameterListContext : public antlr4::ParserRuleContext {
- public:
- ParameterListContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<ParameterContext*> parameter();
- ParameterContext* parameter(size_t i);
- antlr4::tree::TerminalNode* VARARGS();
- antlr4::tree::TerminalNode* IDENTIFIER();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ParameterListContext* parameterList();
-
- class LabelDeclarationContext : public antlr4::ParserRuleContext {
- public:
- LabelDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- ParameterListContext* parameterList();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- LabelDeclarationContext* labelDeclaration();
-
- class ExpressionContext : public antlr4::ParserRuleContext {
- public:
- ExpressionContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- ConditionalExpressionContext* conditionalExpression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ExpressionContext* expression();
-
- class ConditionalExpressionContext : public antlr4::ParserRuleContext {
- public:
- ConditionalExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<LogicalORExpressionContext*> logicalORExpression();
- LogicalORExpressionContext* logicalORExpression(size_t i);
- ConditionalExpressionContext* conditionalExpression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ConditionalExpressionContext* conditionalExpression();
- ConditionalExpressionContext* conditionalExpression(int precedence);
- class LogicalORExpressionContext : public antlr4::ParserRuleContext {
- public:
- LogicalORExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- LogicalANDExpressionContext* logicalANDExpression();
- LogicalORExpressionContext* logicalORExpression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- LogicalORExpressionContext* logicalORExpression();
- LogicalORExpressionContext* logicalORExpression(int precedence);
- class LogicalANDExpressionContext : public antlr4::ParserRuleContext {
- public:
- LogicalANDExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- BitwiseExpressionContext* bitwiseExpression();
- LogicalANDExpressionContext* logicalANDExpression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- LogicalANDExpressionContext* logicalANDExpression();
- LogicalANDExpressionContext* logicalANDExpression(int precedence);
- class BitwiseExpressionContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- BitwiseExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- EqualityExpressionContext* equalityExpression();
- BitwiseExpressionContext* bitwiseExpression();
- antlr4::tree::TerminalNode* BIT_AND();
- antlr4::tree::TerminalNode* BIT_OR();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- BitwiseExpressionContext* bitwiseExpression();
- BitwiseExpressionContext* bitwiseExpression(int precedence);
- class EqualityExpressionContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- EqualityExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- RelationalExpressionContext* relationalExpression();
- EqualityExpressionContext* equalityExpression();
- antlr4::tree::TerminalNode* EQUAL();
- antlr4::tree::TerminalNode* NOT_EQUAL();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- EqualityExpressionContext* equalityExpression();
- EqualityExpressionContext* equalityExpression(int precedence);
- class RelationalExpressionContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- RelationalExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- ShiftExpressionContext* shiftExpression();
- RelationalExpressionContext* relationalExpression();
- antlr4::tree::TerminalNode* LESS_THAN();
- antlr4::tree::TerminalNode* LESS_THAN_EQUAL();
- antlr4::tree::TerminalNode* GREATER_THAN();
- antlr4::tree::TerminalNode* GREATER_THAN_EQUAL();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- RelationalExpressionContext* relationalExpression();
- RelationalExpressionContext* relationalExpression(int precedence);
- class ShiftExpressionContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- ShiftExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- AdditiveExpressionContext* additiveExpression();
- ShiftExpressionContext* shiftExpression();
- antlr4::tree::TerminalNode* SHIFT_RIGHT();
- antlr4::tree::TerminalNode* SHIFT_LEFT();
- antlr4::tree::TerminalNode* SHIFT_RIGHT_ARITHMETIC();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ShiftExpressionContext* shiftExpression();
- ShiftExpressionContext* shiftExpression(int precedence);
- class AdditiveExpressionContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- AdditiveExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- MultiplicativeExpressionContext* multiplicativeExpression();
- AdditiveExpressionContext* additiveExpression();
- antlr4::tree::TerminalNode* PLUS();
- antlr4::tree::TerminalNode* MINUS();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- AdditiveExpressionContext* additiveExpression();
- AdditiveExpressionContext* additiveExpression(int precedence);
- class MultiplicativeExpressionContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- MultiplicativeExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- UnaryExpressionContext* unaryExpression();
- MultiplicativeExpressionContext* multiplicativeExpression();
- antlr4::tree::TerminalNode* MULTIPLY();
- antlr4::tree::TerminalNode* DIVIDE();
- antlr4::tree::TerminalNode* MODULO();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- MultiplicativeExpressionContext* multiplicativeExpression();
- MultiplicativeExpressionContext* multiplicativeExpression(int precedence);
- class UnaryExpressionContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- UnaryExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- AssignmentExpressionContext* assignmentExpression();
- UnaryExpressionContext* unaryExpression();
- antlr4::tree::TerminalNode* PLUS();
- antlr4::tree::TerminalNode* MINUS();
- antlr4::tree::TerminalNode* BIT_NOT();
- antlr4::tree::TerminalNode* NOT();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- UnaryExpressionContext* unaryExpression();
-
- class LocationExpressionContext : public antlr4::ParserRuleContext {
- public:
- LocationExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- PrimaryExpressionContext* primaryExpression();
- ExpressionContext* expression();
- LocationExpressionContext* locationExpression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- LocationExpressionContext* locationExpression();
- LocationExpressionContext* locationExpression(int precedence);
- class IncrementDecrementContext : public antlr4::ParserRuleContext {
- public:
- antlr4::Token* op = nullptr;
- IncrementDecrementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* INCREMENT();
- LocationExpressionContext* locationExpression();
- antlr4::tree::TerminalNode* DECREMENT();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- IncrementDecrementContext* incrementDecrement();
-
- class AssignmentContext : public antlr4::ParserRuleContext {
- public:
- AssignmentContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- IncrementDecrementContext* incrementDecrement();
- LocationExpressionContext* locationExpression();
- ExpressionContext* expression();
- antlr4::tree::TerminalNode* ASSIGNMENT();
- antlr4::tree::TerminalNode* ASSIGNMENT_OPERATOR();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- AssignmentContext* assignment();
-
- class AssignmentExpressionContext : public antlr4::ParserRuleContext {
- public:
- AssignmentExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- FunctionPointerExpressionContext* functionPointerExpression();
- AssignmentContext* assignment();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- AssignmentExpressionContext* assignmentExpression();
-
- class StructExpressionContext : public antlr4::ParserRuleContext {
- public:
- StructExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- std::vector<ExpressionContext*> expression();
- ExpressionContext* expression(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- StructExpressionContext* structExpression();
-
- class FunctionPointerExpressionContext : public antlr4::ParserRuleContext {
- public:
- FunctionPointerExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- PrimaryExpressionContext* primaryExpression();
- antlr4::tree::TerminalNode* IDENTIFIER();
- GenericSpecializationTypeListContext* genericSpecializationTypeList();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- FunctionPointerExpressionContext* functionPointerExpression();
-
- class PrimaryExpressionContext : public antlr4::ParserRuleContext {
- public:
- PrimaryExpressionContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- HelperCallContext* helperCall();
- StructExpressionContext* structExpression();
- antlr4::tree::TerminalNode* DECIMAL_LITERAL();
- antlr4::tree::TerminalNode* STRING_LITERAL();
- ExpressionContext* expression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- PrimaryExpressionContext* primaryExpression();
-
- class ForInitializationContext : public antlr4::ParserRuleContext {
- public:
- ForInitializationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- VariableDeclarationWithInitializationContext*
- variableDeclarationWithInitialization();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ForInitializationContext* forInitialization();
-
- class ForLoopContext : public antlr4::ParserRuleContext {
- public:
- ForLoopContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* FOR();
- ForInitializationContext* forInitialization();
- ExpressionContext* expression();
- AssignmentContext* assignment();
- StatementBlockContext* statementBlock();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ForLoopContext* forLoop();
-
- class RangeSpecifierContext : public antlr4::ParserRuleContext {
- public:
- TorqueParser::ExpressionContext* begin = nullptr;
- TorqueParser::ExpressionContext* end = nullptr;
- RangeSpecifierContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<ExpressionContext*> expression();
- ExpressionContext* expression(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- RangeSpecifierContext* rangeSpecifier();
-
- class ForOfRangeContext : public antlr4::ParserRuleContext {
- public:
- ForOfRangeContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- RangeSpecifierContext* rangeSpecifier();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ForOfRangeContext* forOfRange();
-
- class ForOfLoopContext : public antlr4::ParserRuleContext {
- public:
- ForOfLoopContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* FOR();
- VariableDeclarationContext* variableDeclaration();
- ExpressionContext* expression();
- ForOfRangeContext* forOfRange();
- StatementBlockContext* statementBlock();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ForOfLoopContext* forOfLoop();
-
- class ArgumentContext : public antlr4::ParserRuleContext {
- public:
- ArgumentContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- ExpressionContext* expression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ArgumentContext* argument();
-
- class ArgumentListContext : public antlr4::ParserRuleContext {
- public:
- ArgumentListContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<ArgumentContext*> argument();
- ArgumentContext* argument(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ArgumentListContext* argumentList();
-
- class HelperCallContext : public antlr4::ParserRuleContext {
- public:
- HelperCallContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- ArgumentListContext* argumentList();
- OptionalOtherwiseContext* optionalOtherwise();
- antlr4::tree::TerminalNode* MIN();
- antlr4::tree::TerminalNode* MAX();
- antlr4::tree::TerminalNode* IDENTIFIER();
- GenericSpecializationTypeListContext* genericSpecializationTypeList();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- HelperCallContext* helperCall();
-
- class LabelReferenceContext : public antlr4::ParserRuleContext {
- public:
- LabelReferenceContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- LabelReferenceContext* labelReference();
-
- class VariableDeclarationContext : public antlr4::ParserRuleContext {
- public:
- VariableDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeContext* type();
- antlr4::tree::TerminalNode* LET();
- antlr4::tree::TerminalNode* CONST();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- VariableDeclarationContext* variableDeclaration();
-
- class VariableDeclarationWithInitializationContext
- : public antlr4::ParserRuleContext {
- public:
- VariableDeclarationWithInitializationContext(
- antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- VariableDeclarationContext* variableDeclaration();
- antlr4::tree::TerminalNode* ASSIGNMENT();
- ExpressionContext* expression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- VariableDeclarationWithInitializationContext*
- variableDeclarationWithInitialization();
-
- class HelperCallStatementContext : public antlr4::ParserRuleContext {
- public:
- HelperCallStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- HelperCallContext* helperCall();
- antlr4::tree::TerminalNode* TAIL();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- HelperCallStatementContext* helperCallStatement();
-
- class ExpressionStatementContext : public antlr4::ParserRuleContext {
- public:
- ExpressionStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- AssignmentContext* assignment();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ExpressionStatementContext* expressionStatement();
-
- class IfStatementContext : public antlr4::ParserRuleContext {
- public:
- IfStatementContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IF();
- ExpressionContext* expression();
- std::vector<StatementBlockContext*> statementBlock();
- StatementBlockContext* statementBlock(size_t i);
- antlr4::tree::TerminalNode* CONSTEXPR();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- IfStatementContext* ifStatement();
-
- class WhileLoopContext : public antlr4::ParserRuleContext {
- public:
- WhileLoopContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* WHILE();
- ExpressionContext* expression();
- StatementBlockContext* statementBlock();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- WhileLoopContext* whileLoop();
-
- class ReturnStatementContext : public antlr4::ParserRuleContext {
- public:
- ReturnStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* RETURN();
- ExpressionContext* expression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ReturnStatementContext* returnStatement();
-
- class BreakStatementContext : public antlr4::ParserRuleContext {
- public:
- BreakStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* BREAK();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- BreakStatementContext* breakStatement();
-
- class ContinueStatementContext : public antlr4::ParserRuleContext {
- public:
- ContinueStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* CONTINUE();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ContinueStatementContext* continueStatement();
-
- class GotoStatementContext : public antlr4::ParserRuleContext {
- public:
- GotoStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* GOTO();
- LabelReferenceContext* labelReference();
- ArgumentListContext* argumentList();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- GotoStatementContext* gotoStatement();
-
- class HandlerWithStatementContext : public antlr4::ParserRuleContext {
- public:
- HandlerWithStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* LABEL();
- LabelDeclarationContext* labelDeclaration();
- StatementBlockContext* statementBlock();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- HandlerWithStatementContext* handlerWithStatement();
-
- class TryLabelStatementContext : public antlr4::ParserRuleContext {
- public:
- TryLabelStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* TRY();
- StatementBlockContext* statementBlock();
- std::vector<HandlerWithStatementContext*> handlerWithStatement();
- HandlerWithStatementContext* handlerWithStatement(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- TryLabelStatementContext* tryLabelStatement();
-
- class DiagnosticStatementContext : public antlr4::ParserRuleContext {
- public:
- DiagnosticStatementContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- ExpressionContext* expression();
- antlr4::tree::TerminalNode* ASSERT_TOKEN();
- antlr4::tree::TerminalNode* CHECK_TOKEN();
- antlr4::tree::TerminalNode* UNREACHABLE_TOKEN();
- antlr4::tree::TerminalNode* DEBUG_TOKEN();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- DiagnosticStatementContext* diagnosticStatement();
-
- class StatementContext : public antlr4::ParserRuleContext {
- public:
- StatementContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- VariableDeclarationWithInitializationContext*
- variableDeclarationWithInitialization();
- HelperCallStatementContext* helperCallStatement();
- ExpressionStatementContext* expressionStatement();
- ReturnStatementContext* returnStatement();
- BreakStatementContext* breakStatement();
- ContinueStatementContext* continueStatement();
- GotoStatementContext* gotoStatement();
- IfStatementContext* ifStatement();
- DiagnosticStatementContext* diagnosticStatement();
- WhileLoopContext* whileLoop();
- ForOfLoopContext* forOfLoop();
- ForLoopContext* forLoop();
- TryLabelStatementContext* tryLabelStatement();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- StatementContext* statement();
-
- class StatementListContext : public antlr4::ParserRuleContext {
- public:
- StatementListContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<StatementContext*> statement();
- StatementContext* statement(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- StatementListContext* statementList();
-
- class StatementScopeContext : public antlr4::ParserRuleContext {
- public:
- StatementScopeContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- StatementListContext* statementList();
- antlr4::tree::TerminalNode* DEFERRED();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- StatementScopeContext* statementScope();
-
- class StatementBlockContext : public antlr4::ParserRuleContext {
- public:
- StatementBlockContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- StatementContext* statement();
- StatementScopeContext* statementScope();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- StatementBlockContext* statementBlock();
-
- class HelperBodyContext : public antlr4::ParserRuleContext {
- public:
- HelperBodyContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- StatementScopeContext* statementScope();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- HelperBodyContext* helperBody();
-
- class FieldDeclarationContext : public antlr4::ParserRuleContext {
- public:
- FieldDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeContext* type();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- FieldDeclarationContext* fieldDeclaration();
-
- class FieldListDeclarationContext : public antlr4::ParserRuleContext {
- public:
- FieldListDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<FieldDeclarationContext*> fieldDeclaration();
- FieldDeclarationContext* fieldDeclaration(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- FieldListDeclarationContext* fieldListDeclaration();
-
- class ExtendsDeclarationContext : public antlr4::ParserRuleContext {
- public:
- ExtendsDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ExtendsDeclarationContext* extendsDeclaration();
-
- class GeneratesDeclarationContext : public antlr4::ParserRuleContext {
- public:
- GeneratesDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* STRING_LITERAL();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- GeneratesDeclarationContext* generatesDeclaration();
-
- class ConstexprDeclarationContext : public antlr4::ParserRuleContext {
- public:
- ConstexprDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* STRING_LITERAL();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ConstexprDeclarationContext* constexprDeclaration();
-
- class TypeDeclarationContext : public antlr4::ParserRuleContext {
- public:
- TypeDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- ExtendsDeclarationContext* extendsDeclaration();
- GeneratesDeclarationContext* generatesDeclaration();
- ConstexprDeclarationContext* constexprDeclaration();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- TypeDeclarationContext* typeDeclaration();
-
- class TypeAliasDeclarationContext : public antlr4::ParserRuleContext {
- public:
- TypeAliasDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeContext* type();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- TypeAliasDeclarationContext* typeAliasDeclaration();
-
- class ExternalBuiltinContext : public antlr4::ParserRuleContext {
- public:
- ExternalBuiltinContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* EXTERN();
- antlr4::tree::TerminalNode* BUILTIN();
- antlr4::tree::TerminalNode* IDENTIFIER();
- OptionalGenericTypeListContext* optionalGenericTypeList();
- TypeListContext* typeList();
- OptionalTypeContext* optionalType();
- antlr4::tree::TerminalNode* JAVASCRIPT();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ExternalBuiltinContext* externalBuiltin();
-
- class ExternalMacroContext : public antlr4::ParserRuleContext {
- public:
- ExternalMacroContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* EXTERN();
- antlr4::tree::TerminalNode* MACRO();
- antlr4::tree::TerminalNode* IDENTIFIER();
- OptionalGenericTypeListContext* optionalGenericTypeList();
- TypeListMaybeVarArgsContext* typeListMaybeVarArgs();
- OptionalTypeContext* optionalType();
- OptionalLabelListContext* optionalLabelList();
- antlr4::tree::TerminalNode* STRING_LITERAL();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ExternalMacroContext* externalMacro();
-
- class ExternalRuntimeContext : public antlr4::ParserRuleContext {
- public:
- ExternalRuntimeContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* EXTERN();
- antlr4::tree::TerminalNode* RUNTIME();
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeListMaybeVarArgsContext* typeListMaybeVarArgs();
- OptionalTypeContext* optionalType();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ExternalRuntimeContext* externalRuntime();
-
- class BuiltinDeclarationContext : public antlr4::ParserRuleContext {
- public:
- BuiltinDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* BUILTIN();
- antlr4::tree::TerminalNode* IDENTIFIER();
- OptionalGenericTypeListContext* optionalGenericTypeList();
- ParameterListContext* parameterList();
- OptionalTypeContext* optionalType();
- HelperBodyContext* helperBody();
- antlr4::tree::TerminalNode* JAVASCRIPT();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- BuiltinDeclarationContext* builtinDeclaration();
-
- class GenericSpecializationContext : public antlr4::ParserRuleContext {
- public:
- GenericSpecializationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- GenericSpecializationTypeListContext* genericSpecializationTypeList();
- ParameterListContext* parameterList();
- OptionalTypeContext* optionalType();
- OptionalLabelListContext* optionalLabelList();
- HelperBodyContext* helperBody();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- GenericSpecializationContext* genericSpecialization();
-
- class MacroDeclarationContext : public antlr4::ParserRuleContext {
- public:
- MacroDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* MACRO();
- antlr4::tree::TerminalNode* IDENTIFIER();
- OptionalGenericTypeListContext* optionalGenericTypeList();
- ParameterListContext* parameterList();
- OptionalTypeContext* optionalType();
- OptionalLabelListContext* optionalLabelList();
- HelperBodyContext* helperBody();
- antlr4::tree::TerminalNode* STRING_LITERAL();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- MacroDeclarationContext* macroDeclaration();
-
- class ExternConstDeclarationContext : public antlr4::ParserRuleContext {
- public:
- ExternConstDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* CONST();
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeContext* type();
- GeneratesDeclarationContext* generatesDeclaration();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ExternConstDeclarationContext* externConstDeclaration();
-
- class ConstDeclarationContext : public antlr4::ParserRuleContext {
- public:
- ConstDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* CONST();
- antlr4::tree::TerminalNode* IDENTIFIER();
- TypeContext* type();
- antlr4::tree::TerminalNode* ASSIGNMENT();
- ExpressionContext* expression();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ConstDeclarationContext* constDeclaration();
-
- class StructDeclarationContext : public antlr4::ParserRuleContext {
- public:
- StructDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* IDENTIFIER();
- FieldListDeclarationContext* fieldListDeclaration();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- StructDeclarationContext* structDeclaration();
-
- class DeclarationContext : public antlr4::ParserRuleContext {
- public:
- DeclarationContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- StructDeclarationContext* structDeclaration();
- TypeDeclarationContext* typeDeclaration();
- TypeAliasDeclarationContext* typeAliasDeclaration();
- BuiltinDeclarationContext* builtinDeclaration();
- GenericSpecializationContext* genericSpecialization();
- MacroDeclarationContext* macroDeclaration();
- ExternalMacroContext* externalMacro();
- ExternalBuiltinContext* externalBuiltin();
- ExternalRuntimeContext* externalRuntime();
- ExternConstDeclarationContext* externConstDeclaration();
- ConstDeclarationContext* constDeclaration();
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- DeclarationContext* declaration();
-
- class ModuleDeclarationContext : public antlr4::ParserRuleContext {
- public:
- ModuleDeclarationContext(antlr4::ParserRuleContext* parent,
- size_t invokingState);
- size_t getRuleIndex() const override;
- antlr4::tree::TerminalNode* MODULE();
- antlr4::tree::TerminalNode* IDENTIFIER();
- std::vector<DeclarationContext*> declaration();
- DeclarationContext* declaration(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- ModuleDeclarationContext* moduleDeclaration();
-
- class FileContext : public antlr4::ParserRuleContext {
- public:
- FileContext(antlr4::ParserRuleContext* parent, size_t invokingState);
- size_t getRuleIndex() const override;
- std::vector<ModuleDeclarationContext*> moduleDeclaration();
- ModuleDeclarationContext* moduleDeclaration(size_t i);
- std::vector<DeclarationContext*> declaration();
- DeclarationContext* declaration(size_t i);
-
- void enterRule(antlr4::tree::ParseTreeListener* listener) override;
- void exitRule(antlr4::tree::ParseTreeListener* listener) override;
-
- antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor* visitor) override;
- };
-
- FileContext* file();
-
- bool sempred(antlr4::RuleContext* _localctx, size_t ruleIndex,
- size_t predicateIndex) override;
- bool typeSempred(TypeContext* _localctx, size_t predicateIndex);
- bool conditionalExpressionSempred(ConditionalExpressionContext* _localctx,
- size_t predicateIndex);
- bool logicalORExpressionSempred(LogicalORExpressionContext* _localctx,
- size_t predicateIndex);
- bool logicalANDExpressionSempred(LogicalANDExpressionContext* _localctx,
- size_t predicateIndex);
- bool bitwiseExpressionSempred(BitwiseExpressionContext* _localctx,
- size_t predicateIndex);
- bool equalityExpressionSempred(EqualityExpressionContext* _localctx,
- size_t predicateIndex);
- bool relationalExpressionSempred(RelationalExpressionContext* _localctx,
- size_t predicateIndex);
- bool shiftExpressionSempred(ShiftExpressionContext* _localctx,
- size_t predicateIndex);
- bool additiveExpressionSempred(AdditiveExpressionContext* _localctx,
- size_t predicateIndex);
- bool multiplicativeExpressionSempred(
- MultiplicativeExpressionContext* _localctx, size_t predicateIndex);
- bool locationExpressionSempred(LocationExpressionContext* _localctx,
- size_t predicateIndex);
-
- private:
- static std::vector<antlr4::dfa::DFA> _decisionToDFA;
- static antlr4::atn::PredictionContextCache _sharedContextCache;
- static std::vector<std::string> _ruleNames;
- static std::vector<std::string> _tokenNames;
-
- static std::vector<std::string> _literalNames;
- static std::vector<std::string> _symbolicNames;
- static antlr4::dfa::Vocabulary _vocabulary;
- static antlr4::atn::ATN _atn;
- static std::vector<uint16_t> _serializedATN;
-
- struct Initializer {
- Initializer();
- };
- static Initializer _init;
-};
-
-#endif // V8_TORQUE_TORQUEPARSER_H_
diff --git a/deps/v8/src/torque/TorqueVisitor.h b/deps/v8/src/torque/TorqueVisitor.h
deleted file mode 100644
index 7b1ee2754e..0000000000
--- a/deps/v8/src/torque/TorqueVisitor.h
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_TORQUE_TORQUEVISITOR_H_
-#define V8_TORQUE_TORQUEVISITOR_H_
-
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#pragma once
-
-#include "./antlr4-runtime.h"
-#include "TorqueParser.h"
-
-/**
- * This class defines an abstract visitor for a parse tree
- * produced by TorqueParser.
- */
-class TorqueVisitor : public antlr4::tree::AbstractParseTreeVisitor {
- public:
- /**
- * Visit parse trees produced by TorqueParser.
- */
- virtual antlrcpp::Any visitType(TorqueParser::TypeContext* context) = 0;
-
- virtual antlrcpp::Any visitTypeList(
- TorqueParser::TypeListContext* context) = 0;
-
- virtual antlrcpp::Any visitGenericSpecializationTypeList(
- TorqueParser::GenericSpecializationTypeListContext* context) = 0;
-
- virtual antlrcpp::Any visitOptionalGenericTypeList(
- TorqueParser::OptionalGenericTypeListContext* context) = 0;
-
- virtual antlrcpp::Any visitTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* context) = 0;
-
- virtual antlrcpp::Any visitLabelParameter(
- TorqueParser::LabelParameterContext* context) = 0;
-
- virtual antlrcpp::Any visitOptionalType(
- TorqueParser::OptionalTypeContext* context) = 0;
-
- virtual antlrcpp::Any visitOptionalLabelList(
- TorqueParser::OptionalLabelListContext* context) = 0;
-
- virtual antlrcpp::Any visitOptionalOtherwise(
- TorqueParser::OptionalOtherwiseContext* context) = 0;
-
- virtual antlrcpp::Any visitParameter(
- TorqueParser::ParameterContext* context) = 0;
-
- virtual antlrcpp::Any visitParameterList(
- TorqueParser::ParameterListContext* context) = 0;
-
- virtual antlrcpp::Any visitLabelDeclaration(
- TorqueParser::LabelDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitExpression(
- TorqueParser::ExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitConditionalExpression(
- TorqueParser::ConditionalExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitEqualityExpression(
- TorqueParser::EqualityExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitRelationalExpression(
- TorqueParser::RelationalExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitShiftExpression(
- TorqueParser::ShiftExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitUnaryExpression(
- TorqueParser::UnaryExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitLocationExpression(
- TorqueParser::LocationExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitIncrementDecrement(
- TorqueParser::IncrementDecrementContext* context) = 0;
-
- virtual antlrcpp::Any visitAssignment(
- TorqueParser::AssignmentContext* context) = 0;
-
- virtual antlrcpp::Any visitAssignmentExpression(
- TorqueParser::AssignmentExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitStructExpression(
- TorqueParser::StructExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* context) = 0;
-
- virtual antlrcpp::Any visitForInitialization(
- TorqueParser::ForInitializationContext* context) = 0;
-
- virtual antlrcpp::Any visitForLoop(TorqueParser::ForLoopContext* context) = 0;
-
- virtual antlrcpp::Any visitRangeSpecifier(
- TorqueParser::RangeSpecifierContext* context) = 0;
-
- virtual antlrcpp::Any visitForOfRange(
- TorqueParser::ForOfRangeContext* context) = 0;
-
- virtual antlrcpp::Any visitForOfLoop(
- TorqueParser::ForOfLoopContext* context) = 0;
-
- virtual antlrcpp::Any visitArgument(
- TorqueParser::ArgumentContext* context) = 0;
-
- virtual antlrcpp::Any visitArgumentList(
- TorqueParser::ArgumentListContext* context) = 0;
-
- virtual antlrcpp::Any visitHelperCall(
- TorqueParser::HelperCallContext* context) = 0;
-
- virtual antlrcpp::Any visitLabelReference(
- TorqueParser::LabelReferenceContext* context) = 0;
-
- virtual antlrcpp::Any visitVariableDeclaration(
- TorqueParser::VariableDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* context) = 0;
-
- virtual antlrcpp::Any visitHelperCallStatement(
- TorqueParser::HelperCallStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitExpressionStatement(
- TorqueParser::ExpressionStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitIfStatement(
- TorqueParser::IfStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitWhileLoop(
- TorqueParser::WhileLoopContext* context) = 0;
-
- virtual antlrcpp::Any visitReturnStatement(
- TorqueParser::ReturnStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitBreakStatement(
- TorqueParser::BreakStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitContinueStatement(
- TorqueParser::ContinueStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitGotoStatement(
- TorqueParser::GotoStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitHandlerWithStatement(
- TorqueParser::HandlerWithStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitTryLabelStatement(
- TorqueParser::TryLabelStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* context) = 0;
-
- virtual antlrcpp::Any visitStatement(
- TorqueParser::StatementContext* context) = 0;
-
- virtual antlrcpp::Any visitStatementList(
- TorqueParser::StatementListContext* context) = 0;
-
- virtual antlrcpp::Any visitStatementScope(
- TorqueParser::StatementScopeContext* context) = 0;
-
- virtual antlrcpp::Any visitStatementBlock(
- TorqueParser::StatementBlockContext* context) = 0;
-
- virtual antlrcpp::Any visitHelperBody(
- TorqueParser::HelperBodyContext* context) = 0;
-
- virtual antlrcpp::Any visitFieldDeclaration(
- TorqueParser::FieldDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitFieldListDeclaration(
- TorqueParser::FieldListDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitExtendsDeclaration(
- TorqueParser::ExtendsDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitGeneratesDeclaration(
- TorqueParser::GeneratesDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitConstexprDeclaration(
- TorqueParser::ConstexprDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitTypeDeclaration(
- TorqueParser::TypeDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* context) = 0;
-
- virtual antlrcpp::Any visitExternalMacro(
- TorqueParser::ExternalMacroContext* context) = 0;
-
- virtual antlrcpp::Any visitExternalRuntime(
- TorqueParser::ExternalRuntimeContext* context) = 0;
-
- virtual antlrcpp::Any visitBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitGenericSpecialization(
- TorqueParser::GenericSpecializationContext* context) = 0;
-
- virtual antlrcpp::Any visitMacroDeclaration(
- TorqueParser::MacroDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitConstDeclaration(
- TorqueParser::ConstDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitStructDeclaration(
- TorqueParser::StructDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitDeclaration(
- TorqueParser::DeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* context) = 0;
-
- virtual antlrcpp::Any visitFile(TorqueParser::FileContext* context) = 0;
-};
-
-#endif // V8_TORQUE_TORQUEVISITOR_H_
diff --git a/deps/v8/src/torque/ast-generator.cc b/deps/v8/src/torque/ast-generator.cc
deleted file mode 100644
index 5eb2f37ef6..0000000000
--- a/deps/v8/src/torque/ast-generator.cc
+++ /dev/null
@@ -1,833 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "src/base/macros.h"
-#include "src/torque/ast-generator.h"
-
-namespace v8 {
-namespace internal {
-namespace torque {
-
-namespace {
-
-std::vector<std::string> GetIdentifierVector(
- std::vector<antlr4::tree::TerminalNode*> source) {
- std::vector<std::string> result;
- for (auto s : source) {
- result.push_back(s->getSymbol()->getText());
- }
- return result;
-}
-
-std::string StringLiteralUnquote(const std::string& s) {
- assert('"' == s.front() || '\'' == s.front());
- assert('"' == s.back() || '\'' == s.back());
- std::stringstream result;
- for (size_t i = 1; i < s.length() - 1; ++i) {
- if (s[i] == '\\') {
- switch (s[++i]) {
- case 'n':
- result << '\n';
- break;
- case 'r':
- result << '\r';
- break;
- case 't':
- result << '\t';
- break;
- case '\'':
- case '"':
- case '\\':
- result << s[i];
- break;
- default:
- UNREACHABLE();
- }
- } else {
- result << s[i];
- }
- }
- return result.str();
-}
-
-} // namespace
-
-LabelAndTypesVector AstGenerator::GetOptionalLabelAndTypeList(
- TorqueParser::OptionalLabelListContext* context) {
- LabelAndTypesVector labels;
- if (context) {
- for (auto* label : context->labelParameter()) {
- LabelAndTypes new_label;
- new_label.name = label->IDENTIFIER()->getSymbol()->getText();
- if (label->typeList() != nullptr) {
- for (auto* type : label->typeList()->type()) {
- new_label.types.emplace_back(GetType(type));
- }
- }
- labels.emplace_back(new_label);
- }
- }
- return labels;
-}
-
-TypeExpression* AstGenerator::GetType(TorqueParser::TypeContext* context) {
- if (context->BUILTIN()) {
- ParameterList parameters = context->typeList()->accept(this);
- TypeExpression* return_type = GetType(context->type(0));
- return RegisterNode(
- new FunctionTypeExpression(Pos(context), parameters, return_type));
- } else if (context->BIT_OR()) {
- return RegisterNode(new UnionTypeExpression(
- Pos(context), GetType(context->type(0)), GetType(context->type(1))));
- } else if (context->IDENTIFIER()) {
- bool is_constexpr = context->CONSTEXPR() != nullptr;
- std::string name = context->IDENTIFIER()->getSymbol()->getText();
- return RegisterNode(
- new BasicTypeExpression(Pos(context), is_constexpr, std::move(name)));
- } else {
- DCHECK_EQ(1, context->type().size());
- return GetType(context->type(0));
- }
-}
-
-TypeExpression* AstGenerator::GetOptionalType(
- TorqueParser::OptionalTypeContext* context) {
- if (!context->type())
- return RegisterNode(new BasicTypeExpression(Pos(context), false, "void"));
- return GetType(context->type());
-}
-
-std::vector<TypeExpression*> AstGenerator::GetTypeVector(
- TorqueParser::TypeListContext* type_list) {
- std::vector<TypeExpression*> result;
- for (auto t : type_list->type()) {
- result.push_back(GetType(t));
- }
- return result;
-}
-
-ParameterList AstGenerator::GetOptionalParameterList(
- TorqueParser::ParameterListContext* context) {
- if (context != nullptr) {
- return context->accept(this).as<ParameterList>();
- } else {
- return ParameterList();
- }
-}
-
-Statement* AstGenerator::GetOptionalHelperBody(
- TorqueParser::HelperBodyContext* context) {
- if (context) return context->accept(this).as<Statement*>();
- return nullptr;
-}
-
-antlrcpp::Any AstGenerator::visitParameterList(
- TorqueParser::ParameterListContext* context) {
- ParameterList result{{}, {}, context->VARARGS(), {}};
- if (context->VARARGS()) {
- result.arguments_variable = context->IDENTIFIER()->getSymbol()->getText();
- }
- for (auto* parameter : context->parameter()) {
- parameter->accept(this);
- result.names.push_back(parameter->IDENTIFIER()->getSymbol()->getText());
- result.types.push_back(GetType(parameter->type()));
- }
- return std::move(result);
-}
-
-antlrcpp::Any AstGenerator::visitTypeList(
- TorqueParser::TypeListContext* context) {
- ParameterList result{{}, {}, false, {}};
- result.types = GetTypeVector(context);
- return std::move(result);
-}
-
-antlrcpp::Any AstGenerator::visitTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* context) {
- ParameterList result{{}, {}, context->VARARGS(), {}};
- result.types.reserve(context->type().size());
- for (auto* type : context->type()) {
- result.types.push_back(GetType(type));
- }
- return std::move(result);
-}
-
-antlrcpp::Any AstGenerator::visitModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* context) {
- ModuleDeclaration* result = RegisterNode(new ExplicitModuleDeclaration{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(), {}});
- for (auto* declaration : context->declaration()) {
- result->declarations.push_back(
- declaration->accept(this).as<Declaration*>());
- }
- return implicit_cast<Declaration*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitMacroDeclaration(
- TorqueParser::MacroDeclarationContext* context) {
- auto generic_parameters =
- GetIdentifierVector(context->optionalGenericTypeList()->IDENTIFIER());
- MacroDeclaration* macro = RegisterNode(new TorqueMacroDeclaration{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
- GetOptionalParameterList(context->parameterList()),
- GetOptionalType(context->optionalType()),
- GetOptionalLabelAndTypeList(context->optionalLabelList())});
- if (auto* op = context->STRING_LITERAL()) {
- macro->op = StringLiteralUnquote(op->getSymbol()->getText());
- }
- base::Optional<Statement*> body;
- if (context->helperBody())
- body = context->helperBody()->accept(this).as<Statement*>();
- Declaration* result = nullptr;
- if (generic_parameters.size() != 0) {
- result = RegisterNode(
- new GenericDeclaration{Pos(context), macro, generic_parameters, body});
- } else {
- if (!body) ReportError("A non-generic declaration needs a body.");
- result = RegisterNode(new StandardDeclaration{Pos(context), macro, *body});
- }
- return result;
-}
-
-antlrcpp::Any AstGenerator::visitBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* context) {
- auto generic_parameters =
- GetIdentifierVector(context->optionalGenericTypeList()->IDENTIFIER());
- base::Optional<Statement*> body;
- if (context->helperBody())
- body = context->helperBody()->accept(this).as<Statement*>();
-
- TorqueBuiltinDeclaration* builtin = RegisterNode(new TorqueBuiltinDeclaration{
- Pos(context), context->JAVASCRIPT() != nullptr,
- context->IDENTIFIER()->getSymbol()->getText(),
- std::move(context->parameterList()->accept(this).as<ParameterList>()),
- GetOptionalType(context->optionalType())});
-
- Declaration* result = nullptr;
- if (generic_parameters.size() != 0) {
- result = RegisterNode(new GenericDeclaration{Pos(context), builtin,
- generic_parameters, body});
- } else {
- if (!body) ReportError("A non-generic declaration needs a body.");
- result =
- RegisterNode(new StandardDeclaration{Pos(context), builtin, *body});
- }
- return result;
-}
-
-antlrcpp::Any AstGenerator::visitExternalMacro(
- TorqueParser::ExternalMacroContext* context) {
- auto generic_parameters =
- GetIdentifierVector(context->optionalGenericTypeList()->IDENTIFIER());
- MacroDeclaration* macro = RegisterNode(new ExternalMacroDeclaration{
- Pos(context),
- context->IDENTIFIER()->getSymbol()->getText(),
- {},
- std::move(
- context->typeListMaybeVarArgs()->accept(this).as<ParameterList>()),
- GetOptionalType(context->optionalType()),
- GetOptionalLabelAndTypeList(context->optionalLabelList())});
- if (auto* op = context->STRING_LITERAL())
- macro->op = StringLiteralUnquote(op->getSymbol()->getText());
- Declaration* result = nullptr;
- if (generic_parameters.size() != 0) {
- result = RegisterNode(new GenericDeclaration{Pos(context), macro,
- generic_parameters, nullptr});
- } else {
- result =
- RegisterNode(new StandardDeclaration{Pos(context), macro, nullptr});
- }
- return result;
-}
-
-antlrcpp::Any AstGenerator::visitExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* context) {
- auto generic_parameters =
- GetIdentifierVector(context->optionalGenericTypeList()->IDENTIFIER());
- ExternalBuiltinDeclaration* builtin =
- RegisterNode(new ExternalBuiltinDeclaration{
- Pos(context), context->JAVASCRIPT() != nullptr,
- context->IDENTIFIER()->getSymbol()->getText(),
- std::move(context->typeList()->accept(this).as<ParameterList>()),
- GetOptionalType(context->optionalType())});
-
- Declaration* result = nullptr;
- if (generic_parameters.size() != 0) {
- result = RegisterNode(new GenericDeclaration{Pos(context), builtin,
- generic_parameters, nullptr});
- } else {
- result =
- RegisterNode(new StandardDeclaration{Pos(context), builtin, nullptr});
- }
- return result;
-}
-
-antlrcpp::Any AstGenerator::visitExternalRuntime(
- TorqueParser::ExternalRuntimeContext* context) {
- ExternalRuntimeDeclaration* runtime =
- RegisterNode(new ExternalRuntimeDeclaration{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
- std::move(context->typeListMaybeVarArgs()
- ->accept(this)
- .as<ParameterList>()),
- GetOptionalType(context->optionalType())});
- return implicit_cast<Declaration*>(
- RegisterNode(new StandardDeclaration{Pos(context), runtime, nullptr}));
-}
-
-antlrcpp::Any AstGenerator::visitConstDeclaration(
- TorqueParser::ConstDeclarationContext* context) {
- auto name = context->IDENTIFIER()->getSymbol()->getText();
- auto type = GetType(context->type());
- Expression* expression =
- context->expression()->accept(this).as<Expression*>();
- return implicit_cast<Declaration*>(
- RegisterNode(new ConstDeclaration{Pos(context), name, type, expression}));
-}
-
-antlrcpp::Any AstGenerator::visitGenericSpecialization(
- TorqueParser::GenericSpecializationContext* context) {
- auto name = context->IDENTIFIER()->getSymbol()->getText();
- auto specialization_parameters =
- GetTypeVector(context->genericSpecializationTypeList()->typeList());
- return implicit_cast<Declaration*>(RegisterNode(new SpecializationDeclaration{
- Pos(context), name, false, specialization_parameters,
- GetOptionalParameterList(context->parameterList()),
- GetOptionalType(context->optionalType()),
- GetOptionalLabelAndTypeList(context->optionalLabelList()),
- context->helperBody()->accept(this).as<Statement*>()}));
-}
-
-antlrcpp::Any AstGenerator::visitExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* context) {
- return implicit_cast<Declaration*>(RegisterNode(new ExternConstDeclaration{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
- GetType(context->type()),
- StringLiteralUnquote(context->generatesDeclaration()
- ->STRING_LITERAL()
- ->getSymbol()
- ->getText())}));
-}
-
-antlrcpp::Any AstGenerator::visitTypeDeclaration(
- TorqueParser::TypeDeclarationContext* context) {
- TypeDeclaration* result = RegisterNode(new TypeDeclaration{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(), {}, {}});
- if (context->extendsDeclaration())
- result->extends =
- context->extendsDeclaration()->IDENTIFIER()->getSymbol()->getText();
- if (context->generatesDeclaration()) {
- result->generates = StringLiteralUnquote(context->generatesDeclaration()
- ->STRING_LITERAL()
- ->getSymbol()
- ->getText());
- }
- if (context->constexprDeclaration()) {
- result->constexpr_generates =
- StringLiteralUnquote(context->constexprDeclaration()
- ->STRING_LITERAL()
- ->getSymbol()
- ->getText());
- }
- return implicit_cast<Declaration*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* context) {
- TypeAliasDeclaration* result = RegisterNode(new TypeAliasDeclaration{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
- GetType(context->type())});
- return implicit_cast<Declaration*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitVariableDeclaration(
- TorqueParser::VariableDeclarationContext* context) {
- bool is_const_qualified = context->CONST() != nullptr;
- return RegisterNode(
- new VarDeclarationStatement{Pos(context),
- is_const_qualified,
- context->IDENTIFIER()->getSymbol()->getText(),
- GetType(context->type()),
- {}});
-}
-
-antlrcpp::Any AstGenerator::visitVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* context) {
- VarDeclarationStatement* result =
- VarDeclarationStatement::cast(context->variableDeclaration()
- ->accept(this)
- .as<VarDeclarationStatement*>());
- result->pos = Pos(context);
- if (context->expression())
- result->initializer = context->expression()->accept(this).as<Expression*>();
- return implicit_cast<Statement*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitHelperCall(
- TorqueParser::HelperCallContext* context) {
- antlr4::tree::TerminalNode* callee;
- bool is_operator = context->MIN() || context->MAX();
- if (context->MIN()) callee = context->MIN();
- if (context->MAX()) callee = context->MAX();
- if (context->IDENTIFIER()) callee = context->IDENTIFIER();
- std::vector<std::string> labels;
- for (auto label : context->optionalOtherwise()->IDENTIFIER()) {
- labels.push_back(label->getSymbol()->getText());
- }
- std::vector<TypeExpression*> templateArguments;
- if (context->genericSpecializationTypeList()) {
- templateArguments =
- GetTypeVector(context->genericSpecializationTypeList()->typeList());
- }
- CallExpression* result =
- RegisterNode(new CallExpression{Pos(context),
- callee->getSymbol()->getText(),
- is_operator,
- templateArguments,
- {},
- labels});
- for (auto* arg : context->argumentList()->argument()) {
- result->arguments.push_back(arg->accept(this).as<Expression*>());
- }
- return implicit_cast<Expression*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitHelperCallStatement(
- TorqueParser::HelperCallStatementContext* context) {
- Statement* result;
- if (context->TAIL()) {
- result = RegisterNode(new TailCallStatement{
- Pos(context),
- CallExpression::cast(
- context->helperCall()->accept(this).as<Expression*>())});
- } else {
- result = RegisterNode(new ExpressionStatement{
- Pos(context), context->helperCall()->accept(this).as<Expression*>()});
- }
- return result;
-}
-
-antlrcpp::Any AstGenerator::visitStatementScope(
- TorqueParser::StatementScopeContext* context) {
- BlockStatement* result = RegisterNode(
- new BlockStatement{Pos(context), context->DEFERRED() != nullptr, {}});
- for (auto* child : context->statementList()->statement()) {
- result->statements.push_back(child->accept(this).as<Statement*>());
- }
- return implicit_cast<Statement*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitExpressionStatement(
- TorqueParser::ExpressionStatementContext* context) {
- return implicit_cast<Statement*>(RegisterNode(new ExpressionStatement{
- Pos(context), context->assignment()->accept(this).as<Expression*>()}));
-}
-
-antlrcpp::Any AstGenerator::visitReturnStatement(
- TorqueParser::ReturnStatementContext* context) {
- if (context->expression() != nullptr) {
- return implicit_cast<Statement*>(RegisterNode(new ReturnStatement{
- Pos(context), context->expression()->accept(this).as<Expression*>()}));
- } else {
- return implicit_cast<Statement*>(
- RegisterNode(new ReturnStatement{Pos(context), {}}));
- }
-}
-
-antlrcpp::Any AstGenerator::visitBreakStatement(
- TorqueParser::BreakStatementContext* context) {
- return implicit_cast<Statement*>(
- RegisterNode(new BreakStatement{Pos(context)}));
-}
-
-antlrcpp::Any AstGenerator::visitContinueStatement(
- TorqueParser::ContinueStatementContext* context) {
- return implicit_cast<Statement*>(
- RegisterNode(new ContinueStatement{Pos(context)}));
-}
-
-antlrcpp::Any AstGenerator::visitGotoStatement(
- TorqueParser::GotoStatementContext* context) {
- GotoStatement* result = RegisterNode(new GotoStatement{Pos(context), {}, {}});
- if (context->labelReference())
- result->label =
- context->labelReference()->IDENTIFIER()->getSymbol()->getText();
- if (context->argumentList() != nullptr) {
- for (auto a : context->argumentList()->argument()) {
- result->arguments.push_back(a->accept(this).as<Expression*>());
- }
- }
- return implicit_cast<Statement*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitIfStatement(
- TorqueParser::IfStatementContext* context) {
- IfStatement* result = RegisterNode(new IfStatement{
- Pos(context),
- std::move(context->expression()->accept(this).as<Expression*>()),
- context->CONSTEXPR() != nullptr,
- std::move(context->statementBlock(0)->accept(this).as<Statement*>()),
- {}});
- if (context->statementBlock(1))
- result->if_false =
- std::move(context->statementBlock(1)->accept(this).as<Statement*>());
- return implicit_cast<Statement*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitWhileLoop(
- TorqueParser::WhileLoopContext* context) {
- return implicit_cast<Statement*>(RegisterNode(new WhileStatement{
- Pos(context), context->expression()->accept(this).as<Expression*>(),
- context->statementBlock()->accept(this).as<Statement*>()}));
-}
-
-antlrcpp::Any AstGenerator::visitForLoop(
- TorqueParser::ForLoopContext* context) {
- ForLoopStatement* result = RegisterNode(new ForLoopStatement{
- Pos(context),
- {},
- context->expression()->accept(this).as<Expression*>(),
- context->assignment()->accept(this).as<Expression*>(),
- context->statementBlock()->accept(this).as<Statement*>()});
- if (auto* init = context->forInitialization()
- ->variableDeclarationWithInitialization()) {
- result->var_declaration =
- VarDeclarationStatement::cast(init->accept(this).as<Statement*>());
- }
- return implicit_cast<Statement*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitForOfLoop(
- TorqueParser::ForOfLoopContext* context) {
- ForOfLoopStatement* result = RegisterNode(new ForOfLoopStatement{
- Pos(context),
- context->variableDeclaration()
- ->accept(this)
- .as<VarDeclarationStatement*>(),
- context->expression()->accept(this).as<Expression*>(),
- {},
- {},
- context->statementBlock()->accept(this).as<Statement*>()});
- if (auto* range = context->forOfRange()->rangeSpecifier()) {
- if (auto* begin = range->begin) {
- result->begin = begin->accept(this).as<Expression*>();
- }
- if (auto* end = range->end) {
- result->end = end->accept(this).as<Expression*>();
- }
- }
- return implicit_cast<Statement*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitTryLabelStatement(
- TorqueParser::TryLabelStatementContext* context) {
- TryLabelStatement* result = RegisterNode(new TryLabelStatement{
- Pos(context), context->statementBlock()->accept(this).as<Statement*>()});
- for (auto* handler : context->handlerWithStatement()) {
- handler->labelDeclaration()->accept(this);
- auto parameter_list = handler->labelDeclaration()->parameterList();
- ParameterList label_parameters = parameter_list == nullptr
- ? ParameterList()
- : handler->labelDeclaration()
- ->parameterList()
- ->accept(this)
- .as<ParameterList>();
- LabelBlock* label_block = RegisterNode(new LabelBlock{
- Pos(handler->statementBlock()),
- handler->labelDeclaration()->IDENTIFIER()->getSymbol()->getText(),
- label_parameters,
- handler->statementBlock()->accept(this).as<Statement*>()});
- result->label_blocks.push_back(label_block);
- }
- return implicit_cast<Statement*>(result);
-}
-
-antlrcpp::Any AstGenerator::visitFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* context) {
- if (context->IDENTIFIER()) {
- std::vector<TypeExpression*> templateArguments;
- if (context->genericSpecializationTypeList()) {
- templateArguments =
- GetTypeVector(context->genericSpecializationTypeList()->typeList());
- }
- return implicit_cast<Expression*>(RegisterNode(new IdentifierExpression{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
- std::move(templateArguments)}));
- }
- return context->primaryExpression()->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* context) {
- if (auto* e = context->helperCall()) return e->accept(this);
- if (auto* e = context->DECIMAL_LITERAL())
- return implicit_cast<Expression*>(RegisterNode(
- new NumberLiteralExpression{Pos(context), e->getSymbol()->getText()}));
- if (auto* e = context->STRING_LITERAL())
- return implicit_cast<Expression*>(RegisterNode(
- new StringLiteralExpression{Pos(context), e->getSymbol()->getText()}));
- if (context->structExpression()) {
- return context->structExpression()->accept(this);
- }
- return context->expression()->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitStructExpression(
- TorqueParser::StructExpressionContext* context) {
- std::vector<Expression*> expressions;
- for (auto& e : context->expression()) {
- expressions.push_back(e->accept(this).as<Expression*>());
- }
- return implicit_cast<Expression*>(RegisterNode(new StructExpression{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(),
- expressions}));
-}
-
-antlrcpp::Any AstGenerator::visitAssignment(
- TorqueParser::AssignmentContext* context) {
- if (auto* e = context->incrementDecrement()) return e->accept(this);
- LocationExpression* location = LocationExpression::cast(
- context->locationExpression()->accept(this).as<Expression*>());
- if (auto* e = context->expression()) {
- AssignmentExpression* result = RegisterNode(new AssignmentExpression{
- Pos(context), location, {}, e->accept(this).as<Expression*>()});
- if (auto* op_node = context->ASSIGNMENT_OPERATOR()) {
- std::string op = op_node->getSymbol()->getText();
- result->op = op.substr(0, op.length() - 1);
- }
- return implicit_cast<Expression*>(result);
- }
- return implicit_cast<Expression*>(location);
-}
-
-antlrcpp::Any AstGenerator::visitIncrementDecrement(
- TorqueParser::IncrementDecrementContext* context) {
- bool postfix = context->op;
- return implicit_cast<Expression*>(
- RegisterNode(new IncrementDecrementExpression{
- Pos(context),
- LocationExpression::cast(
- context->locationExpression()->accept(this).as<Expression*>()),
- context->INCREMENT() ? IncrementDecrementOperator::kIncrement
- : IncrementDecrementOperator::kDecrement,
- postfix}));
-}
-
-antlrcpp::Any AstGenerator::visitLocationExpression(
- TorqueParser::LocationExpressionContext* context) {
- Expression* location = nullptr;
- if (auto* p = context->primaryExpression()) {
- location = p->accept(this).as<Expression*>();
- } else if (auto* l = context->locationExpression()) {
- location = l->accept(this).as<Expression*>();
- } else {
- return implicit_cast<Expression*>(RegisterNode(new IdentifierExpression{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText(), {}}));
- }
-
- if (auto* e = context->expression()) {
- return implicit_cast<Expression*>(RegisterNode(new ElementAccessExpression{
- Pos(context), location, e->accept(this).as<Expression*>()}));
- }
-
- return implicit_cast<Expression*>(RegisterNode(new FieldAccessExpression{
- Pos(context), location, context->IDENTIFIER()->getSymbol()->getText()}));
-}
-
-antlrcpp::Any AstGenerator::visitUnaryExpression(
- TorqueParser::UnaryExpressionContext* context) {
- if (auto* e = context->assignmentExpression()) return e->accept(this);
- std::vector<Expression*> args;
- args.push_back(context->unaryExpression()->accept(this).as<Expression*>());
- return implicit_cast<Expression*>(RegisterNode(new CallExpression{
- Pos(context), context->op->getText(), true, {}, std::move(args), {}}));
-}
-
-antlrcpp::Any AstGenerator::visitMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* context) {
- auto* right = context->unaryExpression();
- if (auto* left = context->multiplicativeExpression()) {
- return implicit_cast<Expression*>(
- RegisterNode(new CallExpression{Pos(context),
- context->op->getText(),
- true,
- {},
- {left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()},
- {}}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* context) {
- auto* right = context->multiplicativeExpression();
- if (auto* left = context->additiveExpression()) {
- return implicit_cast<Expression*>(
- RegisterNode(new CallExpression{Pos(context),
- context->op->getText(),
- true,
- {},
- {left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()},
- {}}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitShiftExpression(
- TorqueParser::ShiftExpressionContext* context) {
- auto* right = context->additiveExpression();
- if (auto* left = context->shiftExpression()) {
- return implicit_cast<Expression*>(
- RegisterNode(new CallExpression{Pos(context),
- context->op->getText(),
- true,
- {},
- {left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()},
- {}}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitRelationalExpression(
- TorqueParser::RelationalExpressionContext* context) {
- auto* right = context->shiftExpression();
- if (auto* left = context->relationalExpression()) {
- return implicit_cast<Expression*>(
- RegisterNode(new CallExpression{Pos(context),
- context->op->getText(),
- true,
- {},
- {left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()},
- {}}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitEqualityExpression(
- TorqueParser::EqualityExpressionContext* context) {
- auto* right = context->relationalExpression();
- if (auto* left = context->equalityExpression()) {
- return implicit_cast<Expression*>(
- RegisterNode(new CallExpression{Pos(context),
- context->op->getText(),
- true,
- {},
- {left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()},
- {}}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* context) {
- auto* right = context->equalityExpression();
- if (auto* left = context->bitwiseExpression()) {
- return implicit_cast<Expression*>(
- RegisterNode(new CallExpression{Pos(context),
- context->op->getText(),
- true,
- {},
- {left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()},
- {}}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* context) {
- auto* right = context->bitwiseExpression();
- if (auto* left = context->logicalANDExpression()) {
- return implicit_cast<Expression*>(RegisterNode(new LogicalAndExpression{
- Pos(context), left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* context) {
- auto* right = context->logicalANDExpression();
- if (auto* left = context->logicalORExpression()) {
- return implicit_cast<Expression*>(RegisterNode(new LogicalOrExpression{
- Pos(context), left->accept(this).as<Expression*>(),
- right->accept(this).as<Expression*>()}));
- }
- return right->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitConditionalExpression(
- TorqueParser::ConditionalExpressionContext* context) {
- if (auto* condition = context->conditionalExpression()) {
- return implicit_cast<Expression*>(RegisterNode(new ConditionalExpression{
- Pos(context), condition->accept(this).as<Expression*>(),
- context->logicalORExpression(0)->accept(this).as<Expression*>(),
- context->logicalORExpression(1)->accept(this).as<Expression*>()}));
- }
- return context->logicalORExpression(0)->accept(this);
-}
-
-antlrcpp::Any AstGenerator::visitDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* context) {
- if (context->ASSERT_TOKEN() || context->CHECK_TOKEN()) {
- size_t a = context->expression()->start->getStartIndex();
- size_t b = context->expression()->stop->getStopIndex();
- antlr4::misc::Interval interval(a, b);
- std::string source = source_file_context_->stream->getText(interval);
- return implicit_cast<Statement*>(RegisterNode(new AssertStatement{
- Pos(context), context->ASSERT_TOKEN() != nullptr,
- context->expression()->accept(this).as<Expression*>(), source}));
- } else if (context->UNREACHABLE_TOKEN()) {
- return implicit_cast<Statement*>(
- RegisterNode(new DebugStatement{Pos(context), "unreachable", true}));
- } else {
- DCHECK(context->DEBUG_TOKEN());
- return implicit_cast<Statement*>(
- RegisterNode(new DebugStatement{Pos(context), "debug", false}));
- }
-}
-
-antlrcpp::Any AstGenerator::visitStructDeclaration(
- TorqueParser::StructDeclarationContext* context) {
- StructDeclaration* struct_declaration = RegisterNode(new StructDeclaration{
- Pos(context), context->IDENTIFIER()->getSymbol()->getText()});
-
- for (auto* fieldDeclaration :
- context->fieldListDeclaration()->fieldDeclaration()) {
- FieldNameAndType field = {
- fieldDeclaration->IDENTIFIER()->getSymbol()->getText(),
- GetType(fieldDeclaration->type())};
- struct_declaration->fields.push_back(field);
- }
-
- return implicit_cast<Declaration*>(struct_declaration);
-}
-
-void AstGenerator::visitSourceFile(SourceFileContext* context) {
- source_file_context_ = context;
- current_source_file_ = SourceFileMap::Get().AddSource(context->name);
- for (auto* declaration : context->file->children) {
- ast_.declarations().push_back(declaration->accept(this).as<Declaration*>());
- }
- source_file_context_ = nullptr;
-}
-
-SourcePosition AstGenerator::Pos(antlr4::ParserRuleContext* context) {
- antlr4::misc::Interval i = context->getSourceInterval();
- auto token = source_file_context_->tokens->get(i.a);
- int line = static_cast<int>(token->getLine());
- int column = static_cast<int>(token->getCharPositionInLine());
- return SourcePosition{current_source_file_, line, column};
-}
-
-} // namespace torque
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/torque/ast-generator.h b/deps/v8/src/torque/ast-generator.h
deleted file mode 100644
index 31eca57b91..0000000000
--- a/deps/v8/src/torque/ast-generator.h
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TORQUE_AST_GENERATOR_H_
-#define V8_TORQUE_AST_GENERATOR_H_
-
-#include "src/torque/TorqueBaseVisitor.h"
-#include "src/torque/ast.h"
-#include "src/torque/global-context.h"
-
-namespace v8 {
-namespace internal {
-namespace torque {
-
-class AstGenerator : public TorqueBaseVisitor {
- public:
- antlrcpp::Any visitParameterList(
- TorqueParser::ParameterListContext* context) override;
-
- antlrcpp::Any visitTypeList(TorqueParser::TypeListContext* context) override;
-
- antlrcpp::Any visitTypeListMaybeVarArgs(
- TorqueParser::TypeListMaybeVarArgsContext* context) override;
-
- antlrcpp::Any visitModuleDeclaration(
- TorqueParser::ModuleDeclarationContext* context) override;
-
- antlrcpp::Any visitMacroDeclaration(
- TorqueParser::MacroDeclarationContext* context) override;
-
- antlrcpp::Any visitBuiltinDeclaration(
- TorqueParser::BuiltinDeclarationContext* context) override;
-
- antlrcpp::Any visitExternalMacro(
- TorqueParser::ExternalMacroContext* context) override;
-
- antlrcpp::Any visitExternalBuiltin(
- TorqueParser::ExternalBuiltinContext* context) override;
-
- antlrcpp::Any visitExternalRuntime(
- TorqueParser::ExternalRuntimeContext* context) override;
-
- antlrcpp::Any visitConstDeclaration(
- TorqueParser::ConstDeclarationContext* context) override;
-
- antlrcpp::Any visitGenericSpecialization(
- TorqueParser::GenericSpecializationContext* context) override;
-
- antlrcpp::Any visitExternConstDeclaration(
- TorqueParser::ExternConstDeclarationContext* context) override;
-
- antlrcpp::Any visitTypeDeclaration(
- TorqueParser::TypeDeclarationContext* context) override;
-
- antlrcpp::Any visitTypeAliasDeclaration(
- TorqueParser::TypeAliasDeclarationContext* context) override;
-
- antlrcpp::Any visitVariableDeclaration(
- TorqueParser::VariableDeclarationContext* context) override;
-
- antlrcpp::Any visitVariableDeclarationWithInitialization(
- TorqueParser::VariableDeclarationWithInitializationContext* context)
- override;
-
- antlrcpp::Any visitHelperCall(
- TorqueParser::HelperCallContext* context) override;
-
- antlrcpp::Any visitHelperCallStatement(
- TorqueParser::HelperCallStatementContext* context) override;
-
- antlrcpp::Any visitStructExpression(
- TorqueParser::StructExpressionContext* context) override;
-
- antlrcpp::Any visitConditionalExpression(
- TorqueParser::ConditionalExpressionContext* context) override;
-
- antlrcpp::Any visitLogicalORExpression(
- TorqueParser::LogicalORExpressionContext* context) override;
-
- antlrcpp::Any visitLogicalANDExpression(
- TorqueParser::LogicalANDExpressionContext* context) override;
-
- antlrcpp::Any visitBitwiseExpression(
- TorqueParser::BitwiseExpressionContext* context) override;
-
- antlrcpp::Any visitEqualityExpression(
- TorqueParser::EqualityExpressionContext* context) override;
-
- antlrcpp::Any visitRelationalExpression(
- TorqueParser::RelationalExpressionContext* context) override;
-
- antlrcpp::Any visitShiftExpression(
- TorqueParser::ShiftExpressionContext* context) override;
-
- antlrcpp::Any visitAdditiveExpression(
- TorqueParser::AdditiveExpressionContext* context) override;
-
- antlrcpp::Any visitMultiplicativeExpression(
- TorqueParser::MultiplicativeExpressionContext* context) override;
-
- antlrcpp::Any visitUnaryExpression(
- TorqueParser::UnaryExpressionContext* context) override;
-
- antlrcpp::Any visitLocationExpression(
- TorqueParser::LocationExpressionContext* locationExpression) override;
-
- antlrcpp::Any visitIncrementDecrement(
- TorqueParser::IncrementDecrementContext* context) override;
-
- antlrcpp::Any visitAssignment(
- TorqueParser::AssignmentContext* context) override;
-
- antlrcpp::Any visitFunctionPointerExpression(
- TorqueParser::FunctionPointerExpressionContext* context) override;
-
- antlrcpp::Any visitPrimaryExpression(
- TorqueParser::PrimaryExpressionContext* context) override;
-
- antlrcpp::Any visitTryLabelStatement(
- TorqueParser::TryLabelStatementContext* context) override;
-
- antlrcpp::Any visitStatementScope(
- TorqueParser::StatementScopeContext* context) override;
-
- antlrcpp::Any visitExpressionStatement(
- TorqueParser::ExpressionStatementContext* context) override;
-
- antlrcpp::Any visitReturnStatement(
- TorqueParser::ReturnStatementContext* context) override;
-
- antlrcpp::Any visitGotoStatement(
- TorqueParser::GotoStatementContext* context) override;
-
- antlrcpp::Any visitIfStatement(
- TorqueParser::IfStatementContext* context) override;
-
- antlrcpp::Any visitWhileLoop(
- TorqueParser::WhileLoopContext* context) override;
-
- antlrcpp::Any visitBreakStatement(
- TorqueParser::BreakStatementContext* context) override;
-
- antlrcpp::Any visitContinueStatement(
- TorqueParser::ContinueStatementContext* context) override;
-
- antlrcpp::Any visitForLoop(TorqueParser::ForLoopContext* context) override;
-
- antlrcpp::Any visitForOfLoop(
- TorqueParser::ForOfLoopContext* context) override;
-
- antlrcpp::Any visitDiagnosticStatement(
- TorqueParser::DiagnosticStatementContext* context) override;
-
- antlrcpp::Any visitStructDeclaration(
- TorqueParser::StructDeclarationContext* context) override;
-
- antlrcpp::Any aggregateResult(antlrcpp::Any aggregate,
- const antlrcpp::Any& nextResult) override {
- if (aggregate.isNull())
- return std::move(const_cast<antlrcpp::Any&>(nextResult));
- if (nextResult.isNull()) return aggregate;
- UNREACHABLE();
- return {};
- }
-
- template <class T>
- T* RegisterNode(T* node) {
- ast_.AddNode(std::unique_ptr<AstNode>(node));
- return node;
- }
-
- LabelAndTypesVector GetOptionalLabelAndTypeList(
- TorqueParser::OptionalLabelListContext* context);
- TypeExpression* GetType(TorqueParser::TypeContext* context);
- TypeExpression* GetOptionalType(TorqueParser::OptionalTypeContext* context);
- std::vector<TypeExpression*> GetTypeVector(
- TorqueParser::TypeListContext* type_list);
-
- ParameterList GetOptionalParameterList(
- TorqueParser::ParameterListContext* context);
-
- Statement* GetOptionalHelperBody(TorqueParser::HelperBodyContext* context);
-
- void visitSourceFile(SourceFileContext* context);
-
- SourcePosition Pos(antlr4::ParserRuleContext* context);
-
- Ast GetAst() && { return std::move(ast_); }
-
- private:
- Ast ast_;
- SourceId current_source_file_;
- SourceFileContext* source_file_context_;
-};
-
-} // namespace torque
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TORQUE_AST_GENERATOR_H_
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index 6af444a56b..d9bb71a663 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -11,22 +11,12 @@
#include <vector>
#include "src/base/optional.h"
-#include "src/torque/contextual.h"
+#include "src/torque/source-positions.h"
namespace v8 {
namespace internal {
namespace torque {
-enum class SourceId : int {};
-
-struct SourcePosition {
- SourceId source;
- int line;
- int column;
-};
-
-DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
-
#define AST_EXPRESSION_NODE_KIND_LIST(V) \
V(CallExpression) \
V(StructExpression) \
@@ -39,7 +29,8 @@ DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
V(FieldAccessExpression) \
V(ElementAccessExpression) \
V(AssignmentExpression) \
- V(IncrementDecrementExpression)
+ V(IncrementDecrementExpression) \
+ V(AssumeTypeImpossibleExpression)
#define AST_TYPE_EXPRESSION_NODE_KIND_LIST(V) \
V(BasicTypeExpression) \
@@ -98,7 +89,7 @@ struct AstNode {
#undef ENUM_ITEM
};
- AstNode(Kind k, SourcePosition p) : kind(k), pos(p) {}
+ AstNode(Kind kind, SourcePosition pos) : kind(kind), pos(pos) {}
virtual ~AstNode() {}
const Kind kind;
@@ -136,36 +127,38 @@ struct AstNodeClassCheck {
}
struct Expression : AstNode {
- Expression(Kind k, SourcePosition p) : AstNode(k, p) {}
+ Expression(Kind kind, SourcePosition pos) : AstNode(kind, pos) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(Expression)
};
struct LocationExpression : Expression {
- LocationExpression(Kind k, SourcePosition p) : Expression(k, p) {}
+ LocationExpression(Kind kind, SourcePosition pos) : Expression(kind, pos) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(LocationExpression)
};
struct TypeExpression : AstNode {
- TypeExpression(Kind k, SourcePosition p) : AstNode(k, p) {}
+ TypeExpression(Kind kind, SourcePosition pos) : AstNode(kind, pos) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(TypeExpression)
};
struct Declaration : AstNode {
- Declaration(Kind k, SourcePosition p) : AstNode(k, p) {}
+ Declaration(Kind kind, SourcePosition pos) : AstNode(kind, pos) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(Declaration)
};
struct Statement : AstNode {
- Statement(Kind k, SourcePosition p) : AstNode(k, p) {}
+ Statement(Kind kind, SourcePosition pos) : AstNode(kind, pos) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(Statement)
};
class Module;
struct ModuleDeclaration : Declaration {
- ModuleDeclaration(AstNode::Kind kind, SourcePosition p,
- std::vector<Declaration*> d)
- : Declaration(kind, p), module(nullptr), declarations(std::move(d)) {}
+ ModuleDeclaration(AstNode::Kind kind, SourcePosition pos,
+ std::vector<Declaration*> declarations)
+ : Declaration(kind, pos),
+ module(nullptr),
+ declarations(std::move(declarations)) {}
virtual bool IsDefault() const = 0;
// virtual std::string GetName() const = 0;
void SetModule(Module* m) { module = m; }
@@ -176,48 +169,24 @@ struct ModuleDeclaration : Declaration {
struct DefaultModuleDeclaration : ModuleDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(DefaultModuleDeclaration)
- DefaultModuleDeclaration(SourcePosition p, std::vector<Declaration*> d)
- : ModuleDeclaration(kKind, p, d) {}
+ DefaultModuleDeclaration(SourcePosition pos,
+ std::vector<Declaration*> declarations)
+ : ModuleDeclaration(kKind, pos, declarations) {}
bool IsDefault() const override { return true; }
};
struct ExplicitModuleDeclaration : ModuleDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExplicitModuleDeclaration)
- ExplicitModuleDeclaration(SourcePosition p, std::string n,
- std::vector<Declaration*> d)
- : ModuleDeclaration(kKind, p, d), name(std::move(n)) {}
+ ExplicitModuleDeclaration(SourcePosition pos, std::string name,
+ std::vector<Declaration*> declarations)
+ : ModuleDeclaration(kKind, pos, declarations), name(std::move(name)) {}
bool IsDefault() const override { return false; }
std::string name;
};
-class SourceFileMap : public ContextualClass<SourceFileMap> {
- public:
- SourceFileMap() {}
- const std::string& GetSource(SourceId id) const {
- return sources_[static_cast<int>(id)];
- }
-
- std::string PositionAsString(SourcePosition pos) {
- return GetSource(pos.source) + ":" + std::to_string(pos.line) + ":" +
- std::to_string(pos.column);
- }
-
- SourceId AddSource(std::string path) {
- sources_.push_back(std::move(path));
- return static_cast<SourceId>(sources_.size() - 1);
- }
-
- private:
- std::vector<std::string> sources_;
-};
-
-inline std::string PositionAsString(SourcePosition pos) {
- return SourceFileMap::Get().PositionAsString(pos);
-}
-
class Ast {
public:
- Ast() : default_module_{SourcePosition(), {}} {}
+ Ast() : default_module_{SourcePosition{CurrentSourceFile::Get(), 0, 0}, {}} {}
std::vector<Declaration*>& declarations() {
return default_module_.declarations;
@@ -225,8 +194,11 @@ class Ast {
const std::vector<Declaration*>& declarations() const {
return default_module_.declarations;
}
- void AddNode(std::unique_ptr<AstNode> node) {
- nodes_.emplace_back(std::move(node));
+ template <class T>
+ T* AddNode(std::unique_ptr<T> node) {
+ T* result = node.get();
+ nodes_.push_back(std::move(node));
+ return result;
}
DefaultModuleDeclaration* default_module() { return &default_module_; }
@@ -237,10 +209,10 @@ class Ast {
struct IdentifierExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(IdentifierExpression)
- IdentifierExpression(SourcePosition p, std::string n,
- std::vector<TypeExpression*> args)
- : LocationExpression(kKind, p),
- name(std::move(n)),
+ IdentifierExpression(SourcePosition pos, std::string name,
+ std::vector<TypeExpression*> args = {})
+ : LocationExpression(kKind, pos),
+ name(std::move(name)),
generic_arguments(std::move(args)) {}
std::string name;
std::vector<TypeExpression*> generic_arguments;
@@ -248,14 +220,15 @@ struct IdentifierExpression : LocationExpression {
struct CallExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(CallExpression)
- CallExpression(SourcePosition p, std::string c, bool o,
- std::vector<TypeExpression*> ga, std::vector<Expression*> a,
- std::vector<std::string> l)
- : Expression(kKind, p),
- callee(p, std::move(c), std::move(ga)),
- is_operator(o),
- arguments(std::move(a)),
- labels(l) {}
+ CallExpression(SourcePosition pos, std::string callee, bool is_operator,
+ std::vector<TypeExpression*> generic_arguments,
+ std::vector<Expression*> arguments,
+ std::vector<std::string> labels)
+ : Expression(kKind, pos),
+ callee(pos, std::move(callee), std::move(generic_arguments)),
+ is_operator(is_operator),
+ arguments(std::move(arguments)),
+ labels(labels) {}
IdentifierExpression callee;
bool is_operator;
std::vector<Expression*> arguments;
@@ -264,33 +237,39 @@ struct CallExpression : Expression {
struct StructExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructExpression)
- StructExpression(SourcePosition p, std::string n, std::vector<Expression*> e)
- : Expression(kKind, p), name(n), expressions(std::move(e)) {}
+ StructExpression(SourcePosition pos, std::string name,
+ std::vector<Expression*> expressions)
+ : Expression(kKind, pos),
+ name(name),
+ expressions(std::move(expressions)) {}
std::string name;
std::vector<Expression*> expressions;
};
struct LogicalOrExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LogicalOrExpression)
- LogicalOrExpression(SourcePosition p, Expression* l, Expression* r)
- : Expression(kKind, p), left(l), right(r) {}
+ LogicalOrExpression(SourcePosition pos, Expression* left, Expression* right)
+ : Expression(kKind, pos), left(left), right(right) {}
Expression* left;
Expression* right;
};
struct LogicalAndExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LogicalAndExpression)
- LogicalAndExpression(SourcePosition p, Expression* l, Expression* r)
- : Expression(kKind, p), left(l), right(r) {}
+ LogicalAndExpression(SourcePosition pos, Expression* left, Expression* right)
+ : Expression(kKind, pos), left(left), right(right) {}
Expression* left;
Expression* right;
};
struct ConditionalExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ConditionalExpression)
- ConditionalExpression(SourcePosition p, Expression* c, Expression* t,
- Expression* f)
- : Expression(kKind, p), condition(c), if_true(t), if_false(f) {}
+ ConditionalExpression(SourcePosition pos, Expression* condition,
+ Expression* if_true, Expression* if_false)
+ : Expression(kKind, pos),
+ condition(condition),
+ if_true(if_true),
+ if_false(if_false) {}
Expression* condition;
Expression* if_true;
Expression* if_false;
@@ -298,39 +277,46 @@ struct ConditionalExpression : Expression {
struct StringLiteralExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StringLiteralExpression)
- StringLiteralExpression(SourcePosition p, std::string l)
- : Expression(kKind, p), literal(std::move(l)) {}
+ StringLiteralExpression(SourcePosition pos, std::string literal)
+ : Expression(kKind, pos), literal(std::move(literal)) {}
std::string literal;
};
struct NumberLiteralExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(NumberLiteralExpression)
- NumberLiteralExpression(SourcePosition p, std::string n)
- : Expression(kKind, p), number(std::move(n)) {}
+  NumberLiteralExpression(SourcePosition pos, std::string number)
+      : Expression(kKind, pos), number(std::move(number)) {}
std::string number;
};
struct ElementAccessExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ElementAccessExpression)
- ElementAccessExpression(SourcePosition p, Expression* a, Expression* i)
- : LocationExpression(kKind, p), array(a), index(i) {}
+ ElementAccessExpression(SourcePosition pos, Expression* array,
+ Expression* index)
+ : LocationExpression(kKind, pos), array(array), index(index) {}
Expression* array;
Expression* index;
};
struct FieldAccessExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(FieldAccessExpression)
- FieldAccessExpression(SourcePosition p, Expression* o, std::string f)
- : LocationExpression(kKind, p), object(o), field(std::move(f)) {}
+ FieldAccessExpression(SourcePosition pos, Expression* object,
+ std::string field)
+ : LocationExpression(kKind, pos),
+ object(object),
+ field(std::move(field)) {}
Expression* object;
std::string field;
};
struct AssignmentExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(AssignmentExpression)
- AssignmentExpression(SourcePosition p, LocationExpression* l,
- base::Optional<std::string> o, Expression* v)
- : Expression(kKind, p), location(l), op(std::move(o)), value(v) {}
+ AssignmentExpression(SourcePosition pos, LocationExpression* location,
+ base::Optional<std::string> op, Expression* value)
+ : Expression(kKind, pos),
+ location(location),
+ op(std::move(op)),
+ value(value) {}
LocationExpression* location;
base::Optional<std::string> op;
Expression* value;
@@ -340,34 +326,56 @@ enum class IncrementDecrementOperator { kIncrement, kDecrement };
struct IncrementDecrementExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(IncrementDecrementExpression)
- IncrementDecrementExpression(SourcePosition p, LocationExpression* l,
- IncrementDecrementOperator o, bool pf)
- : Expression(kKind, p), location(l), op(o), postfix(pf) {}
+ IncrementDecrementExpression(SourcePosition pos, LocationExpression* location,
+ IncrementDecrementOperator op, bool postfix)
+ : Expression(kKind, pos), location(location), op(op), postfix(postfix) {}
LocationExpression* location;
IncrementDecrementOperator op;
bool postfix;
};
+// This expression is only used in the desugaring of typeswitch, and it allows
+// us to bake in the static information that certain types are impossible at a
+// certain position in the control flow.
+// The result type is the type of {expression} minus the provided type.
+struct AssumeTypeImpossibleExpression : Expression {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(AssumeTypeImpossibleExpression)
+ AssumeTypeImpossibleExpression(SourcePosition pos,
+ TypeExpression* excluded_type,
+ Expression* expression)
+ : Expression(kKind, pos),
+ excluded_type(excluded_type),
+ expression(expression) {}
+ TypeExpression* excluded_type;
+ Expression* expression;
+};
+
struct ParameterList {
std::vector<std::string> names;
std::vector<TypeExpression*> types;
bool has_varargs;
std::string arguments_variable;
+
+ static ParameterList Empty() { return ParameterList{{}, {}, false, ""}; }
};
struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
- BasicTypeExpression(SourcePosition p, bool c, std::string n)
- : TypeExpression(kKind, p), is_constexpr(c), name(n) {}
+ BasicTypeExpression(SourcePosition pos, bool is_constexpr, std::string name)
+ : TypeExpression(kKind, pos), is_constexpr(is_constexpr), name(name) {}
bool is_constexpr;
std::string name;
};
struct FunctionTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(FunctionTypeExpression)
- FunctionTypeExpression(SourcePosition p, ParameterList pl, TypeExpression* r)
- : TypeExpression(kKind, p), parameters(pl), return_type(r) {}
- ParameterList parameters;
+ FunctionTypeExpression(SourcePosition pos,
+ std::vector<TypeExpression*> parameters,
+ TypeExpression* return_type)
+ : TypeExpression(kKind, pos),
+ parameters(parameters),
+ return_type(return_type) {}
+ std::vector<TypeExpression*> parameters;
TypeExpression* return_type;
};
@@ -381,20 +389,20 @@ struct UnionTypeExpression : TypeExpression {
struct ExpressionStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExpressionStatement)
- ExpressionStatement(SourcePosition p, Expression* e)
- : Statement(kKind, p), expression(e) {}
+ ExpressionStatement(SourcePosition pos, Expression* expression)
+ : Statement(kKind, pos), expression(expression) {}
Expression* expression;
};
struct IfStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(IfStatement)
- IfStatement(SourcePosition p, Expression* c, bool cexpr, Statement* t,
- base::Optional<Statement*> f)
- : Statement(kKind, p),
- condition(c),
- is_constexpr(cexpr),
- if_true(t),
- if_false(f) {}
+ IfStatement(SourcePosition pos, bool is_constexpr, Expression* condition,
+ Statement* if_true, base::Optional<Statement*> if_false)
+ : Statement(kKind, pos),
+ condition(condition),
+ is_constexpr(is_constexpr),
+ if_true(if_true),
+ if_false(if_false) {}
Expression* condition;
bool is_constexpr;
Statement* if_true;
@@ -403,23 +411,26 @@ struct IfStatement : Statement {
struct WhileStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(WhileStatement)
- WhileStatement(SourcePosition p, Expression* c, Statement* b)
- : Statement(kKind, p), condition(c), body(b) {}
+ WhileStatement(SourcePosition pos, Expression* condition, Statement* body)
+ : Statement(kKind, pos), condition(condition), body(body) {}
Expression* condition;
Statement* body;
};
struct ReturnStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ReturnStatement)
- ReturnStatement(SourcePosition p, base::Optional<Expression*> v)
- : Statement(kKind, p), value(v) {}
+ ReturnStatement(SourcePosition pos, base::Optional<Expression*> value)
+ : Statement(kKind, pos), value(value) {}
base::Optional<Expression*> value;
};
struct DebugStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(DebugStatement)
- DebugStatement(SourcePosition p, const std::string& r, bool n)
- : Statement(kKind, p), reason(r), never_continues(n) {}
+ DebugStatement(SourcePosition pos, const std::string& reason,
+ bool never_continues)
+ : Statement(kKind, pos),
+ reason(reason),
+ never_continues(never_continues) {}
std::string reason;
bool never_continues;
};
@@ -439,71 +450,86 @@ struct AssertStatement : Statement {
struct TailCallStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TailCallStatement)
- TailCallStatement(SourcePosition p, CallExpression* c)
- : Statement(kKind, p), call(c) {}
+ TailCallStatement(SourcePosition pos, CallExpression* call)
+ : Statement(kKind, pos), call(call) {}
CallExpression* call;
};
struct VarDeclarationStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(VarDeclarationStatement)
- VarDeclarationStatement(SourcePosition p, bool c, std::string n,
- TypeExpression* t, base::Optional<Expression*> i)
- : Statement(kKind, p),
- const_qualified(c),
- name(std::move(n)),
- type(t),
- initializer(i) {}
+ VarDeclarationStatement(
+ SourcePosition pos, bool const_qualified, std::string name,
+ base::Optional<TypeExpression*> type,
+ base::Optional<Expression*> initializer = base::nullopt)
+ : Statement(kKind, pos),
+ const_qualified(const_qualified),
+ name(std::move(name)),
+ type(type),
+ initializer(initializer) {}
bool const_qualified;
std::string name;
- TypeExpression* type;
+ base::Optional<TypeExpression*> type;
base::Optional<Expression*> initializer;
};
struct BreakStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BreakStatement)
- explicit BreakStatement(SourcePosition p) : Statement(kKind, p) {}
+ explicit BreakStatement(SourcePosition pos) : Statement(kKind, pos) {}
};
struct ContinueStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ContinueStatement)
- explicit ContinueStatement(SourcePosition p) : Statement(kKind, p) {}
+ explicit ContinueStatement(SourcePosition pos) : Statement(kKind, pos) {}
};
struct GotoStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(GotoStatement)
- GotoStatement(SourcePosition p, std::string l,
- const std::vector<Expression*>& a)
- : Statement(kKind, p), label(std::move(l)), arguments(std::move(a)) {}
+ GotoStatement(SourcePosition pos, std::string label,
+ const std::vector<Expression*>& arguments)
+ : Statement(kKind, pos),
+ label(std::move(label)),
+ arguments(std::move(arguments)) {}
std::string label;
std::vector<Expression*> arguments;
};
struct ForLoopStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ForLoopStatement)
- ForLoopStatement(SourcePosition p, base::Optional<VarDeclarationStatement*> d,
- Expression* t, Expression* a, Statement* b)
- : Statement(kKind, p),
- var_declaration(d),
- test(std::move(t)),
- action(std::move(a)),
- body(std::move(b)) {}
+ ForLoopStatement(SourcePosition pos, base::Optional<Statement*> declaration,
+ base::Optional<Expression*> test,
+ base::Optional<Expression*> action, Statement* body)
+ : Statement(kKind, pos),
+ var_declaration(),
+ test(std::move(test)),
+ action(std::move(action)),
+ body(std::move(body)) {
+ if (declaration)
+ var_declaration = VarDeclarationStatement::cast(*declaration);
+ }
base::Optional<VarDeclarationStatement*> var_declaration;
- Expression* test;
- Expression* action;
+ base::Optional<Expression*> test;
+ base::Optional<Expression*> action;
Statement* body;
};
+struct RangeExpression {
+ base::Optional<Expression*> begin;
+ base::Optional<Expression*> end;
+};
+
struct ForOfLoopStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ForOfLoopStatement)
- ForOfLoopStatement(SourcePosition p, VarDeclarationStatement* d,
- Expression* i, base::Optional<Expression*> bg,
- base::Optional<Expression*> e, Statement* bd)
- : Statement(kKind, p),
- var_declaration(d),
- iterable(std::move(i)),
- begin(std::move(bg)),
- end(std::move(e)),
- body(std::move(bd)) {}
+ ForOfLoopStatement(SourcePosition pos, Statement* decl, Expression* iterable,
+ base::Optional<RangeExpression> range, Statement* body)
+ : Statement(kKind, pos),
+ var_declaration(VarDeclarationStatement::cast(decl)),
+ iterable(iterable),
+ body(body) {
+ if (range) {
+ begin = range->begin;
+ end = range->end;
+ }
+ }
VarDeclarationStatement* var_declaration;
Expression* iterable;
base::Optional<Expression*> begin;
@@ -513,12 +539,12 @@ struct ForOfLoopStatement : Statement {
struct LabelBlock : AstNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LabelBlock)
- LabelBlock(SourcePosition p, const std::string& l,
- const ParameterList& p_list, Statement* b)
- : AstNode(kKind, p),
- label(std::move(l)),
- parameters(p_list),
- body(std::move(b)) {}
+ LabelBlock(SourcePosition pos, const std::string& label,
+ const ParameterList& parameters, Statement* body)
+ : AstNode(kKind, pos),
+ label(std::move(label)),
+ parameters(parameters),
+ body(std::move(body)) {}
std::string label;
ParameterList parameters;
Statement* body;
@@ -526,28 +552,37 @@ struct LabelBlock : AstNode {
struct TryLabelStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TryLabelStatement)
- TryLabelStatement(SourcePosition p, Statement* t)
- : Statement(kKind, p), try_block(std::move(t)) {}
+ TryLabelStatement(SourcePosition pos, Statement* try_block,
+ std::vector<LabelBlock*> label_blocks)
+ : Statement(kKind, pos),
+ try_block(try_block),
+ label_blocks(std::move(label_blocks)) {}
Statement* try_block;
std::vector<LabelBlock*> label_blocks;
};
struct BlockStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BlockStatement)
- BlockStatement(SourcePosition p, bool d, std::vector<Statement*> s)
- : Statement(kKind, p), deferred(d), statements(std::move(s)) {}
+ explicit BlockStatement(SourcePosition pos, bool deferred = false,
+ std::vector<Statement*> statements = {})
+ : Statement(kKind, pos),
+ deferred(deferred),
+ statements(std::move(statements)) {}
bool deferred;
std::vector<Statement*> statements;
};
struct TypeDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeDeclaration)
- TypeDeclaration(SourcePosition p, std::string n,
- base::Optional<std::string> e, base::Optional<std::string> g)
- : Declaration(kKind, p),
- name(std::move(n)),
- extends(std::move(e)),
- generates(std::move(g)) {}
+ TypeDeclaration(SourcePosition pos, std::string name,
+ base::Optional<std::string> extends,
+ base::Optional<std::string> generates,
+ base::Optional<std::string> constexpr_generates)
+ : Declaration(kKind, pos),
+ name(std::move(name)),
+ extends(std::move(extends)),
+ generates(std::move(generates)),
+ constexpr_generates(std::move(constexpr_generates)) {}
std::string name;
base::Optional<std::string> extends;
base::Optional<std::string> generates;
@@ -556,13 +591,14 @@ struct TypeDeclaration : Declaration {
struct TypeAliasDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeAliasDeclaration)
- TypeAliasDeclaration(SourcePosition p, std::string n, TypeExpression* t)
- : Declaration(kKind, p), name(std::move(n)), type(t) {}
+ TypeAliasDeclaration(SourcePosition pos, std::string name,
+ TypeExpression* type)
+ : Declaration(kKind, pos), name(std::move(name)), type(type) {}
std::string name;
TypeExpression* type;
};
-struct FieldNameAndType {
+struct NameAndTypeExpression {
std::string name;
TypeExpression* type;
};
@@ -581,12 +617,12 @@ struct CallableNodeSignature {
};
struct CallableNode : AstNode {
- CallableNode(AstNode::Kind kind, SourcePosition p, std::string n,
- ParameterList pl, TypeExpression* r,
- const LabelAndTypesVector& l)
- : AstNode(kind, p),
- name(std::move(n)),
- signature(new CallableNodeSignature{pl, r, l}) {}
+ CallableNode(AstNode::Kind kind, SourcePosition pos, std::string name,
+ ParameterList parameters, TypeExpression* return_type,
+ const LabelAndTypesVector& labels)
+ : AstNode(kind, pos),
+ name(std::move(name)),
+ signature(new CallableNodeSignature{parameters, return_type, labels}) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(CallableNode)
std::string name;
std::unique_ptr<CallableNodeSignature> signature;
@@ -594,61 +630,79 @@ struct CallableNode : AstNode {
struct MacroDeclaration : CallableNode {
DEFINE_AST_NODE_INNER_BOILERPLATE(MacroDeclaration)
- MacroDeclaration(AstNode::Kind kind, SourcePosition p, std::string n,
- base::Optional<std::string> o, ParameterList pl,
- TypeExpression* r, const LabelAndTypesVector& l)
- : CallableNode(kind, p, n, pl, r, l), op(std::move(o)) {}
+ MacroDeclaration(AstNode::Kind kind, SourcePosition pos, std::string name,
+ base::Optional<std::string> op, ParameterList parameters,
+ TypeExpression* return_type,
+ const LabelAndTypesVector& labels)
+ : CallableNode(kind, pos, name, parameters, return_type, labels),
+ op(std::move(op)) {}
base::Optional<std::string> op;
};
struct ExternalMacroDeclaration : MacroDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalMacroDeclaration)
- ExternalMacroDeclaration(SourcePosition p, std::string n,
- base::Optional<std::string> o, ParameterList pl,
- TypeExpression* r, const LabelAndTypesVector& l)
- : MacroDeclaration(kKind, p, n, o, pl, r, l) {}
+ ExternalMacroDeclaration(SourcePosition pos, std::string name,
+ base::Optional<std::string> op,
+ ParameterList parameters,
+ TypeExpression* return_type,
+ const LabelAndTypesVector& labels)
+ : MacroDeclaration(kKind, pos, name, op, parameters, return_type,
+ labels) {}
};
struct TorqueMacroDeclaration : MacroDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TorqueMacroDeclaration)
- TorqueMacroDeclaration(SourcePosition p, std::string n, ParameterList pl,
- TypeExpression* r, const LabelAndTypesVector& l)
- : MacroDeclaration(kKind, p, n, {}, pl, r, l) {}
+ TorqueMacroDeclaration(SourcePosition pos, std::string name,
+ base::Optional<std::string> op,
+ ParameterList parameters, TypeExpression* return_type,
+ const LabelAndTypesVector& labels)
+ : MacroDeclaration(kKind, pos, name, op, parameters, return_type,
+ labels) {}
};
struct BuiltinDeclaration : CallableNode {
- BuiltinDeclaration(AstNode::Kind kind, SourcePosition p, bool j,
- std::string n, ParameterList pl, TypeExpression* r)
- : CallableNode(kind, p, n, pl, r, {}), javascript_linkage(j) {}
+ BuiltinDeclaration(AstNode::Kind kind, SourcePosition pos,
+ bool javascript_linkage, std::string name,
+ ParameterList parameters, TypeExpression* return_type)
+ : CallableNode(kind, pos, name, parameters, return_type, {}),
+ javascript_linkage(javascript_linkage) {}
bool javascript_linkage;
};
struct ExternalBuiltinDeclaration : BuiltinDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalBuiltinDeclaration)
- ExternalBuiltinDeclaration(SourcePosition p, bool j, std::string n,
- ParameterList pl, TypeExpression* r)
- : BuiltinDeclaration(kKind, p, j, n, pl, r) {}
+ ExternalBuiltinDeclaration(SourcePosition pos, bool javascript_linkage,
+ std::string name, ParameterList parameters,
+ TypeExpression* return_type)
+ : BuiltinDeclaration(kKind, pos, javascript_linkage, name, parameters,
+ return_type) {}
};
struct TorqueBuiltinDeclaration : BuiltinDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TorqueBuiltinDeclaration)
- TorqueBuiltinDeclaration(SourcePosition p, bool j, std::string n,
- ParameterList pl, TypeExpression* r)
- : BuiltinDeclaration(kKind, p, j, n, pl, r) {}
+ TorqueBuiltinDeclaration(SourcePosition pos, bool javascript_linkage,
+ std::string name, ParameterList parameters,
+ TypeExpression* return_type)
+ : BuiltinDeclaration(kKind, pos, javascript_linkage, name, parameters,
+ return_type) {}
};
struct ExternalRuntimeDeclaration : CallableNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternalRuntimeDeclaration)
- ExternalRuntimeDeclaration(SourcePosition p, std::string n, ParameterList pl,
- TypeExpression* r)
- : CallableNode(kKind, p, n, pl, r, {}) {}
+ ExternalRuntimeDeclaration(SourcePosition pos, std::string name,
+ ParameterList parameters,
+ TypeExpression* return_type)
+ : CallableNode(kKind, pos, name, parameters, return_type, {}) {}
};
struct ConstDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ConstDeclaration)
- ConstDeclaration(SourcePosition p, std::string n, TypeExpression* r,
- Expression* e)
- : Declaration(kKind, p), name(std::move(n)), type(r), expression(e) {}
+ ConstDeclaration(SourcePosition pos, std::string name, TypeExpression* type,
+ Expression* expression)
+ : Declaration(kKind, pos),
+ name(std::move(name)),
+ type(type),
+ expression(expression) {}
std::string name;
TypeExpression* type;
Expression* expression;
@@ -656,21 +710,22 @@ struct ConstDeclaration : Declaration {
struct StandardDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StandardDeclaration)
- StandardDeclaration(SourcePosition p, CallableNode* c, Statement* b)
- : Declaration(kKind, p), callable(c), body(b) {}
+ StandardDeclaration(SourcePosition pos, CallableNode* callable,
+ Statement* body)
+ : Declaration(kKind, pos), callable(callable), body(body) {}
CallableNode* callable;
Statement* body;
};
struct GenericDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(GenericDeclaration)
- GenericDeclaration(SourcePosition p, CallableNode* c,
- std::vector<std::string> gp,
- base::Optional<Statement*> b = base::nullopt)
- : Declaration(kKind, p),
- callable(c),
- generic_parameters(std::move(gp)),
- body(b) {}
+ GenericDeclaration(SourcePosition pos, CallableNode* callable,
+ std::vector<std::string> generic_parameters,
+ base::Optional<Statement*> body = base::nullopt)
+ : Declaration(kKind, pos),
+ callable(callable),
+ generic_parameters(std::move(generic_parameters)),
+ body(body) {}
CallableNode* callable;
std::vector<std::string> generic_parameters;
base::Optional<Statement*> body;
@@ -678,15 +733,16 @@ struct GenericDeclaration : Declaration {
struct SpecializationDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(SpecializationDeclaration)
- SpecializationDeclaration(SourcePosition p, std::string n, bool e,
- std::vector<TypeExpression*> gp, ParameterList pl,
- TypeExpression* r, LabelAndTypesVector l,
- Statement* b)
- : Declaration(kKind, p),
- name(std::move(n)),
- external(e),
- generic_parameters(gp),
- signature(new CallableNodeSignature{pl, r, l}),
+ SpecializationDeclaration(SourcePosition pos, std::string name,
+ std::vector<TypeExpression*> generic_parameters,
+ ParameterList parameters,
+ TypeExpression* return_type,
+ LabelAndTypesVector labels, Statement* b)
+ : Declaration(kKind, pos),
+ name(std::move(name)),
+ external(false),
+ generic_parameters(generic_parameters),
+ signature(new CallableNodeSignature{parameters, return_type, labels}),
body(b) {}
std::string name;
bool external;
@@ -697,12 +753,12 @@ struct SpecializationDeclaration : Declaration {
struct ExternConstDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ExternConstDeclaration)
- ExternConstDeclaration(SourcePosition p, std::string n, TypeExpression* t,
- std::string l)
- : Declaration(kKind, p),
- name(std::move(n)),
- type(t),
- literal(std::move(l)) {}
+ ExternConstDeclaration(SourcePosition pos, std::string name,
+ TypeExpression* type, std::string literal)
+ : Declaration(kKind, pos),
+ name(std::move(name)),
+ type(type),
+ literal(std::move(literal)) {}
std::string name;
TypeExpression* type;
std::string literal;
@@ -710,10 +766,13 @@ struct ExternConstDeclaration : Declaration {
struct StructDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructDeclaration)
- StructDeclaration(SourcePosition p, std::string n)
- : Declaration(kKind, p), name(std::move(n)) {}
+ StructDeclaration(SourcePosition pos, std::string name,
+ std::vector<NameAndTypeExpression> fields)
+ : Declaration(kKind, pos),
+ name(std::move(name)),
+ fields(std::move(fields)) {}
std::string name;
- std::vector<FieldNameAndType> fields;
+ std::vector<NameAndTypeExpression> fields;
};
#define ENUM_ITEM(name) \
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/torque/contextual.h
index 9cd56a2ed9..8fb02e1072 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/torque/contextual.h
@@ -27,6 +27,8 @@ namespace torque {
template <class Derived, class VarType>
class ContextualVariable {
public:
+ using VariableType = VarType;
+
// A {Scope} contains a new object of type {VarType} and gives
// ContextualVariable::Get() access to it. Upon destruction, the contextual
// variable is restored to the state before the {Scope} was created. Scopes
@@ -36,13 +38,13 @@ class ContextualVariable {
public:
template <class... Args>
explicit Scope(Args&&... args)
- : current_(std::forward<Args>(args)...), previous_(top_) {
- top_ = &current_;
+ : current_(std::forward<Args>(args)...), previous_(Top()) {
+ Top() = &current_;
}
~Scope() {
// Ensure stack discipline.
- DCHECK_EQ(&current_, top_);
- top_ = previous_;
+ DCHECK_EQ(&current_, Top());
+ Top() = previous_;
}
private:
@@ -59,22 +61,27 @@ class ContextualVariable {
// Access the most recent active {Scope}. There has to be an active {Scope}
// for this contextual variable.
static VarType& Get() {
- DCHECK_NOT_NULL(top_);
- return *top_;
+ DCHECK_NOT_NULL(Top());
+ return *Top();
}
private:
- static thread_local VarType* top_;
+ V8_EXPORT_PRIVATE static VarType*& Top();
};
-template <class Derived, class VarType>
-thread_local VarType* ContextualVariable<Derived, VarType>::top_ = nullptr;
-
// Usage: DECLARE_CONTEXTUAL_VARIABLE(VarName, VarType)
#define DECLARE_CONTEXTUAL_VARIABLE(VarName, ...) \
struct VarName \
: v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {};
+#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
+ template <> \
+ V8_EXPORT_PRIVATE VarName::VariableType*& \
+ ContextualVariable<VarName, VarName::VariableType>::Top() { \
+ static thread_local VarName::VariableType* top = nullptr; \
+ return top; \
+ }
+
// By inheriting from {ContextualClass} a class can become a contextual variable
// of itself, which is very similar to a singleton.
template <class T>
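
The hunk above replaces the static thread_local member with a Top() accessor that each contextual variable now defines once via DEFINE_CONTEXTUAL_VARIABLE, so the thread-local slot lives in exactly one translation unit and can be exported. A minimal usage sketch follows; it is illustration only and not part of the patch: CurrentIndent, Indent() and Example() are invented names, and the definitions are assumed to live inside the v8::internal::torque namespace.

// In a header: declare the contextual variable.
DECLARE_CONTEXTUAL_VARIABLE(CurrentIndent, int);

// In exactly one .cc file: define its thread-local Top() slot.
DEFINE_CONTEXTUAL_VARIABLE(CurrentIndent)

int Indent() {
  // Requires an active Scope; Get() DCHECKs that Top() is non-null.
  return CurrentIndent::Get();
}

void Example() {
  CurrentIndent::Scope outer(2);    // Top() now points at this value.
  CHECK_EQ(2, Indent());
  {
    CurrentIndent::Scope inner(4);  // Shadows the outer value.
    CHECK_EQ(4, Indent());
  }                                 // Destructor restores the outer value.
  CHECK_EQ(2, Indent());
}
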
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 122f7de9b4..bf0b9a9d7a 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -81,6 +81,8 @@ std::ostream& operator<<(std::ostream& os, const Generic& g) {
return os;
}
+size_t Label::next_id_ = 0;
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index b8abcca801..315ff8f636 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -20,6 +20,7 @@ namespace torque {
class Scope;
class ScopeChain;
+class Generic;
class Declarable {
public:
@@ -223,35 +224,37 @@ class Callable : public Declarable {
}
void IncrementReturns() { ++returns_; }
bool HasReturns() const { return returns_; }
+ base::Optional<Generic*> generic() const { return generic_; }
protected:
Callable(Declarable::Kind kind, const std::string& name,
- const Signature& signature)
- : Declarable(kind), name_(name), signature_(signature), returns_(0) {}
+ const Signature& signature, base::Optional<Generic*> generic)
+ : Declarable(kind),
+ name_(name),
+ signature_(signature),
+ returns_(0),
+ generic_(generic) {}
private:
std::string name_;
Signature signature_;
size_t returns_;
+ base::Optional<Generic*> generic_;
};
class Macro : public Callable {
public:
DECLARE_DECLARABLE_BOILERPLATE(Macro, macro);
- protected:
- Macro(Declarable::Kind type, const std::string& name,
- const Signature& signature)
- : Callable(type, name, signature) {
+ private:
+ friend class Declarations;
+ Macro(const std::string& name, const Signature& signature,
+ base::Optional<Generic*> generic)
+ : Callable(Declarable::kMacro, name, signature, generic) {
if (signature.parameter_types.var_args) {
ReportError("Varargs are not supported for macros.");
}
}
-
- private:
- friend class Declarations;
- Macro(const std::string& name, const Signature& signature)
- : Macro(Declarable::kMacro, name, signature) {}
};
class MacroList : public Declarable {
@@ -283,8 +286,8 @@ class Builtin : public Callable {
private:
friend class Declarations;
Builtin(const std::string& name, Builtin::Kind kind, bool external,
- const Signature& signature)
- : Callable(Declarable::kBuiltin, name, signature),
+ const Signature& signature, base::Optional<Generic*> generic)
+ : Callable(Declarable::kBuiltin, name, signature, generic),
kind_(kind),
external_(external) {}
@@ -298,8 +301,9 @@ class RuntimeFunction : public Callable {
private:
friend class Declarations;
- RuntimeFunction(const std::string& name, const Signature& signature)
- : Callable(Declarable::kRuntimeFunction, name, signature) {}
+ RuntimeFunction(const std::string& name, const Signature& signature,
+ base::Optional<Generic*> generic)
+ : Callable(Declarable::kRuntimeFunction, name, signature, generic) {}
};
class Generic : public Declarable {
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index b83dbb3bc3..2f3a1fa869 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -299,15 +299,22 @@ Parameter* DeclarationVisitor::DeclareParameter(const std::string& name,
void DeclarationVisitor::Visit(VarDeclarationStatement* stmt) {
std::string variable_name = stmt->name;
- const Type* type = declarations()->GetType(stmt->type);
- if (type->IsConstexpr() && !stmt->const_qualified) {
- ReportError(
- "cannot declare variable with constexpr type. Use 'const' instead.");
- }
- DeclareVariable(variable_name, type, stmt->const_qualified);
- if (global_context_.verbose()) {
- std::cout << "declared variable " << variable_name << " with type " << *type
- << "\n";
+ if (!stmt->const_qualified) {
+ if (!stmt->type) {
+ ReportError(
+ "variable declaration is missing type. Only 'const' bindings can "
+ "infer the type.");
+ }
+ const Type* type = declarations()->GetType(*stmt->type);
+ if (type->IsConstexpr()) {
+ ReportError(
+ "cannot declare variable with constexpr type. Use 'const' instead.");
+ }
+ DeclareVariable(variable_name, type, stmt->const_qualified);
+ if (global_context_.verbose()) {
+ std::cout << "declared variable " << variable_name << " with type "
+ << *type << "\n";
+ }
}
// const qualified variables are required to be initialized properly.
@@ -434,9 +441,15 @@ void DeclarationVisitor::Visit(ForLoopStatement* stmt) {
Declarations::NodeScopeActivator scope(declarations(), stmt);
if (stmt->var_declaration) Visit(*stmt->var_declaration);
PushControlSplit();
- DeclareExpressionForBranch(stmt->test);
+
+ // Same as DeclareExpressionForBranch, but without the extra scope.
+  // If no test expression is present, we cannot use it for the scope.
+ declarations()->DeclareLabel(kTrueLabelName);
+ declarations()->DeclareLabel(kFalseLabelName);
+ if (stmt->test) Visit(*stmt->test);
+
Visit(stmt->body);
- Visit(stmt->action);
+ if (stmt->action) Visit(*stmt->action);
auto changed_vars = PopControlSplit();
global_context_.AddControlSplitChangedVariables(
stmt, declarations()->GetCurrentSpecializationTypeNamesVector(),
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index b37ecb2860..e1d5439018 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -137,6 +137,8 @@ class DeclarationVisitor : public FileVisitor {
Visit(expr->location);
}
+ void Visit(AssumeTypeImpossibleExpression* expr) { Visit(expr->expression); }
+
void Visit(TryLabelStatement* stmt);
void GenerateHeader(std::string& file_name);
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index 95764e3029..9b2964d210 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -43,10 +43,13 @@ Scope* Declarations::GetGenericScope(Generic* generic,
return result;
}
+bool Declarations::IsDeclaredInCurrentScope(const std::string& name) {
+ return chain_.ShallowLookup(name) != nullptr;
+}
+
void Declarations::CheckAlreadyDeclared(const std::string& name,
const char* new_type) {
- auto i = chain_.ShallowLookup(name);
- if (i != nullptr) {
+ if (IsDeclaredInCurrentScope(name)) {
std::stringstream s;
s << "cannot redeclare " << name << " (type " << new_type << ")";
ReportError(s.str());
@@ -85,7 +88,7 @@ const Type* Declarations::GetType(TypeExpression* type_expression) {
} else {
auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
TypeVector argument_types;
- for (TypeExpression* type_exp : function_type_exp->parameters.types) {
+ for (TypeExpression* type_exp : function_type_exp->parameters) {
argument_types.push_back(GetType(type_exp));
}
return TypeOracle::GetFunctionPointerType(
@@ -269,8 +272,8 @@ MacroList* Declarations::GetMacroListForName(const std::string& name,
Macro* Declarations::DeclareMacro(const std::string& name,
const Signature& signature,
base::Optional<std::string> op) {
- Macro* macro =
- RegisterDeclarable(std::unique_ptr<Macro>(new Macro(name, signature)));
+ Macro* macro = RegisterDeclarable(
+ std::unique_ptr<Macro>(new Macro(name, signature, GetCurrentGeneric())));
GetMacroListForName(name, signature)->AddMacro(macro);
if (op) GetMacroListForName(*op, signature)->AddMacro(macro);
return macro;
@@ -280,7 +283,8 @@ Builtin* Declarations::DeclareBuiltin(const std::string& name,
Builtin::Kind kind, bool external,
const Signature& signature) {
CheckAlreadyDeclared(name, "builtin");
- Builtin* result = new Builtin(name, kind, external, signature);
+ Builtin* result =
+ new Builtin(name, kind, external, signature, GetCurrentGeneric());
Declare(name, std::unique_ptr<Declarable>(result));
return result;
}
@@ -288,7 +292,8 @@ Builtin* Declarations::DeclareBuiltin(const std::string& name,
RuntimeFunction* Declarations::DeclareRuntimeFunction(
const std::string& name, const Signature& signature) {
CheckAlreadyDeclared(name, "runtime function");
- RuntimeFunction* result = new RuntimeFunction(name, signature);
+ RuntimeFunction* result =
+ new RuntimeFunction(name, signature, GetCurrentGeneric());
Declare(name, std::unique_ptr<Declarable>(result));
return result;
}
@@ -367,6 +372,13 @@ TypeVector Declarations::GetCurrentSpecializationTypeNamesVector() {
return result;
}
+base::Optional<Generic*> Declarations::GetCurrentGeneric() {
+ if (current_generic_specialization_ != nullptr) {
+ return current_generic_specialization_->first;
+ }
+ return base::nullopt;
+}
+
std::string GetGeneratedCallableName(const std::string& name,
const TypeVector& specialized_types) {
std::string result = name;
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 76a436e43e..880b3e75a6 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -108,6 +108,7 @@ class Declarations {
GenericDeclaration* generic);
TypeVector GetCurrentSpecializationTypeNamesVector();
+ base::Optional<Generic*> GetCurrentGeneric();
ScopeChain::Snapshot GetScopeChainSnapshot() { return chain_.TaskSnapshot(); }
@@ -115,6 +116,8 @@ class Declarations {
return chain_.GetLiveVariables();
}
+ bool IsDeclaredInCurrentScope(const std::string& name);
+
Statement* next_body() const { return next_body_; }
void PrintScopeChain() { chain_.Print(); }
diff --git a/deps/v8/src/torque/earley-parser.cc b/deps/v8/src/torque/earley-parser.cc
new file mode 100644
index 0000000000..69ecf3c580
--- /dev/null
+++ b/deps/v8/src/torque/earley-parser.cc
@@ -0,0 +1,303 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "src/torque/ast.h"
+#include "src/torque/earley-parser.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+namespace {
+
+void UpdateSourcePosition(InputPosition from, InputPosition to,
+ SourcePosition* pos) {
+ while (from != to) {
+ if (*from == '\n') {
+ pos->line += 1;
+ pos->column = 0;
+ } else {
+ pos->column += 1;
+ }
+ ++from;
+ }
+}
+
+} // namespace
+
+base::Optional<ParseResult> Rule::RunAction(const Item* completed_item,
+ const LexerResult& tokens) const {
+ std::vector<ParseResult> results;
+ for (const Item* child : completed_item->Children()) {
+ if (!child) continue;
+ base::Optional<ParseResult> child_result =
+ child->left()->RunAction(child, tokens);
+ if (child_result) results.push_back(std::move(*child_result));
+ }
+ MatchedInput matched_input = completed_item->GetMatchedInput(tokens);
+ CurrentSourcePosition::Scope pos_scope(matched_input.pos);
+ ParseResultIterator iterator(std::move(results), matched_input);
+ return action_(&iterator);
+}
+
+Symbol& Symbol::operator=(std::initializer_list<Rule> rules) {
+ rules_.clear();
+ for (const Rule& rule : rules) {
+ AddRule(rule);
+ }
+ return *this;
+}
+
+std::vector<const Item*> Item::Children() const {
+ std::vector<const Item*> children;
+ for (const Item* current = this; current->prev_; current = current->prev_) {
+ children.push_back(current->child_);
+ }
+ // The above loop collects the child nodes in reversed order.
+ std::reverse(children.begin(), children.end());
+ DCHECK_EQ(children.size(), right().size());
+ return children;
+}
+
+std::string Item::SplitByChildren(const LexerResult& tokens) const {
+ if (right().size() == 1) {
+ if (const Item* child = Children()[0])
+ return child->SplitByChildren(tokens);
+ }
+ std::stringstream s;
+ bool first = true;
+ for (const Item* item : Children()) {
+ if (!item) continue;
+ if (!first) s << " ";
+ s << item->GetMatchedInput(tokens).ToString();
+ first = false;
+ }
+ return s.str();
+}
+
+void Item::CheckAmbiguity(const Item& other, const LexerResult& tokens) const {
+ DCHECK(*this == other);
+ if (child_ != other.child_) {
+ std::stringstream s;
+ s << "Ambiguous grammer rules for \""
+ << child_->GetMatchedInput(tokens).ToString() << "\":\n "
+ << child_->SplitByChildren(tokens) << "\nvs\n "
+ << other.child_->SplitByChildren(tokens);
+ ReportError(s.str());
+ }
+ if (prev_ != other.prev_) {
+ std::stringstream s;
+ s << "Ambiguous grammer rules for \"" << GetMatchedInput(tokens).ToString()
+ << "\":\n " << SplitByChildren(tokens) << " ...\nvs\n "
+ << other.SplitByChildren(tokens) << " ...";
+ ReportError(s.str());
+ }
+}
+
+LexerResult Lexer::RunLexer(const std::string& input) {
+ LexerResult result;
+ InputPosition const begin = input.c_str();
+ InputPosition const end = begin + input.size();
+ InputPosition pos = begin;
+ InputPosition token_start = pos;
+ CurrentSourcePosition::Scope scope(
+ SourcePosition{CurrentSourceFile::Get(), 0, 0});
+ match_whitespace_(&pos);
+ while (pos != end) {
+ UpdateSourcePosition(token_start, pos, &CurrentSourcePosition::Get());
+ token_start = pos;
+ Symbol* symbol = MatchToken(&pos, end);
+ if (!symbol) {
+ ReportError("Lexer Error: unknown token " +
+ StringLiteralQuote(std::string(
+ token_start, token_start + std::min<ptrdiff_t>(
+ end - token_start, 10))));
+ }
+ result.token_symbols.push_back(symbol);
+ result.token_contents.push_back(
+ {token_start, pos, CurrentSourcePosition::Get()});
+ match_whitespace_(&pos);
+ }
+ UpdateSourcePosition(token_start, pos, &CurrentSourcePosition::Get());
+ // Add an additional token position to simplify corner cases.
+ result.token_contents.push_back({pos, pos, CurrentSourcePosition::Get()});
+ return result;
+}
+
+Symbol* Lexer::MatchToken(InputPosition* pos, InputPosition end) {
+ InputPosition token_start = *pos;
+ Symbol* symbol = nullptr;
+ // Find longest matching pattern.
+ for (std::pair<const PatternFunction, Symbol>& pair : patterns_) {
+ InputPosition token_end = token_start;
+ PatternFunction matchPattern = pair.first;
+ if (matchPattern(&token_end) && token_end > *pos) {
+ *pos = token_end;
+ symbol = &pair.second;
+ }
+ }
+ // Check if matched pattern coincides with a keyword. Prefer the keyword in
+ // this case.
+ if (*pos != token_start) {
+ auto found_keyword = keywords_.find(std::string(token_start, *pos));
+ if (found_keyword != keywords_.end()) {
+ return &found_keyword->second;
+ }
+ return symbol;
+ }
+ // Now check for a keyword (that doesn't overlap with a pattern).
+ // Iterate from the end to ensure that if one keyword is a prefix of another,
+ // we first try to match the longer one.
+ for (auto it = keywords_.rbegin(); it != keywords_.rend(); ++it) {
+ const std::string& keyword = it->first;
+ if (static_cast<size_t>(end - *pos) < keyword.size()) continue;
+ if (keyword == std::string(*pos, *pos + keyword.size())) {
+ *pos += keyword.size();
+ return &it->second;
+ }
+ }
+ return nullptr;
+}
+
+// This is an implementation of Earley's parsing algorithm
+// (https://en.wikipedia.org/wiki/Earley_parser).
+const Item* RunEarleyAlgorithm(
+ Symbol* start, const LexerResult& tokens,
+ std::unordered_set<Item, base::hash<Item>>* processed) {
+ // Worklist for items at the current position.
+ std::vector<Item> worklist;
+ // Worklist for items at the next position.
+ std::vector<Item> future_items;
+ CurrentSourcePosition::Scope source_position(
+ SourcePosition{CurrentSourceFile::Get(), 0, 0});
+ std::vector<const Item*> completed_items;
+ std::unordered_map<std::pair<size_t, Symbol*>, std::set<const Item*>,
+ base::hash<std::pair<size_t, Symbol*>>>
+ waiting;
+
+ std::vector<const Item*> debug_trace;
+
+ // Start with one top_level symbol mapping to the start symbol of the grammar.
+ // This simplifies things because the start symbol might have several
+ // rules.
+ Symbol top_level;
+ top_level.AddRule(Rule({start}));
+ worklist.push_back(Item{top_level.rule(0), 0, 0, 0});
+
+ size_t input_length = tokens.token_symbols.size();
+
+ for (size_t pos = 0; pos <= input_length; ++pos) {
+ while (!worklist.empty()) {
+ auto insert_result = processed->insert(worklist.back());
+ const Item& item = *insert_result.first;
+ DCHECK_EQ(pos, item.pos());
+ MatchedInput last_token = tokens.token_contents[pos];
+ CurrentSourcePosition::Get() = last_token.pos;
+ bool is_new = insert_result.second;
+ if (!is_new) item.CheckAmbiguity(worklist.back(), tokens);
+ worklist.pop_back();
+ if (!is_new) continue;
+
+ debug_trace.push_back(&item);
+ if (item.IsComplete()) {
+ // 'Complete' phase: Advance all items that were waiting to match this
+ // symbol next.
+ for (const Item* parent : waiting[{item.start(), item.left()}]) {
+ worklist.push_back(parent->Advance(pos, &item));
+ }
+ } else {
+ Symbol* next = item.NextSymbol();
+ // 'Scan' phase: Check if {next} is the next symbol in the input (this
+ // is never the case if {next} is a non-terminal).
+ if (pos < tokens.token_symbols.size() &&
+ tokens.token_symbols[pos] == next) {
+ future_items.push_back(item.Advance(pos + 1, nullptr));
+ }
+ // 'Predict' phase: Add items for every rule of the non-terminal.
+ if (!next->IsTerminal()) {
+ // Remember that this item is waiting for completion with {next}.
+ waiting[{pos, next}].insert(&item);
+ }
+ for (size_t i = 0; i < next->rule_number(); ++i) {
+ Rule* rule = next->rule(i);
+ auto already_completed =
+ processed->find(Item{rule, rule->right().size(), pos, pos});
+ // As discussed in section 3 of
+          // Aycock, John, and R. Nigel Horspool. "Practical Earley
+          // Parsing." The Computer Journal 45.6 (2002): 620-630.
+ // Earley parsing has the following problem with epsilon rules:
+ // When we complete an item that started at the current position
+ // (that is, it matched zero tokens), we might not yet have
+ // predicted all items it can complete with. Thus we check for the
+ // existence of such items here and complete them immediately.
+ if (already_completed != processed->end()) {
+ worklist.push_back(item.Advance(pos, &*already_completed));
+ } else {
+ worklist.push_back(Item{rule, 0, pos, pos});
+ }
+ }
+ }
+ }
+ std::swap(worklist, future_items);
+ }
+
+ auto final_item =
+ processed->find(Item{top_level.rule(0), 1, 0, input_length});
+ if (final_item != processed->end()) {
+ // Success: The {top_level} rule matches the complete input.
+ return final_item->Children()[0];
+ }
+ std::string reason;
+ const Item& last_item = *debug_trace.back();
+ if (last_item.pos() < tokens.token_symbols.size()) {
+ std::string next_token = tokens.token_contents[last_item.pos()].ToString();
+ reason = "unexpected token \"" + next_token + "\"";
+ } else {
+ reason = "unexpected end of input";
+ }
+ ReportError("Parser Error: " + reason);
+}
+
+// static
+bool Grammar::MatchChar(int (*char_class)(int), InputPosition* pos) {
+ if (**pos && char_class(static_cast<unsigned char>(**pos))) {
+ ++*pos;
+ return true;
+ }
+ return false;
+}
+
+// static
+bool Grammar::MatchChar(bool (*char_class)(char), InputPosition* pos) {
+ if (**pos && char_class(**pos)) {
+ ++*pos;
+ return true;
+ }
+ return false;
+}
+
+// static
+bool Grammar::MatchString(const char* s, InputPosition* pos) {
+ InputPosition current = *pos;
+ for (; *s != 0; ++s, ++current) {
+ if (*s != *current) return false;
+ }
+ *pos = current;
+ return true;
+}
+
+// static
+bool Grammar::MatchAnyChar(InputPosition* pos) {
+ return MatchChar([](char c) { return true; }, pos);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
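
To make the lexer's longest-match rule and the keyword preference in MatchToken concrete, here is a small hypothetical sketch, not part of the patch: the whitespace and identifier patterns are invented for the example, <cctype> is assumed for std::isalpha, and RunLexer additionally assumes an active CurrentSourceFile::Scope, as the Torque compiler driver provides.

void LexerSketch() {
  Lexer lexer;
  lexer.SetWhitespace([](InputPosition* pos) {
    bool matched = false;
    while (**pos == ' ') {
      ++*pos;
      matched = true;
    }
    return matched;
  });
  Symbol* identifier = lexer.Pattern([](InputPosition* pos) {
    bool matched = false;
    while (std::isalpha(static_cast<unsigned char>(**pos))) {
      ++*pos;
      matched = true;
    }
    return matched;
  });
  Symbol* kw_let = lexer.Token("let");
  // "let" matches both the identifier pattern and the keyword, so MatchToken
  // prefers the keyword. "letter" matches only the pattern (the keyword check
  // sees the full matched string "letter"), so it stays an identifier.
  LexerResult tokens = lexer.RunLexer("let letter x");
  CHECK_EQ(kw_let, tokens.token_symbols[0]);
  CHECK_EQ(identifier, tokens.token_symbols[1]);
  CHECK_EQ(identifier, tokens.token_symbols[2]);
}
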
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
new file mode 100644
index 0000000000..1e77734ab6
--- /dev/null
+++ b/deps/v8/src/torque/earley-parser.h
@@ -0,0 +1,473 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_EARLEY_PARSER_H_
+#define V8_TORQUE_EARLEY_PARSER_H_
+
+#include <map>
+#include <vector>
+
+#include "src/base/optional.h"
+#include "src/torque/contextual.h"
+#include "src/torque/source-positions.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class Symbol;
+class Item;
+
+class ParseResultHolderBase {
+ public:
+ enum class TypeId;
+ virtual ~ParseResultHolderBase() = default;
+ template <class T>
+ T& Cast();
+ template <class T>
+ const T& Cast() const;
+
+ protected:
+ explicit ParseResultHolderBase(TypeId type_id) : type_id_(type_id) {
+ // MSVC wrongly complains about type_id_ being an unused private field.
+ USE(type_id_);
+ }
+
+ private:
+ const TypeId type_id_;
+};
+
+using ParseResultTypeId = ParseResultHolderBase::TypeId;
+
+template <class T>
+class ParseResultHolder : public ParseResultHolderBase {
+ public:
+ explicit ParseResultHolder(T value)
+ : ParseResultHolderBase(id), value_(std::move(value)) {}
+
+ private:
+ V8_EXPORT_PRIVATE static const TypeId id;
+ friend class ParseResultHolderBase;
+ T value_;
+};
+
+template <class T>
+T& ParseResultHolderBase::Cast() {
+ CHECK_EQ(ParseResultHolder<T>::id, type_id_);
+ return static_cast<ParseResultHolder<T>*>(this)->value_;
+}
+
+template <class T>
+const T& ParseResultHolderBase::Cast() const {
+ CHECK_EQ(ParseResultHolder<T>::id, type_id_);
+ return static_cast<const ParseResultHolder<T>*>(this)->value_;
+}
+
+class ParseResult {
+ public:
+ template <class T>
+ explicit ParseResult(T x) : value_(new ParseResultHolder<T>(std::move(x))) {}
+
+ template <class T>
+ const T& Cast() const {
+ return value_->Cast<T>();
+ }
+ template <class T>
+ T& Cast() {
+ return value_->Cast<T>();
+ }
+
+ private:
+ std::unique_ptr<ParseResultHolderBase> value_;
+};
+
+using InputPosition = const char*;
+
+struct MatchedInput {
+ MatchedInput(InputPosition begin, InputPosition end, SourcePosition pos)
+ : begin(begin), end(end), pos(pos) {}
+ InputPosition begin;
+ InputPosition end;
+ SourcePosition pos;
+ std::string ToString() const { return {begin, end}; }
+};
+
+class ParseResultIterator {
+ public:
+ explicit ParseResultIterator(std::vector<ParseResult> results,
+ MatchedInput matched_input)
+ : results_(std::move(results)), matched_input_(matched_input) {}
+ ~ParseResultIterator() {
+ // Check that all parse results have been used.
+ CHECK_EQ(results_.size(), i_);
+ }
+
+ ParseResult Next() {
+ CHECK_LT(i_, results_.size());
+ return std::move(results_[i_++]);
+ }
+ template <class T>
+ T NextAs() {
+ return std::move(Next().Cast<T>());
+ }
+ bool HasNext() const { return i_ < results_.size(); }
+
+ const MatchedInput& matched_input() const { return matched_input_; }
+
+ private:
+ std::vector<ParseResult> results_;
+ size_t i_ = 0;
+ MatchedInput matched_input_;
+
+ DISALLOW_COPY_AND_MOVE_AND_ASSIGN(ParseResultIterator);
+};
+
+struct LexerResult {
+ std::vector<Symbol*> token_symbols;
+ std::vector<MatchedInput> token_contents;
+};
+
+using Action =
+ base::Optional<ParseResult> (*)(ParseResultIterator* child_results);
+
+inline base::Optional<ParseResult> DefaultAction(
+ ParseResultIterator* child_results) {
+ if (!child_results->HasNext()) return base::nullopt;
+ return child_results->Next();
+}
+
+// A rule of the context-free grammar. Each rule can have an action attached to
+// it, which is executed after the parsing is finished.
+class Rule final {
+ public:
+ explicit Rule(std::vector<Symbol*> right_hand_side,
+ Action action = DefaultAction)
+ : right_hand_side_(std::move(right_hand_side)), action_(action) {}
+
+ Symbol* left() const {
+ DCHECK_NOT_NULL(left_hand_side_);
+ return left_hand_side_;
+ }
+ const std::vector<Symbol*>& right() const { return right_hand_side_; }
+
+ void SetLeftHandSide(Symbol* left_hand_side) {
+ DCHECK_NULL(left_hand_side_);
+ left_hand_side_ = left_hand_side;
+ }
+
+ V8_EXPORT_PRIVATE base::Optional<ParseResult> RunAction(
+ const Item* completed_item, const LexerResult& tokens) const;
+
+ private:
+ Symbol* left_hand_side_ = nullptr;
+ std::vector<Symbol*> right_hand_side_;
+ Action action_;
+};
+
+// A Symbol represents a terminal or a non-terminal of the grammar.
+// It stores the list of rules, which have this symbol as the
+// left-hand side.
+// Terminals have an empty list of rules; they are created by the Lexer
+// instead of from rules.
+// Symbols need to reside at stable memory addresses, because the addresses are
+// used in the parser.
+class Symbol {
+ public:
+ Symbol() : Symbol({}) {}
+ Symbol(std::initializer_list<Rule> rules) { *this = rules; }
+
+ V8_EXPORT_PRIVATE Symbol& operator=(std::initializer_list<Rule> rules);
+
+ bool IsTerminal() const { return rules_.empty(); }
+ Rule* rule(size_t index) const { return rules_[index].get(); }
+ size_t rule_number() const { return rules_.size(); }
+
+ void AddRule(const Rule& rule) {
+ rules_.push_back(base::make_unique<Rule>(rule));
+ rules_.back()->SetLeftHandSide(this);
+ }
+
+ V8_EXPORT_PRIVATE base::Optional<ParseResult> RunAction(
+ const Item* item, const LexerResult& tokens);
+
+ private:
+ std::vector<std::unique_ptr<Rule>> rules_;
+
+ // Disallow copying and moving to ensure Symbol has a stable address.
+ DISALLOW_COPY_AND_MOVE_AND_ASSIGN(Symbol);
+};
+
+// Items are the core data structure of Earley's algorithm.
+// They consist of a (partially) matched rule, a marked position inside of the
+// right-hand side of the rule (traditionally written as a dot) and an input
+// range from {start} to {pos} that matches the symbols of the right-hand side
+// that are left of the mark. In addition, they store a child and a left-sibling
+// pointer to reconstruct the AST in the end.
+class Item {
+ public:
+ Item(const Rule* rule, size_t mark, size_t start, size_t pos)
+ : rule_(rule), mark_(mark), start_(start), pos_(pos) {
+ DCHECK_LE(mark_, right().size());
+ }
+
+ // A complete item has the mark at the right end, which means the input range
+ // matches the complete rule.
+ bool IsComplete() const {
+ DCHECK_LE(mark_, right().size());
+ return mark_ == right().size();
+ }
+
+ // The symbol right after the mark is expected at {pos} for this item to
+ // advance.
+ Symbol* NextSymbol() const {
+ DCHECK(!IsComplete());
+ DCHECK_LT(mark_, right().size());
+ return right()[mark_];
+ }
+
+ // We successfully parsed NextSymbol() between {pos} and {new_pos}.
+ // If NextSymbol() was a non-terminal, then {child} is a pointer to a
+ // completed item for this parse.
+ // We create a new item, which moves the mark one forward.
+ Item Advance(size_t new_pos, const Item* child = nullptr) const {
+ if (child) {
+ DCHECK(child->IsComplete());
+ DCHECK_EQ(pos(), child->start());
+ DCHECK_EQ(new_pos, child->pos());
+ DCHECK_EQ(NextSymbol(), child->left());
+ }
+ Item result(rule_, mark_ + 1, start_, new_pos);
+ result.prev_ = this;
+ result.child_ = child;
+ return result;
+ }
+
+ // Collect the items representing the AST children of this completed item.
+ std::vector<const Item*> Children() const;
+ // The matched input separated according to the next branching AST level.
+ std::string SplitByChildren(const LexerResult& tokens) const;
+ // Check if {other} results in the same AST as this Item.
+ void CheckAmbiguity(const Item& other, const LexerResult& tokens) const;
+
+ MatchedInput GetMatchedInput(const LexerResult& tokens) const {
+ return {tokens.token_contents[start_].begin,
+ start_ == pos_ ? tokens.token_contents[start_].begin
+ : tokens.token_contents[pos_ - 1].end,
+ tokens.token_contents[start_].pos};
+ }
+
+ // We exclude {prev_} and {child_} from equality and hash computations,
+ // because they are just globally unique data associated with an item.
+ bool operator==(const Item& other) const {
+ return rule_ == other.rule_ && mark_ == other.mark_ &&
+ start_ == other.start_ && pos_ == other.pos_;
+ }
+
+ friend size_t hash_value(const Item& i) {
+ return base::hash_combine(i.rule_, i.mark_, i.start_, i.pos_);
+ }
+
+ const Rule* rule() const { return rule_; }
+ Symbol* left() const { return rule_->left(); }
+ const std::vector<Symbol*>& right() const { return rule_->right(); }
+ size_t pos() const { return pos_; }
+ size_t start() const { return start_; }
+
+ private:
+ const Rule* rule_;
+ size_t mark_;
+ size_t start_;
+ size_t pos_;
+
+ const Item* prev_ = nullptr;
+ const Item* child_ = nullptr;
+};
+
+inline base::Optional<ParseResult> Symbol::RunAction(
+ const Item* item, const LexerResult& tokens) {
+ DCHECK(item->IsComplete());
+ DCHECK_EQ(item->left(), this);
+ return item->rule()->RunAction(item, tokens);
+}
+
+V8_EXPORT_PRIVATE const Item* RunEarleyAlgorithm(
+ Symbol* start, const LexerResult& tokens,
+ std::unordered_set<Item, base::hash<Item>>* processed);
+
+inline base::Optional<ParseResult> ParseTokens(Symbol* start,
+ const LexerResult& tokens) {
+ std::unordered_set<Item, base::hash<Item>> table;
+ const Item* final_item = RunEarleyAlgorithm(start, tokens, &table);
+ return start->RunAction(final_item, tokens);
+}
+
+// The lexical syntax is dynamically defined while building the grammar by
+// adding patterns and keywords to the Lexer.
+// The term keyword here can stand for any fixed character sequence, including
+// operators and parentheses.
+// Each pattern or keyword automatically gets a terminal symbol associated with
+// it. These symbols form the result of the lexing.
+// Patterns and keywords are matched using the longest match principle. If the
+// longest matching pattern coincides with a keyword, the keyword symbol is
+// chosen instead of the pattern.
+// In addition, there is a single whitespace pattern which is consumed but does
+// not become part of the token list.
+class Lexer {
+ public:
+ // Functions to define patterns. They try to match starting from {pos}. If
+ // successful, they return true and advance {pos}. Otherwise, {pos} stays
+ // unchanged.
+ using PatternFunction = bool (*)(InputPosition* pos);
+
+ void SetWhitespace(PatternFunction whitespace) {
+ match_whitespace_ = whitespace;
+ }
+
+ Symbol* Pattern(PatternFunction pattern) { return &patterns_[pattern]; }
+ Symbol* Token(const std::string& keyword) { return &keywords_[keyword]; }
+ V8_EXPORT_PRIVATE LexerResult RunLexer(const std::string& input);
+
+ private:
+ PatternFunction match_whitespace_ = [](InputPosition*) { return false; };
+ std::map<PatternFunction, Symbol> patterns_;
+ std::map<std::string, Symbol> keywords_;
+ Symbol* MatchToken(InputPosition* pos, InputPosition end);
+};
+
+// A grammar can have a result, which is the result of the start symbol.
+// Grammar is intended to be subclassed, with Symbol members forming the
+// mutually recursive rules of the grammar.
+class Grammar {
+ public:
+ using PatternFunction = Lexer::PatternFunction;
+
+ explicit Grammar(Symbol* start) : start_(start) {}
+
+ base::Optional<ParseResult> Parse(const std::string& input) {
+ LexerResult tokens = lexer().RunLexer(input);
+ return ParseTokens(start_, tokens);
+ }
+
+ protected:
+ Symbol* Token(const std::string& s) { return lexer_.Token(s); }
+ Symbol* Pattern(PatternFunction pattern) { return lexer_.Pattern(pattern); }
+ void SetWhitespace(PatternFunction ws) { lexer_.SetWhitespace(ws); }
+
+ // NewSymbol() allocates a fresh symbol and stores it in the current grammar.
+ // This is necessary to define helpers that create new symbols.
+ Symbol* NewSymbol(std::initializer_list<Rule> rules = {}) {
+ Symbol* result = new Symbol(rules);
+ generated_symbols_.push_back(std::unique_ptr<Symbol>(result));
+ return result;
+ }
+
+ // Helper functions to define lexer patterns. If they match, they return true
+ // and advance {pos}. Otherwise, {pos} is unchanged.
+ V8_EXPORT_PRIVATE static bool MatchChar(int (*char_class)(int),
+ InputPosition* pos);
+ V8_EXPORT_PRIVATE static bool MatchChar(bool (*char_class)(char),
+ InputPosition* pos);
+ V8_EXPORT_PRIVATE static bool MatchAnyChar(InputPosition* pos);
+ V8_EXPORT_PRIVATE static bool MatchString(const char* s, InputPosition* pos);
+
+  // The action YieldMatchedInput() produces the input matched by the rule
+  // as result.
+ static base::Optional<ParseResult> YieldMatchedInput(
+ ParseResultIterator* child_results) {
+ return ParseResult{child_results->matched_input().ToString()};
+ }
+
+ // Create a new symbol to parse the given sequence of symbols.
+ // At most one of the symbols can return a result.
+ Symbol* Sequence(std::vector<Symbol*> symbols) {
+ return NewSymbol({Rule(std::move(symbols))});
+ }
+
+ template <class T, T value>
+ static base::Optional<ParseResult> YieldIntegralConstant(
+ ParseResultIterator* child_results) {
+ return ParseResult{value};
+ }
+
+ template <class T>
+ static base::Optional<ParseResult> YieldDefaultValue(
+ ParseResultIterator* child_results) {
+ return ParseResult{T{}};
+ }
+
+ template <class From, class To>
+ static base::Optional<ParseResult> CastParseResult(
+ ParseResultIterator* child_results) {
+ To result = std::move(child_results->NextAs<From>());
+ return ParseResult{std::move(result)};
+ }
+
+  // Try to parse {s} and return the result of type {Result} cast to {T}.
+ // Otherwise, the result is a default-constructed {T}.
+ template <class T, class Result = T>
+ Symbol* TryOrDefault(Symbol* s) {
+ return NewSymbol({Rule({s}, CastParseResult<Result, T>),
+ Rule({}, YieldDefaultValue<T>)});
+ }
+
+ template <class T>
+ static base::Optional<ParseResult> MakeSingletonVector(
+ ParseResultIterator* child_results) {
+ T x = child_results->NextAs<T>();
+ std::vector<T> result;
+ result.push_back(std::move(x));
+ return ParseResult{std::move(result)};
+ }
+
+ template <class T>
+ static base::Optional<ParseResult> MakeExtendedVector(
+ ParseResultIterator* child_results) {
+ std::vector<T> l = child_results->NextAs<std::vector<T>>();
+ T x = child_results->NextAs<T>();
+ l.push_back(std::move(x));
+ return ParseResult{std::move(l)};
+ }
+
+ // For example, NonemptyList(Token("A"), Token(",")) parses any of
+ // A or A,A or A,A,A and so on.
+ template <class T>
+ Symbol* NonemptyList(Symbol* element,
+ base::Optional<Symbol*> separator = {}) {
+ Symbol* list = NewSymbol();
+ *list = {Rule({element}, MakeSingletonVector<T>),
+ separator
+ ? Rule({list, *separator, element}, MakeExtendedVector<T>)
+ : Rule({list, element}, MakeExtendedVector<T>)};
+ return list;
+ }
+
+ template <class T>
+ Symbol* List(Symbol* element, base::Optional<Symbol*> separator = {}) {
+ return TryOrDefault<std::vector<T>>(NonemptyList<T>(element, separator));
+ }
+
+ template <class T>
+ Symbol* Optional(Symbol* x) {
+ return TryOrDefault<base::Optional<T>, T>(x);
+ }
+
+ Symbol* CheckIf(Symbol* x) {
+ return NewSymbol({Rule({x}, YieldIntegralConstant<bool, true>),
+ Rule({}, YieldIntegralConstant<bool, false>)});
+ }
+
+ Lexer& lexer() { return lexer_; }
+
+ private:
+ Lexer lexer_;
+ std::vector<std::unique_ptr<Symbol>> generated_symbols_;
+ Symbol* start_;
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_EARLEY_PARSER_H_
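Taken together, Lexer and Grammar form a small parser-construction interface: a Grammar subclass registers keywords and lexer patterns, wires Symbol members up with Rule lists, and Parse() then runs the lexer and the Earley parser from the start symbol. A minimal sketch of such a subclass follows; SimpleGrammar, MatchWhitespace, MatchDecimalLiteral and the number/file rules are invented for illustration and are not part of this patch.

class SimpleGrammar : public Grammar {
 public:
  SimpleGrammar() : Grammar(&file) { SetWhitespace(MatchWhitespace); }

 private:
  // Consume runs of whitespace between tokens; never fails.
  static bool MatchWhitespace(InputPosition* pos) {
    while (MatchChar(std::isspace, pos)) {
    }
    return true;
  }

  // Match one or more decimal digits; {pos} only advances on success.
  static bool MatchDecimalLiteral(InputPosition* pos) {
    InputPosition current = *pos;
    if (!MatchChar(std::isdigit, &current)) return false;
    while (MatchChar(std::isdigit, &current)) {
    }
    *pos = current;
    return true;
  }

  // A number token whose matched characters become a std::string result.
  Symbol number = {Rule({Pattern(MatchDecimalLiteral)}, YieldMatchedInput)};

  // A non-empty, comma-separated list of numbers, yielding a
  // std::vector<std::string>.
  Symbol* numberList = NonemptyList<std::string>(&number, Token(","));

  // Start symbol: forward the list result unchanged.
  Symbol file = {Rule({numberList},
                      CastParseResult<std::vector<std::string>,
                                      std::vector<std::string>>)};
};

With these definitions, SimpleGrammar().Parse("1, 2, 3") lexes three number tokens and two "," tokens, and the resulting ParseResult holds the vector {"1", "2", "3"}.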
diff --git a/deps/v8/src/torque/file-visitor.h b/deps/v8/src/torque/file-visitor.h
index f1b7c4bbef..d306392446 100644
--- a/deps/v8/src/torque/file-visitor.h
+++ b/deps/v8/src/torque/file-visitor.h
@@ -13,8 +13,6 @@
#include "src/torque/types.h"
#include "src/torque/utils.h"
-#include "src/torque/TorqueBaseVisitor.h"
-
namespace v8 {
namespace internal {
namespace torque {
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index 4dc5534950..33573f7175 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -5,8 +5,6 @@
#ifndef V8_TORQUE_GLOBAL_CONTEXT_H_
#define V8_TORQUE_GLOBAL_CONTEXT_H_
-#include "src/torque/TorqueLexer.h"
-#include "src/torque/TorqueParser.h"
#include "src/torque/declarable.h"
#include "src/torque/declarations.h"
#include "src/torque/scope.h"
@@ -19,6 +17,8 @@ namespace torque {
class GlobalContext;
class Scope;
class TypeOracle;
+class Builtin;
+class Label;
class Module {
public:
@@ -38,15 +38,6 @@ class Module {
std::stringstream source_stream_;
};
-struct SourceFileContext {
- std::string name;
- std::unique_ptr<antlr4::ANTLRFileStream> stream;
- std::unique_ptr<TorqueLexer> lexer;
- std::unique_ptr<antlr4::CommonTokenStream> tokens;
- std::unique_ptr<TorqueParser> parser;
- TorqueParser::FileContext* file;
-};
-
class GlobalContext {
public:
explicit GlobalContext(Ast ast)
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 58fe638a13..5044d914db 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -7,8 +7,6 @@
#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
-#include "include/v8.h"
-
namespace v8 {
namespace internal {
namespace torque {
@@ -22,9 +20,8 @@ VisitResult ImplementationVisitor::Visit(Expression* expr) {
AST_EXPRESSION_NODE_KIND_LIST(ENUM_ITEM)
#undef ENUM_ITEM
default:
- UNIMPLEMENTED();
+ UNREACHABLE();
}
- return VisitResult();
}
const Type* ImplementationVisitor::Visit(Statement* stmt) {
@@ -80,58 +77,56 @@ void ImplementationVisitor::BeginModuleFile(Module* module) {
source << "#include \"src/builtins/builtins-" +
DashifyString(module->name()) + "-gen.h\"";
}
- source << std::endl;
- source << "#include \"src/builtins/builtins-utils-gen.h\"" << std::endl;
- source << "#include \"src/builtins/builtins.h\"" << std::endl;
- source << "#include \"src/code-factory.h\"" << std::endl;
- source << "#include \"src/elements-kind.h\"" << std::endl;
- source << "#include \"src/heap/factory-inl.h\"" << std::endl;
- source << "#include \"src/objects.h\"" << std::endl;
+ source << "\n";
+ source << "#include \"src/builtins/builtins-utils-gen.h\"\n";
+ source << "#include \"src/builtins/builtins.h\"\n";
+ source << "#include \"src/code-factory.h\"\n";
+ source << "#include \"src/elements-kind.h\"\n";
+ source << "#include \"src/heap/factory-inl.h\"\n";
+ source << "#include \"src/objects.h\"\n";
+ source << "#include \"src/objects/bigint.h\"\n";
source << "#include \"builtins-" + DashifyString(module->name()) +
- "-from-dsl-gen.h\"";
- source << std::endl << std::endl;
+ "-from-dsl-gen.h\"\n\n";
- source << "namespace v8 {" << std::endl
- << "namespace internal {" << std::endl
- << "" << std::endl
- << "using Node = compiler::Node;" << std::endl
- << "" << std::endl;
+ source << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n"
+ << "using Node = compiler::Node;\n"
+ << "\n";
std::string upper_name(module->name());
transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
::toupper);
std::string headerDefine =
std::string("V8_TORQUE_") + upper_name + "_FROM_DSL_BASE_H__";
- header << "#ifndef " << headerDefine << std::endl;
- header << "#define " << headerDefine << std::endl << std::endl;
+ header << "#ifndef " << headerDefine << "\n";
+ header << "#define " << headerDefine << "\n\n";
if (module->IsDefault()) {
header << "#include \"src/code-stub-assembler.h\"";
} else {
header << "#include \"src/builtins/builtins-" +
- DashifyString(module->name()) + "-gen.h\""
- << std::endl;
+ DashifyString(module->name()) + "-gen.h\"\n";
}
- header << std::endl << std::endl;
+ header << "\n\n";
- header << "namespace v8 {" << std::endl
- << "namespace internal {" << std::endl
- << "" << std::endl;
+ header << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
header << "class " << GetDSLAssemblerName(module) << ": public "
- << GetBaseAssemblerName(module) << " {" << std::endl;
- header << " public:" << std::endl;
+ << GetBaseAssemblerName(module) << " {\n";
+ header << " public:\n";
header << " explicit " << GetDSLAssemblerName(module)
<< "(compiler::CodeAssemblerState* state) : "
- << GetBaseAssemblerName(module) << "(state) {}" << std::endl;
+ << GetBaseAssemblerName(module) << "(state) {}\n";
- header << std::endl;
- header << " using Node = compiler::Node;" << std::endl;
- header << " template <class T>" << std::endl;
- header << " using TNode = compiler::TNode<T>;" << std::endl;
- header << " template <class T>" << std::endl;
- header << " using SloppyTNode = compiler::SloppyTNode<T>;" << std::endl
- << std::endl;
+ header << "\n";
+ header << " using Node = compiler::Node;\n";
+ header << " template <class T>\n";
+ header << " using TNode = compiler::TNode<T>;\n";
+ header << " template <class T>\n";
+ header << " using SloppyTNode = compiler::SloppyTNode<T>;\n\n";
}
void ImplementationVisitor::EndModuleFile(Module* module) {
@@ -146,15 +141,15 @@ void ImplementationVisitor::EndModuleFile(Module* module) {
std::string headerDefine =
std::string("V8_TORQUE_") + upper_name + "_FROM_DSL_BASE_H__";
- source << "} // namepsace internal" << std::endl
- << "} // namespace v8" << std::endl
- << "" << std::endl;
+ source << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
- header << "};" << std::endl << "" << std::endl;
- header << "} // namepsace internal" << std::endl
- << "} // namespace v8" << std::endl
- << "" << std::endl;
- header << "#endif // " << headerDefine << std::endl;
+ header << "};\n\n";
+ header << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ header << "#endif // " << headerDefine << "\n";
}
void ImplementationVisitor::Visit(ModuleDeclaration* decl) {
@@ -215,16 +210,16 @@ void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
if (body != nullptr) {
header_out() << " ";
GenerateMacroFunctionDeclaration(header_out(), "", macro);
- header_out() << ";" << std::endl;
+ header_out() << ";\n";
GenerateMacroFunctionDeclaration(
source_out(), GetDSLAssemblerName(CurrentModule()) + "::", macro);
- source_out() << " {" << std::endl;
+ source_out() << " {\n";
const Variable* result_var = nullptr;
if (macro->HasReturnValue()) {
result_var =
- GenerateVariableDeclaration(decl, kReturnValueVariable, {}, {});
+ GeneratePredeclaredVariableDeclaration(kReturnValueVariable, {});
}
Label* macro_end = declarations()->DeclareLabel("macro_end");
GenerateLabelDefinition(macro_end, decl);
@@ -262,9 +257,9 @@ void ImplementationVisitor::Visit(TorqueMacroDeclaration* decl,
source_out() << "return "
<< RValueFlattenStructs(
VisitResult(result_var->type(), result_var))
- << ";" << std::endl;
+ << ";\n";
}
- source_out() << "}" << std::endl << std::endl;
+ source_out() << "}\n\n";
}
}
@@ -273,7 +268,7 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
std::string name = GetGeneratedCallableName(
decl->name, declarations()->GetCurrentSpecializationTypeNamesVector());
source_out() << "TF_BUILTIN(" << name << ", "
- << GetDSLAssemblerName(CurrentModule()) << ") {" << std::endl;
+ << GetDSLAssemblerName(CurrentModule()) << ") {\n";
Builtin* builtin = declarations()->LookupBuiltin(name);
CurrentCallableActivator activator(global_context_, builtin, decl);
@@ -283,9 +278,9 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
GenerateIndent();
source_out() << "TNode<Context> " << val->value()
<< " = UncheckedCast<Context>(Parameter("
- << "Descriptor::kContext));" << std::endl;
+ << "Descriptor::kContext));\n";
GenerateIndent();
- source_out() << "USE(" << val->value() << ");" << std::endl;
+ source_out() << "USE(" << val->value() << ");\n";
size_t first = 1;
if (builtin->IsVarArgsJavaScript()) {
@@ -296,29 +291,27 @@ void ImplementationVisitor::Visit(TorqueBuiltinDeclaration* decl,
std::string arguments_name = arguments->value();
GenerateIndent();
source_out()
- << "Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);"
- << std::endl;
+ << "Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
GenerateIndent();
source_out() << "CodeStubArguments arguments_impl(this, "
- "ChangeInt32ToIntPtr(argc));"
- << std::endl;
+ "ChangeInt32ToIntPtr(argc));\n";
const Value* receiver =
declarations()->LookupValue(decl->signature->parameters.names[1]);
GenerateIndent();
source_out() << "TNode<Object> " << receiver->value()
- << " = arguments_impl.GetReceiver();" << std::endl;
+ << " = arguments_impl.GetReceiver();\n";
GenerateIndent();
- source_out() << "auto arguments = &arguments_impl;" << std::endl;
+ source_out() << "auto arguments = &arguments_impl;\n";
GenerateIndent();
- source_out() << "USE(arguments);" << std::endl;
+ source_out() << "USE(arguments);\n";
GenerateIndent();
- source_out() << "USE(" << receiver->value() << ");" << std::endl;
+ source_out() << "USE(" << receiver->value() << ");\n";
first = 2;
}
GenerateParameterList(decl->signature->parameters.names, first);
Visit(body);
- source_out() << "}" << std::endl << std::endl;
+ source_out() << "}\n\n";
}
const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
@@ -326,7 +319,10 @@ const Type* ImplementationVisitor::Visit(VarDeclarationStatement* stmt) {
if (stmt->initializer) {
init_result = Visit(*stmt->initializer);
}
- GenerateVariableDeclaration(stmt, stmt->name, {}, init_result);
+ base::Optional<const Type*> type;
+ if (stmt->type) type = declarations()->GetType(*stmt->type);
+ GenerateVariableDeclaration(stmt, stmt->name, stmt->const_qualified, type,
+ init_result);
return TypeOracle::GetVoidType();
}
@@ -347,27 +343,27 @@ VisitResult ImplementationVisitor::Visit(ConditionalExpression* expr) {
source_out() << "auto " << f1 << " = [=]() ";
{
ScopedIndent indent(this, false);
- source_out() << "" << std::endl;
+ source_out() << "\n";
left = Visit(expr->if_true);
GenerateIndent();
- source_out() << "return " << RValueFlattenStructs(left) << ";" << std::endl;
+ source_out() << "return " << RValueFlattenStructs(left) << ";\n";
}
- source_out() << ";" << std::endl;
+ source_out() << ";\n";
GenerateIndent();
source_out() << "auto " << f2 << " = [=]() ";
{
ScopedIndent indent(this, false);
- source_out() << "" << std::endl;
+ source_out() << "\n";
right = Visit(expr->if_false);
GenerateIndent();
- source_out() << "return " << RValueFlattenStructs(right) << ";"
- << std::endl;
+ source_out() << "return " << RValueFlattenStructs(right) << ";\n";
}
- source_out() << ";" << std::endl;
+ source_out() << ";\n";
const Type* common_type = GetCommonType(left.type(), right.type());
std::string result_var = NewTempVariable();
- Variable* result = GenerateVariableDeclaration(expr, result_var, common_type);
+ Variable* result =
+ GenerateVariableDeclaration(expr, result_var, false, common_type);
{
ScopedIndent indent(this);
@@ -414,7 +410,7 @@ VisitResult ImplementationVisitor::Visit(LogicalOrExpression* expr) {
Label* true_label = declarations()->LookupLabel(kTrueLabelName);
GenerateIndent();
source_out() << "GotoIf(" << RValueFlattenStructs(left_result) << ", "
- << true_label->generated() << ");" << std::endl;
+ << true_label->generated() << ");\n";
} else if (!left_result.type()->IsConstexprBool()) {
GenerateLabelBind(false_label);
}
@@ -447,7 +443,7 @@ VisitResult ImplementationVisitor::Visit(LogicalAndExpression* expr) {
Label* false_label = declarations()->LookupLabel(kFalseLabelName);
GenerateIndent();
source_out() << "GotoIfNot(" << RValueFlattenStructs(left_result) << ", "
- << false_label->generated() << ");" << std::endl;
+ << false_label->generated() << ");\n";
} else if (!left_result.type()->IsConstexprBool()) {
GenerateLabelBind(true_label);
}
@@ -494,7 +490,7 @@ VisitResult ImplementationVisitor::Visit(AssignmentExpression* expr) {
GenerateFetchFromLocation(expr->location, location_ref);
assignment_value = Visit(expr->value);
Arguments args;
- args.parameters = {assignment_value, assignment_value};
+ args.parameters = {location_value, assignment_value};
assignment_value = GenerateCall(*expr->op, args);
GenerateAssignToLocation(expr->location, location_ref, assignment_value);
} else {
@@ -518,14 +514,26 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
}
}
std::string temp = GenerateNewTempVariable(result_type);
- source_out() << expr->number << ";" << std::endl;
+ source_out() << expr->number << ";\n";
return VisitResult{result_type, temp};
}
+VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
+ VisitResult result = Visit(expr->expression);
+ const Type* result_type =
+ SubtractType(result.type(), declarations()->GetType(expr->excluded_type));
+ if (result_type->IsNever()) {
+ ReportError("unreachable code");
+ }
+ return VisitResult{result_type, "UncheckedCast<" +
+ result_type->GetGeneratedTNodeTypeName() +
+ ">(" + result.RValue() + ")"};
+}
+
VisitResult ImplementationVisitor::Visit(StringLiteralExpression* expr) {
std::string temp = GenerateNewTempVariable(TypeOracle::GetConstStringType());
source_out() << "\"" << expr->literal.substr(1, expr->literal.size() - 2)
- << "\";" << std::endl;
+ << "\";\n";
return VisitResult{TypeOracle::GetConstStringType(), temp};
}
@@ -610,14 +618,14 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
source_out() << "if ((" << RValueFlattenStructs(expression_result)
<< ")) ";
ScopedIndent indent(this, false);
- source_out() << std::endl;
+ source_out() << "\n";
left_result = Visit(stmt->if_true);
}
if (has_else) {
source_out() << " else ";
ScopedIndent indent(this, false);
- source_out() << std::endl;
+ source_out() << "\n";
right_result = Visit(*stmt->if_false);
}
if (left_result->IsNever() != right_result->IsNever()) {
@@ -628,7 +636,7 @@ const Type* ImplementationVisitor::Visit(IfStatement* stmt) {
ReportError(stream.str());
}
- source_out() << std::endl;
+ source_out() << "\n";
return left_result;
} else {
@@ -712,14 +720,14 @@ const Type* ImplementationVisitor::Visit(DebugStatement* stmt) {
GenerateIndent();
source_out() << "Print(\""
<< "halting because of '" << stmt->reason << "' at "
- << PositionAsString(stmt->pos) << "\");" << std::endl;
+ << PositionAsString(stmt->pos) << "\");\n";
#endif
GenerateIndent();
if (stmt->never_continues) {
- source_out() << "Unreachable();" << std::endl;
+ source_out() << "Unreachable();\n";
return TypeOracle::GetNeverType();
} else {
- source_out() << "DebugBreak();" << std::endl;
+ source_out() << "DebugBreak();\n";
return TypeOracle::GetVoidType();
}
}
@@ -781,10 +789,9 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
GenerateIndent();
source_out() << "Print(\""
<< "assert '" << FormatAssertSource(stmt->source)
- << "' failed at " << PositionAsString(stmt->pos) << "\");"
- << std::endl;
+ << "' failed at " << PositionAsString(stmt->pos) << "\");\n";
GenerateIndent();
- source_out() << "Unreachable();" << std::endl;
+ source_out() << "Unreachable();\n";
GenerateLabelBind(true_label);
}
@@ -825,12 +832,11 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
if (Builtin::cast(current_callable)->IsVarArgsJavaScript()) {
GenerateIndent();
source_out() << "arguments->PopAndReturn("
- << RValueFlattenStructs(return_result) << ");"
- << std::endl;
+ << RValueFlattenStructs(return_result) << ");\n";
} else {
GenerateIndent();
- source_out() << "Return(" << RValueFlattenStructs(return_result) << ");"
- << std::endl;
+ source_out() << "Return(" << RValueFlattenStructs(return_result)
+ << ");\n";
}
} else {
UNREACHABLE();
@@ -870,7 +876,7 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
const Type* common_type = GetCommonType(begin.type(), end.type());
Variable* index_var = GenerateVariableDeclaration(
stmt, std::string(kForIndexValueVariable) + "_" + NewTempVariable(),
- common_type, begin);
+ false, common_type, begin);
VisitResult index_for_read = {index_var->type(), index_var};
@@ -890,8 +896,12 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
GenerateLabelBind(body_label);
VisitResult element_result =
GenerateCall("[]", {{expression_result, index_for_read}, {}});
- GenerateVariableDeclaration(stmt->var_declaration,
- stmt->var_declaration->name, {}, element_result);
+ base::Optional<const Type*> declared_type;
+ if (stmt->var_declaration->type)
+ declared_type = declarations()->GetType(*stmt->var_declaration->type);
+ GenerateVariableDeclaration(
+ stmt->var_declaration, stmt->var_declaration->name,
+ stmt->var_declaration->const_qualified, declared_type, element_result);
Visit(stmt->body);
GenerateLabelGoto(increment_label);
@@ -932,7 +942,7 @@ const Type* ImplementationVisitor::Visit(TryLabelStatement* stmt) {
Declarations::NodeScopeActivator scope(declarations(),
stmt->label_blocks[i]->body);
for (auto& v : label->GetParameters()) {
- GenerateVariableDeclaration(stmt, v->name(), v->type());
+ GenerateVariableDeclaration(stmt, v->name(), false, v->type());
v->Define();
}
++i;
@@ -1002,33 +1012,47 @@ const Type* ImplementationVisitor::Visit(ForLoopStatement* stmt) {
if (stmt->var_declaration) Visit(*stmt->var_declaration);
- Label* body_label = nullptr;
- Label* exit_label = nullptr;
- {
- Declarations::NodeScopeActivator scope(declarations(), stmt->test);
- body_label = declarations()->LookupLabel(kTrueLabelName);
- GenerateLabelDefinition(body_label);
- exit_label = declarations()->LookupLabel(kFalseLabelName);
- GenerateLabelDefinition(exit_label);
- }
+ Label* body_label = declarations()->LookupLabel(kTrueLabelName);
+ GenerateLabelDefinition(body_label);
+ Label* exit_label = declarations()->LookupLabel(kFalseLabelName);
+ GenerateLabelDefinition(exit_label);
Label* header_label = declarations()->DeclarePrivateLabel("header");
GenerateLabelDefinition(header_label, stmt);
GenerateLabelGoto(header_label);
GenerateLabelBind(header_label);
- Label* assignment_label = declarations()->DeclarePrivateLabel("assignment");
- GenerateLabelDefinition(assignment_label);
+ // The continue label is where "continue" statements jump to. If no action
+ // expression is provided, we jump directly to the header.
+ Label* continue_label = header_label;
- BreakContinueActivator activator(global_context_, exit_label,
- assignment_label);
+ // The action label is only needed when an action expression was provided.
+ Label* action_label = nullptr;
+ if (stmt->action) {
+ action_label = declarations()->DeclarePrivateLabel("action");
+ GenerateLabelDefinition(action_label);
+
+ // The action expression needs to be executed on a continue.
+ continue_label = action_label;
+ }
+
+ BreakContinueActivator activator(global_context_, exit_label, continue_label);
std::vector<Label*> labels = {body_label, exit_label};
- if (GenerateExpressionBranch(stmt->test, labels, {stmt->body},
- assignment_label)) {
+ bool generate_action = true;
+ if (stmt->test) {
+ generate_action = GenerateExpressionBranch(*stmt->test, labels,
+ {stmt->body}, continue_label);
+ } else {
+ GenerateLabelGoto(body_label);
+ generate_action =
+ GenerateLabeledStatementBlocks({stmt->body}, labels, continue_label);
+ }
+
+ if (generate_action && stmt->action) {
ScopedIndent indent(this);
- GenerateLabelBind(assignment_label);
- Visit(stmt->action);
+ GenerateLabelBind(action_label);
+ Visit(*stmt->action);
GenerateLabelGoto(header_label);
}
@@ -1081,8 +1105,7 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, const std::string& name,
const Signature& signature, const NameVector& parameter_names) {
if (global_context_.verbose()) {
- std::cout << "generating source for declaration " << name << ""
- << std::endl;
+ std::cout << "generating source for declaration " << name << "\n";
}
// Quite a hack here. Make sure that TNode is namespace qualified if the
@@ -1162,11 +1185,17 @@ void FailMacroLookup(const std::string& reason, const std::string& name,
} // namespace
-Callable* ImplementationVisitor::LookupCall(const std::string& name,
- const Arguments& arguments) {
+Callable* ImplementationVisitor::LookupCall(
+ const std::string& name, const Arguments& arguments,
+ const TypeVector& specialization_types) {
Callable* result = nullptr;
TypeVector parameter_types(arguments.parameters.GetTypeVector());
- Declarable* declarable = declarations()->Lookup(name);
+ bool has_template_arguments = !specialization_types.empty();
+ std::string mangled_name = name;
+ if (has_template_arguments) {
+ mangled_name = GetGeneratedCallableName(name, specialization_types);
+ }
+ Declarable* declarable = declarations()->Lookup(mangled_name);
if (declarable->IsBuiltin()) {
result = Builtin::cast(declarable);
} else if (declarable->IsRuntimeFunction()) {
@@ -1238,11 +1267,21 @@ Callable* ImplementationVisitor::LookupCall(const std::string& name,
ReportError(stream.str());
}
+ if (has_template_arguments) {
+ Generic* generic = *result->generic();
+ CallableNode* callable = generic->declaration()->callable;
+ if (generic->declaration()->body) {
+ QueueGenericSpecialization({generic, specialization_types}, callable,
+ callable->signature.get(),
+ generic->declaration()->body);
+ }
+ }
+
return result;
}
void ImplementationVisitor::GetFlattenedStructsVars(
- const Variable* base, std::set<const Variable*>& vars) {
+ const Variable* base, std::set<const Variable*>* vars) {
const Type* type = base->type();
if (base->IsConst()) return;
if (type->IsStructType()) {
@@ -1253,7 +1292,7 @@ void ImplementationVisitor::GetFlattenedStructsVars(
Variable::cast(declarations()->LookupValue(field_var_name)), vars);
}
} else {
- vars.insert(base);
+ vars->insert(base);
}
}
@@ -1263,11 +1302,18 @@ void ImplementationVisitor::GenerateChangedVarsFromControlSplit(AstNode* node) {
node, declarations()->GetCurrentSpecializationTypeNamesVector());
std::set<const Variable*> flattened_vars;
for (auto v : changed_vars) {
- GetFlattenedStructsVars(v, flattened_vars);
- }
+ GetFlattenedStructsVars(v, &flattened_vars);
+ }
+ std::vector<const Variable*> flattened_vars_sorted(flattened_vars.begin(),
+ flattened_vars.end());
+ auto compare_variables = [](const Variable* a, const Variable* b) {
+ return a->value() < b->value();
+ };
+ std::sort(flattened_vars_sorted.begin(), flattened_vars_sorted.end(),
+ compare_variables);
source_out() << "{";
- PrintCommaSeparatedList(source_out(), flattened_vars,
- [&](const Variable* v) { return v->value(); });
+ PrintCommaSeparatedList(source_out(), flattened_vars_sorted,
+ [](const Variable* v) { return v->value(); });
source_out() << "}";
}
@@ -1287,9 +1333,9 @@ const Type* ImplementationVisitor::GetCommonType(const Type* left,
VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
std::string temp = GenerateNewTempVariable(to_copy.type());
- source_out() << RValueFlattenStructs(to_copy) << ";" << std::endl;
+ source_out() << RValueFlattenStructs(to_copy) << ";\n";
GenerateIndent();
- source_out() << "USE(" << temp << ");" << std::endl;
+ source_out() << "USE(" << temp << ");\n";
return VisitResult(to_copy.type(), temp);
}
@@ -1348,11 +1394,6 @@ LocationReference ImplementationVisitor::GetLocationReference(
declarations()->LookupValue((*result.declarable())->name() + "." +
expr->field),
{}, {});
-
- } else {
- return LocationReference(
- nullptr,
- VisitResult(result.type(), result.RValue() + "." + expr->field), {});
}
}
return LocationReference(nullptr, result, {});
@@ -1399,21 +1440,13 @@ VisitResult ImplementationVisitor::GenerateFetchFromLocation(
VisitResult ImplementationVisitor::GenerateFetchFromLocation(
FieldAccessExpression* expr, LocationReference reference) {
- const Type* type = reference.base.type();
if (reference.value != nullptr) {
return GenerateFetchFromLocation(reference);
- } else if (const StructType* struct_type = StructType::DynamicCast(type)) {
- auto& fields = struct_type->fields();
- auto i = std::find_if(
- fields.begin(), fields.end(),
- [&](const NameAndType& f) { return f.name == expr->field; });
- if (i == fields.end()) {
- std::stringstream s;
- s << "\"" << expr->field << "\" is not a field of struct type \""
- << struct_type->name() << "\"";
- ReportError(s.str());
- }
- return VisitResult(i->type, reference.base.RValue());
+ }
+ const Type* type = reference.base.type();
+ if (const StructType* struct_type = StructType::DynamicCast(type)) {
+ return VisitResult(struct_type->GetFieldType(expr->field),
+ reference.base.RValue() + "." + expr->field);
} else {
Arguments arguments;
arguments.parameters = {reference.base};
@@ -1451,7 +1484,7 @@ void ImplementationVisitor::GenerateAssignToVariable(Variable* var,
GenerateIndent();
VisitResult var_value = {var->type(), var};
source_out() << var_value.LValue() << " = "
- << RValueFlattenStructs(casted_value) << ";" << std::endl;
+ << RValueFlattenStructs(casted_value) << ";\n";
}
var->Define();
}
@@ -1469,7 +1502,7 @@ void ImplementationVisitor::GenerateAssignToLocation(
ReportError(s.str());
}
GenerateAssignToVariable(var, assignment_value);
- } else if (auto access = FieldAccessExpression::cast(location)) {
+ } else if (auto access = FieldAccessExpression::DynamicCast(location)) {
GenerateCall(std::string(".") + access->field + "=",
{{reference.base, assignment_value}, {}});
} else {
@@ -1492,40 +1525,52 @@ void ImplementationVisitor::GenerateVariableDeclaration(const Variable* var) {
GenerateIndent();
if (var_type->IsConstexpr()) {
source_out() << var_type->GetGeneratedTypeName();
- source_out() << " " << value << "_impl;" << std::endl;
+ source_out() << " " << value << "_impl;\n";
} else if (var->IsConst()) {
source_out() << "TNode<" << var->type()->GetGeneratedTNodeTypeName();
source_out() << "> " << var->value() << "_impl;\n";
} else {
source_out() << "TVARIABLE(";
source_out() << var_type->GetGeneratedTNodeTypeName();
- source_out() << ", " << value << "_impl);" << std::endl;
+ source_out() << ", " << value << "_impl);\n";
}
GenerateIndent();
- source_out() << "auto " << value << " = &" << value << "_impl;"
- << std::endl;
+ source_out() << "auto " << value << " = &" << value << "_impl;\n";
GenerateIndent();
- source_out() << "USE(" << value << ");" << std::endl;
+ source_out() << "USE(" << value << ");\n";
+ }
+}
+
+Variable* ImplementationVisitor::GeneratePredeclaredVariableDeclaration(
+ const std::string& name,
+ const base::Optional<VisitResult>& initialization) {
+ Variable* variable = Variable::cast(declarations()->LookupValue(name));
+ GenerateVariableDeclaration(variable);
+ if (initialization) {
+ GenerateAssignToVariable(variable, *initialization);
}
+ return variable;
}
Variable* ImplementationVisitor::GenerateVariableDeclaration(
- AstNode* node, const std::string& name,
+ AstNode* node, const std::string& name, bool is_const,
const base::Optional<const Type*>& type,
const base::Optional<VisitResult>& initialization) {
-
Variable* variable = nullptr;
- if (declarations()->TryLookup(name)) {
+ if (declarations()->IsDeclaredInCurrentScope(name)) {
variable = Variable::cast(declarations()->LookupValue(name));
} else {
- variable = declarations()->DeclareVariable(name, *type, false);
- // Because the variable is being defined during code generation, it must be
- // assumed that it changes along all control split paths because it's no
- // longer possible to run the control-flow anlaysis in the declaration pass
- // over the variable.
- global_context_.MarkVariableChanged(
- node, declarations()->GetCurrentSpecializationTypeNamesVector(),
- variable);
+ variable = declarations()->DeclareVariable(
+ name, type ? *type : initialization->type(), is_const);
+ if (!is_const) {
+ // Because the variable is being defined during code generation, it must
+ // be assumed that it changes along all control split paths because it's
+ // no longer possible to run the control-flow analysis in the declaration
+ // pass over the variable.
+ global_context_.MarkVariableChanged(
+ node, declarations()->GetCurrentSpecializationTypeNamesVector(),
+ variable);
+ }
}
GenerateVariableDeclaration(variable);
if (initialization) {
@@ -1543,9 +1588,9 @@ void ImplementationVisitor::GenerateParameter(
source_out() << "UncheckedCast<" << val->type()->GetGeneratedTNodeTypeName()
<< ">(Parameter(Descriptor::k" << CamelifyString(parameter_name)
- << "));" << std::endl;
+ << "));\n";
GenerateIndent();
- source_out() << "USE(" << var << ");" << std::endl;
+ source_out() << "USE(" << var << ");\n";
}
void ImplementationVisitor::GenerateParameterList(const NameVector& list,
@@ -1643,13 +1688,15 @@ VisitResult ImplementationVisitor::GeneratePointerCall(
if (!no_result) {
source_out() << ")";
}
- source_out() << ");" << std::endl;
+ source_out() << ");\n";
return VisitResult(type->return_type(), result_variable_name);
}
VisitResult ImplementationVisitor::GenerateCall(
- const std::string& callable_name, Arguments arguments, bool is_tailcall) {
- Callable* callable = LookupCall(callable_name, arguments);
+ const std::string& callable_name, Arguments arguments,
+ const TypeVector& specialization_types, bool is_tailcall) {
+ Callable* callable =
+ LookupCall(callable_name, arguments, specialization_types);
// Operators used in a branching context can also be function calls that never
// return but have a True and False label
@@ -1771,7 +1818,7 @@ VisitResult ImplementationVisitor::GenerateCall(
!result_type->IsConstexpr()) {
source_out() << ")";
}
- source_out() << ");" << std::endl;
+ source_out() << ");\n";
return VisitResult(result_type, result_variable_name);
}
@@ -1811,34 +1858,22 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
bool is_tailcall) {
Arguments arguments;
std::string name = expr->callee.name;
- bool has_template_arguments = expr->callee.generic_arguments.size() != 0;
- if (has_template_arguments) {
TypeVector specialization_types =
GetTypeVector(expr->callee.generic_arguments);
- name = GetGeneratedCallableName(name, specialization_types);
- for (auto generic :
- declarations()->LookupGeneric(expr->callee.name)->list()) {
- CallableNode* callable = generic->declaration()->callable;
- if (generic->declaration()->body) {
- QueueGenericSpecialization({generic, specialization_types}, callable,
- callable->signature.get(),
- generic->declaration()->body);
- }
- }
- }
- for (Expression* arg : expr->arguments)
- arguments.parameters.push_back(Visit(arg));
- arguments.labels = LabelsFromIdentifiers(expr->labels);
- VisitResult result;
- if (!has_template_arguments &&
- declarations()->Lookup(expr->callee.name)->IsValue()) {
- result = GeneratePointerCall(&expr->callee, arguments, is_tailcall);
+ bool has_template_arguments = !specialization_types.empty();
+ for (Expression* arg : expr->arguments)
+ arguments.parameters.push_back(Visit(arg));
+ arguments.labels = LabelsFromIdentifiers(expr->labels);
+ VisitResult result;
+ if (!has_template_arguments &&
+ declarations()->Lookup(expr->callee.name)->IsValue()) {
+ result = GeneratePointerCall(&expr->callee, arguments, is_tailcall);
} else {
- result = GenerateCall(name, arguments, is_tailcall);
+ result = GenerateCall(name, arguments, specialization_types, is_tailcall);
}
if (!result.type()->IsVoidOrNever()) {
GenerateIndent();
- source_out() << "USE(" << RValueFlattenStructs(result) << ");" << std::endl;
+ source_out() << "USE(" << RValueFlattenStructs(result) << ");\n";
}
if (is_tailcall) {
result = {TypeOracle::GetNeverType(), ""};
@@ -1853,8 +1888,8 @@ bool ImplementationVisitor::GenerateLabeledStatementBlocks(
auto label_iterator = statement_labels.begin();
for (Statement* block : blocks) {
GenerateIndent();
- source_out() << "if (" << (*label_iterator)->generated() << "->is_used())"
- << std::endl;
+ source_out() << "if (" << (*label_iterator)->generated()
+ << "->is_used())\n";
ScopedIndent indent(this);
GenerateLabelBind(*label_iterator++);
@@ -1872,7 +1907,7 @@ void ImplementationVisitor::GenerateBranch(const VisitResult& condition,
GenerateIndent();
source_out() << "Branch(" << RValueFlattenStructs(condition) << ", "
<< true_label->generated() << ", " << false_label->generated()
- << ");" << std::endl;
+ << ");\n";
}
bool ImplementationVisitor::GenerateExpressionBranch(
@@ -1907,7 +1942,7 @@ VisitResult ImplementationVisitor::GenerateImplicitConvert(
source.type())) {
std::string name =
GetGeneratedCallableName(kFromConstexprMacroName, {destination_type});
- return GenerateCall(name, {{source}, {}}, false);
+ return GenerateCall(name, {{source}, {}}, {}, false);
} else if (IsAssignableFrom(destination_type, source.type())) {
source.SetType(destination_type);
return source;
@@ -1917,7 +1952,6 @@ VisitResult ImplementationVisitor::GenerateImplicitConvert(
<< " as a value of type " << *destination_type;
ReportError(s.str());
}
- return VisitResult(TypeOracle::GetVoidType(), "");
}
std::string ImplementationVisitor::NewTempVariable() {
@@ -1943,22 +1977,22 @@ void ImplementationVisitor::GenerateLabelDefinition(Label* label,
source_out() << ", ";
GenerateChangedVarsFromControlSplit(node);
}
- source_out() << ");" << std::endl;
+ source_out() << ");\n";
GenerateIndent();
- source_out() << "Label* " + label_string + " = &" << label_string_impl << ";"
- << std::endl;
+ source_out() << "Label* " + label_string + " = &" << label_string_impl
+ << ";\n";
GenerateIndent();
- source_out() << "USE(" << label_string << ");" << std::endl;
+ source_out() << "USE(" << label_string << ");\n";
}
void ImplementationVisitor::GenerateLabelBind(Label* label) {
GenerateIndent();
- source_out() << "BIND(" << label->generated() << ");" << std::endl;
+ source_out() << "BIND(" << label->generated() << ");\n";
}
void ImplementationVisitor::GenerateLabelGoto(Label* label) {
GenerateIndent();
- source_out() << "Goto(" << label->generated() << ");" << std::endl;
+ source_out() << "Goto(" << label->generated() << ");\n";
}
std::vector<Label*> ImplementationVisitor::LabelsFromIdentifiers(
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index 82cbb48ce8..43520239da 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -19,8 +19,8 @@ namespace internal {
namespace torque {
struct LocationReference {
- LocationReference(Value* v, VisitResult b, VisitResult i)
- : value(v), base(b), index(i) {}
+ LocationReference(Value* value, VisitResult base, VisitResult index)
+ : value(value), base(base), index(index) {}
Value* value;
VisitResult base;
VisitResult index;
@@ -115,13 +115,11 @@ class ImplementationVisitor : public FileVisitor {
VisitResult Visit(LogicalOrExpression* expr);
VisitResult Visit(LogicalAndExpression* expr);
- LocationReference GetLocationReference(
- TorqueParser::LocationExpressionContext* locationExpression);
-
VisitResult Visit(IncrementDecrementExpression* expr);
VisitResult Visit(AssignmentExpression* expr);
VisitResult Visit(StringLiteralExpression* expr);
VisitResult Visit(NumberLiteralExpression* expr);
+ VisitResult Visit(AssumeTypeImpossibleExpression* expr);
const Type* Visit(TryLabelStatement* stmt);
const Type* Visit(ReturnStatement* stmt);
@@ -156,14 +154,14 @@ class ImplementationVisitor : public FileVisitor {
: new_lines_(new_lines), visitor_(visitor) {
if (new_lines) visitor->GenerateIndent();
visitor->source_out() << "{";
- if (new_lines) visitor->source_out() << std::endl;
+ if (new_lines) visitor->source_out() << "\n";
visitor->indent_++;
}
~ScopedIndent() {
visitor_->indent_--;
visitor_->GenerateIndent();
visitor_->source_out() << "}";
- if (new_lines_) visitor_->source_out() << std::endl;
+ if (new_lines_) visitor_->source_out() << "\n";
}
private:
@@ -171,12 +169,13 @@ class ImplementationVisitor : public FileVisitor {
ImplementationVisitor* visitor_;
};
- Callable* LookupCall(const std::string& name, const Arguments& arguments);
+ Callable* LookupCall(const std::string& name, const Arguments& arguments,
+ const TypeVector& specialization_types);
bool GenerateChangedVarFromControlSplit(const Variable* v, bool first = true);
void GetFlattenedStructsVars(const Variable* base,
- std::set<const Variable*>& vars);
+ std::set<const Variable*>* vars);
void GenerateChangedVarsFromControlSplit(AstNode* node);
@@ -192,8 +191,12 @@ class ImplementationVisitor : public FileVisitor {
void GenerateVariableDeclaration(const Variable* var);
+ Variable* GeneratePredeclaredVariableDeclaration(
+ const std::string& name,
+ const base::Optional<VisitResult>& initialization);
+
Variable* GenerateVariableDeclaration(
- AstNode* node, const std::string& name,
+ AstNode* node, const std::string& name, bool is_const,
const base::Optional<const Type*>& type,
const base::Optional<VisitResult>& initialization = {});
@@ -202,7 +205,9 @@ class ImplementationVisitor : public FileVisitor {
void GenerateParameterList(const NameVector& list, size_t first = 0);
VisitResult GenerateCall(const std::string& callable_name,
- Arguments parameters, bool tail_call = false);
+ Arguments parameters,
+ const TypeVector& specialization_types = {},
+ bool tail_call = false);
VisitResult GeneratePointerCall(Expression* callee,
const Arguments& parameters, bool tail_call);
diff --git a/deps/v8/src/torque/scope.h b/deps/v8/src/torque/scope.h
index be000233bd..21438da8fe 100644
--- a/deps/v8/src/torque/scope.h
+++ b/deps/v8/src/torque/scope.h
@@ -5,9 +5,9 @@
#ifndef V8_TORQUE_SCOPE_H_
#define V8_TORQUE_SCOPE_H_
+#include <map>
#include <string>
-#include "./antlr4-runtime.h"
#include "src/torque/ast.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -17,6 +17,8 @@ namespace internal {
namespace torque {
class ScopeChain;
+class Variable;
+class Declarable;
class Scope {
public:
diff --git a/deps/v8/src/torque/source-positions.cc b/deps/v8/src/torque/source-positions.cc
new file mode 100644
index 0000000000..b10c98f125
--- /dev/null
+++ b/deps/v8/src/torque/source-positions.cc
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/source-positions.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DEFINE_CONTEXTUAL_VARIABLE(CurrentSourceFile)
+DEFINE_CONTEXTUAL_VARIABLE(CurrentSourcePosition)
+DEFINE_CONTEXTUAL_VARIABLE(SourceFileMap)
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
new file mode 100644
index 0000000000..bd5aaa3ec3
--- /dev/null
+++ b/deps/v8/src/torque/source-positions.h
@@ -0,0 +1,55 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_SOURCE_POSITIONS_H_
+#define V8_TORQUE_SOURCE_POSITIONS_H_
+
+#include "src/torque/contextual.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class SourceId {
+ private:
+ explicit SourceId(int id) : id_(id) {}
+ int id_;
+ friend class SourceFileMap;
+};
+
+struct SourcePosition {
+ SourceId source;
+ int line;
+ int column;
+};
+
+DECLARE_CONTEXTUAL_VARIABLE(CurrentSourceFile, SourceId)
+DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition)
+
+class SourceFileMap : public ContextualClass<SourceFileMap> {
+ public:
+ SourceFileMap() {}
+ static const std::string& GetSource(SourceId source) {
+ return Get().sources_[source.id_];
+ }
+
+ static SourceId AddSource(std::string path) {
+ Get().sources_.push_back(std::move(path));
+ return SourceId(static_cast<int>(Get().sources_.size()) - 1);
+ }
+
+ private:
+ std::vector<std::string> sources_;
+};
+
+inline std::string PositionAsString(SourcePosition pos) {
+ return SourceFileMap::GetSource(pos.source) + ":" +
+ std::to_string(pos.line + 1) + ":" + std::to_string(pos.column + 1);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_SOURCE_POSITIONS_H_
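CurrentSourceFile, CurrentSourcePosition and SourceFileMap are contextual variables: each is bound for a dynamic scope and then read through static accessors, which is how PositionAsString() can render a position without threading a file table through every caller. A short sketch of the intended use follows; the file name is made up, and the Scope binder classes are assumed to be what DECLARE_CONTEXTUAL_VARIABLE and ContextualClass provide in src/torque/contextual.h.

// Bind the contextual variables while compiling one .tq file.
SourceFileMap::Scope source_file_map_scope;
SourceId file_id = SourceFileMap::AddSource("src/builtins/example.tq");
CurrentSourceFile::Scope current_file_scope(file_id);

// Positions are zero-based internally; PositionAsString() prints them
// one-based, so line index 12, column index 4 becomes "...:13:5".
SourcePosition pos{file_id, 12, 4};
std::string where = PositionAsString(pos);  // "src/builtins/example.tq:13:5"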
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
new file mode 100644
index 0000000000..92c3fa8815
--- /dev/null
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -0,0 +1,1280 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cctype>
+
+#include "src/torque/earley-parser.h"
+#include "src/torque/torque-parser.h"
+#include "src/torque/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DEFINE_CONTEXTUAL_VARIABLE(CurrentAst);
+
+using TypeList = std::vector<TypeExpression*>;
+using GenericParameters = std::vector<std::string>;
+
+struct ExpressionWithSource {
+ Expression* expression;
+ std::string source;
+};
+
+struct TypeswitchCase {
+ SourcePosition pos;
+ base::Optional<std::string> name;
+ TypeExpression* type;
+ Statement* block;
+};
+
+enum class ParseResultHolderBase::TypeId {
+ kStdString,
+ kBool,
+ kStdVectorOfString,
+ kExpressionPtr,
+ kLocationExpressionPtr,
+ kStatementPtr,
+ kDeclarationPtr,
+ kTypeExpressionPtr,
+ kLabelBlockPtr,
+ kNameAndTypeExpression,
+ kStdVectorOfNameAndTypeExpression,
+ kIncrementDecrementOperator,
+ kOptionalStdString,
+ kStdVectorOfStatementPtr,
+ kStdVectorOfDeclarationPtr,
+ kStdVectorOfExpressionPtr,
+ kExpressionWithSource,
+ kParameterList,
+ kRangeExpression,
+ kOptionalRangeExpression,
+ kTypeList,
+ kOptionalTypeList,
+ kLabelAndTypes,
+ kStdVectorOfLabelAndTypes,
+ kStdVectorOfLabelBlockPtr,
+ kOptionalStatementPtr,
+ kOptionalExpressionPtr,
+ kTypeswitchCase,
+ kStdVectorOfTypeswitchCase
+};
+
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<std::string>::id =
+ ParseResultTypeId::kStdString;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<bool>::id =
+ ParseResultTypeId::kBool;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<std::string>>::id =
+ ParseResultTypeId::kStdVectorOfString;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Declaration*>::id =
+ ParseResultTypeId::kDeclarationPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<TypeExpression*>::id =
+ ParseResultTypeId::kTypeExpressionPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<LabelBlock*>::id =
+ ParseResultTypeId::kLabelBlockPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Expression*>::id =
+ ParseResultTypeId::kExpressionPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<LocationExpression*>::id =
+ ParseResultTypeId::kLocationExpressionPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<Statement*>::id =
+ ParseResultTypeId::kStatementPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<NameAndTypeExpression>::id =
+ ParseResultTypeId::kNameAndTypeExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<NameAndTypeExpression>>::id =
+ ParseResultTypeId::kStdVectorOfNameAndTypeExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<IncrementDecrementOperator>::id =
+ ParseResultTypeId::kIncrementDecrementOperator;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<std::string>>::id =
+ ParseResultTypeId::kOptionalStdString;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<Statement*>>::id =
+ ParseResultTypeId::kStdVectorOfStatementPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<Declaration*>>::id =
+ ParseResultTypeId::kStdVectorOfDeclarationPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<Expression*>>::id =
+ ParseResultTypeId::kStdVectorOfExpressionPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<ExpressionWithSource>::id =
+ ParseResultTypeId::kExpressionWithSource;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<ParameterList>::id =
+ ParseResultTypeId::kParameterList;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<RangeExpression>::id =
+ ParseResultTypeId::kRangeExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<RangeExpression>>::id =
+ ParseResultTypeId::kOptionalRangeExpression;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<TypeList>::id =
+ ParseResultTypeId::kTypeList;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<TypeList>>::id =
+ ParseResultTypeId::kOptionalTypeList;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<LabelAndTypes>::id =
+ ParseResultTypeId::kLabelAndTypes;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<LabelAndTypes>>::id =
+ ParseResultTypeId::kStdVectorOfLabelAndTypes;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<LabelBlock*>>::id =
+ ParseResultTypeId::kStdVectorOfLabelBlockPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<Statement*>>::id =
+ ParseResultTypeId::kOptionalStatementPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<Expression*>>::id =
+ ParseResultTypeId::kOptionalExpressionPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<TypeswitchCase>::id = ParseResultTypeId::kTypeswitchCase;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<TypeswitchCase>>::id =
+ ParseResultTypeId::kStdVectorOfTypeswitchCase;
+
+namespace {
+
+base::Optional<ParseResult> AddGlobalDeclaration(
+ ParseResultIterator* child_results) {
+ auto declaration = child_results->NextAs<Declaration*>();
+ CurrentAst::Get().declarations().push_back(declaration);
+ return base::nullopt;
+}
+
+template <class T, class... Args>
+T* MakeNode(Args... args) {
+ return CurrentAst::Get().AddNode(std::unique_ptr<T>(
+ new T(CurrentSourcePosition::Get(), std::move(args)...)));
+}
+
+base::Optional<ParseResult> MakeCall(ParseResultIterator* child_results) {
+ auto callee = child_results->NextAs<std::string>();
+ auto generic_args = child_results->NextAs<TypeList>();
+ auto args = child_results->NextAs<std::vector<Expression*>>();
+ auto labels = child_results->NextAs<std::vector<std::string>>();
+ Expression* result =
+ MakeNode<CallExpression>(callee, false, generic_args, args, labels);
+ return ParseResult{result};
+}
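+
+// The NextAs<>() calls above consume the rule's child results in order, so a
+// rule using MakeCall as its action must have children that produce a
+// std::string, a TypeList, a std::vector<Expression*> and a
+// std::vector<std::string>, in that order. A hypothetical rule (symbol names
+// invented for illustration) would look like:
+//   Rule({&identifier, TryOrDefault<TypeList>(&genericSpecializationTypeList),
+//         &argumentList, &labelList},
+//        MakeCall)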
+
+base::Optional<ParseResult> MakeBinaryOperator(
+ ParseResultIterator* child_results) {
+ auto left = child_results->NextAs<Expression*>();
+ auto op = child_results->NextAs<std::string>();
+ auto right = child_results->NextAs<Expression*>();
+ Expression* result = MakeNode<CallExpression>(
+ op, true, TypeList{}, std::vector<Expression*>{left, right},
+ std::vector<std::string>{});
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeUnaryOperator(
+ ParseResultIterator* child_results) {
+ auto op = child_results->NextAs<std::string>();
+ auto e = child_results->NextAs<Expression*>();
+ Expression* result = MakeNode<CallExpression>(op, true, TypeList{},
+ std::vector<Expression*>{e},
+ std::vector<std::string>{});
+ return ParseResult{result};
+}
+
+template <bool has_varargs>
+base::Optional<ParseResult> MakeParameterListFromTypes(
+ ParseResultIterator* child_results) {
+ auto types = child_results->NextAs<TypeList>();
+ ParameterList result;
+ result.types = std::move(types);
+ result.has_varargs = has_varargs;
+ return ParseResult{std::move(result)};
+}
+template <bool has_varargs>
+base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
+ ParseResultIterator* child_results) {
+ auto params = child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ std::string arguments_variable = "";
+ if (child_results->HasNext()) {
+ arguments_variable = child_results->NextAs<std::string>();
+ }
+ ParameterList result;
+ for (NameAndTypeExpression& pair : params) {
+ result.names.push_back(std::move(pair.name));
+ result.types.push_back(pair.type);
+ }
+ result.has_varargs = has_varargs;
+ result.arguments_variable = arguments_variable;
+ return ParseResult{std::move(result)};
+}
+
+base::Optional<ParseResult> MakeAssertStatement(
+ ParseResultIterator* child_results) {
+ auto kind = child_results->NextAs<std::string>();
+ auto expr_with_source = child_results->NextAs<ExpressionWithSource>();
+ DCHECK(kind == "assert" || kind == "check");
+ Statement* result = MakeNode<AssertStatement>(
+ kind == "assert", expr_with_source.expression, expr_with_source.source);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeDebugStatement(
+ ParseResultIterator* child_results) {
+ auto kind = child_results->NextAs<std::string>();
+ DCHECK(kind == "unreachable" || kind == "debug");
+ Statement* result = MakeNode<DebugStatement>(kind, kind == "unreachable");
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeVoidType(ParseResultIterator* child_results) {
+ TypeExpression* result = MakeNode<BasicTypeExpression>(false, "void");
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeExternalMacro(
+ ParseResultIterator* child_results) {
+ auto operator_name = child_results->NextAs<base::Optional<std::string>>();
+ auto name = child_results->NextAs<std::string>();
+ auto generic_parameters = child_results->NextAs<GenericParameters>();
+ auto args = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ auto labels = child_results->NextAs<LabelAndTypesVector>();
+ MacroDeclaration* macro = MakeNode<ExternalMacroDeclaration>(
+ name, operator_name, args, return_type, labels);
+ Declaration* result;
+ if (generic_parameters.empty()) {
+ result = MakeNode<StandardDeclaration>(macro, nullptr);
+ } else {
+ result = MakeNode<GenericDeclaration>(macro, generic_parameters);
+ }
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTorqueMacroDeclaration(
+ ParseResultIterator* child_results) {
+ auto operator_name = child_results->NextAs<base::Optional<std::string>>();
+ auto name = child_results->NextAs<std::string>();
+ auto generic_parameters = child_results->NextAs<GenericParameters>();
+ auto args = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ auto labels = child_results->NextAs<LabelAndTypesVector>();
+ auto body = child_results->NextAs<base::Optional<Statement*>>();
+ MacroDeclaration* macro = MakeNode<TorqueMacroDeclaration>(
+ name, operator_name, args, return_type, labels);
+ Declaration* result;
+ if (generic_parameters.empty()) {
+ if (!body) ReportError("A non-generic declaration needs a body.");
+ result = MakeNode<StandardDeclaration>(macro, *body);
+ } else {
+ result = MakeNode<GenericDeclaration>(macro, generic_parameters, body);
+ }
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTorqueBuiltinDeclaration(
+ ParseResultIterator* child_results) {
+ auto javascript_linkage = child_results->NextAs<bool>();
+ auto name = child_results->NextAs<std::string>();
+ auto generic_parameters = child_results->NextAs<GenericParameters>();
+ auto args = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ auto body = child_results->NextAs<base::Optional<Statement*>>();
+ BuiltinDeclaration* builtin = MakeNode<TorqueBuiltinDeclaration>(
+ javascript_linkage, name, args, return_type);
+ Declaration* result;
+ if (generic_parameters.empty()) {
+ if (!body) ReportError("A non-generic declaration needs a body.");
+ result = MakeNode<StandardDeclaration>(builtin, *body);
+ } else {
+ result = MakeNode<GenericDeclaration>(builtin, generic_parameters, body);
+ }
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeConstDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ auto expression = child_results->NextAs<Expression*>();
+ Declaration* result =
+ MakeNode<ConstDeclaration>(std::move(name), type, expression);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeExternConstDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ auto literal = child_results->NextAs<std::string>();
+ Declaration* result = MakeNode<ExternConstDeclaration>(std::move(name), type,
+ std::move(literal));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTypeAliasDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ Declaration* result = MakeNode<TypeAliasDeclaration>(std::move(name), type);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTypeDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto extends = child_results->NextAs<base::Optional<std::string>>();
+ auto generates = child_results->NextAs<base::Optional<std::string>>();
+ auto constexpr_generates =
+ child_results->NextAs<base::Optional<std::string>>();
+ Declaration* result = MakeNode<TypeDeclaration>(
+ std::move(name), std::move(extends), std::move(generates),
+ std::move(constexpr_generates));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeExplicitModuleDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto declarations = child_results->NextAs<std::vector<Declaration*>>();
+ Declaration* result = MakeNode<ExplicitModuleDeclaration>(
+ std::move(name), std::move(declarations));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeSpecializationDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto generic_parameters =
+ child_results->NextAs<std::vector<TypeExpression*>>();
+ auto parameters = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ auto labels = child_results->NextAs<LabelAndTypesVector>();
+ auto body = child_results->NextAs<Statement*>();
+ Declaration* result = MakeNode<SpecializationDeclaration>(
+ std::move(name), std::move(generic_parameters), std::move(parameters),
+ return_type, std::move(labels), body);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeStructDeclaration(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto fields = child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ Declaration* result =
+ MakeNode<StructDeclaration>(std::move(name), std::move(fields));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeExternalBuiltin(
+ ParseResultIterator* child_results) {
+ auto js_linkage = child_results->NextAs<bool>();
+ auto name = child_results->NextAs<std::string>();
+ auto generic_parameters = child_results->NextAs<GenericParameters>();
+ auto args = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ BuiltinDeclaration* builtin =
+ MakeNode<ExternalBuiltinDeclaration>(js_linkage, name, args, return_type);
+ Declaration* result;
+ if (generic_parameters.empty()) {
+ result = MakeNode<StandardDeclaration>(builtin, nullptr);
+ } else {
+ result = MakeNode<GenericDeclaration>(builtin, generic_parameters);
+ }
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeExternalRuntime(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto args = child_results->NextAs<ParameterList>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ ExternalRuntimeDeclaration* runtime =
+ MakeNode<ExternalRuntimeDeclaration>(name, args, return_type);
+ Declaration* result = MakeNode<StandardDeclaration>(runtime, nullptr);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> StringLiteralUnquoteAction(
+ ParseResultIterator* child_results) {
+ return ParseResult{
+ StringLiteralUnquote(child_results->NextAs<std::string>())};
+}
+
+base::Optional<ParseResult> MakeBasicTypeExpression(
+ ParseResultIterator* child_results) {
+ auto is_constexpr = child_results->NextAs<bool>();
+ auto name = child_results->NextAs<std::string>();
+ TypeExpression* result =
+ MakeNode<BasicTypeExpression>(is_constexpr, std::move(name));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeFunctionTypeExpression(
+ ParseResultIterator* child_results) {
+ auto parameters = child_results->NextAs<std::vector<TypeExpression*>>();
+ auto return_type = child_results->NextAs<TypeExpression*>();
+ TypeExpression* result =
+ MakeNode<FunctionTypeExpression>(std::move(parameters), return_type);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeUnionTypeExpression(
+ ParseResultIterator* child_results) {
+ auto a = child_results->NextAs<TypeExpression*>();
+ auto b = child_results->NextAs<TypeExpression*>();
+ TypeExpression* result = MakeNode<UnionTypeExpression>(a, b);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeExpressionStatement(
+ ParseResultIterator* child_results) {
+ auto expression = child_results->NextAs<Expression*>();
+ Statement* result = MakeNode<ExpressionStatement>(expression);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeIfStatement(
+ ParseResultIterator* child_results) {
+ auto is_constexpr = child_results->NextAs<bool>();
+ auto condition = child_results->NextAs<Expression*>();
+ auto if_true = child_results->NextAs<Statement*>();
+ auto if_false = child_results->NextAs<base::Optional<Statement*>>();
+
+ if (if_false && !(BlockStatement::DynamicCast(if_true) &&
+ (BlockStatement::DynamicCast(*if_false) ||
+ IfStatement::DynamicCast(*if_false)))) {
+ ReportError("if-else statements require curly braces");
+ }
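+  // For example, `if (c) return 1; else return 2;` is rejected: with an
+  // else-branch present, the then-branch must be a block, and the else-branch
+  // must be a block or another if-statement.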
+
+ Statement* result =
+ MakeNode<IfStatement>(is_constexpr, condition, if_true, if_false);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTypeswitchStatement(
+ ParseResultIterator* child_results) {
+ auto expression = child_results->NextAs<Expression*>();
+ auto cases = child_results->NextAs<std::vector<TypeswitchCase>>();
+ CurrentSourcePosition::Scope current_source_position(
+ child_results->matched_input().pos);
+
+ // typeswitch (expression) case (x1 : T1) {
+ // ...b1
+ // } case (x2 : T2) {
+ // ...b2
+ // } case (x3 : T3) {
+ // ...b3
+ // }
+ //
+ // desugars to
+ //
+ // {
+ // const _value = expression;
+ // try {
+ // const x1 : T1 = cast<T1>(_value) otherwise _NextCase;
+ // ...b1
+ // } label _NextCase {
+ // try {
+  //       const x2 : T2 = cast<T2>(%assume_impossible<T1>(_value))
+  //           otherwise _NextCase;
+ // ...b2
+ // } label _NextCase {
+ // const x3 : T3 = %assume_impossible<T1|T2>(_value);
+ // ...b3
+ // }
+ // }
+ // }
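+  //
+  // Every case except the last narrows the value with a cast that jumps to
+  // the following case's label on failure; cases after the first additionally
+  // mark the union of all previously handled types as impossible via
+  // %assume_impossible.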
+
+ BlockStatement* current_block = MakeNode<BlockStatement>();
+ Statement* result = current_block;
+ {
+ CurrentSourcePosition::Scope current_source_position(expression->pos);
+ current_block->statements.push_back(MakeNode<VarDeclarationStatement>(
+ true, "_value", base::nullopt, expression));
+ }
+
+ TypeExpression* accumulated_types;
+ for (size_t i = 0; i < cases.size(); ++i) {
+ CurrentSourcePosition::Scope current_source_position(cases[i].pos);
+ Expression* value = MakeNode<IdentifierExpression>("_value");
+ if (i >= 1) {
+ value =
+ MakeNode<AssumeTypeImpossibleExpression>(accumulated_types, value);
+ }
+ BlockStatement* case_block;
+ if (i < cases.size() - 1) {
+ value = MakeNode<CallExpression>(
+ "cast", false, std::vector<TypeExpression*>{cases[i].type},
+ std::vector<Expression*>{value},
+ std::vector<std::string>{"_NextCase"});
+ case_block = MakeNode<BlockStatement>();
+ } else {
+ case_block = current_block;
+ }
+ std::string name = "_case_value";
+ if (cases[i].name) name = *cases[i].name;
+ case_block->statements.push_back(
+ MakeNode<VarDeclarationStatement>(true, name, cases[i].type, value));
+ case_block->statements.push_back(cases[i].block);
+ if (i < cases.size() - 1) {
+ BlockStatement* next_block = MakeNode<BlockStatement>();
+ current_block->statements.push_back(MakeNode<TryLabelStatement>(
+ case_block, std::vector<LabelBlock*>{MakeNode<LabelBlock>(
+ "_NextCase", ParameterList::Empty(), next_block)}));
+ current_block = next_block;
+ }
+ accumulated_types =
+ i > 0 ? MakeNode<UnionTypeExpression>(accumulated_types, cases[i].type)
+ : cases[i].type;
+ }
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTypeswitchCase(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<base::Optional<std::string>>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ auto block = child_results->NextAs<Statement*>();
+ return ParseResult{TypeswitchCase{child_results->matched_input().pos,
+ std::move(name), type, block}};
+}
+
+base::Optional<ParseResult> MakeWhileStatement(
+ ParseResultIterator* child_results) {
+ auto condition = child_results->NextAs<Expression*>();
+ auto body = child_results->NextAs<Statement*>();
+ Statement* result = MakeNode<WhileStatement>(condition, body);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeReturnStatement(
+ ParseResultIterator* child_results) {
+ auto value = child_results->NextAs<base::Optional<Expression*>>();
+ Statement* result = MakeNode<ReturnStatement>(value);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTailCallStatement(
+ ParseResultIterator* child_results) {
+ auto value = child_results->NextAs<Expression*>();
+ Statement* result = MakeNode<TailCallStatement>(CallExpression::cast(value));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeVarDeclarationStatement(
+ ParseResultIterator* child_results) {
+ auto kind = child_results->NextAs<std::string>();
+ bool const_qualified = kind == "const";
+ if (!const_qualified) DCHECK_EQ("let", kind);
+ auto name = child_results->NextAs<std::string>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ base::Optional<Expression*> initializer;
+ if (child_results->HasNext())
+ initializer = child_results->NextAs<Expression*>();
+ Statement* result = MakeNode<VarDeclarationStatement>(
+ const_qualified, std::move(name), type, initializer);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeBreakStatement(
+ ParseResultIterator* child_results) {
+ Statement* result = MakeNode<BreakStatement>();
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeContinueStatement(
+ ParseResultIterator* child_results) {
+ Statement* result = MakeNode<ContinueStatement>();
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeGotoStatement(
+ ParseResultIterator* child_results) {
+ auto label = child_results->NextAs<std::string>();
+ auto arguments = child_results->NextAs<std::vector<Expression*>>();
+ Statement* result =
+ MakeNode<GotoStatement>(std::move(label), std::move(arguments));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeBlockStatement(
+ ParseResultIterator* child_results) {
+ auto deferred = child_results->NextAs<bool>();
+ auto statements = child_results->NextAs<std::vector<Statement*>>();
+ Statement* result = MakeNode<BlockStatement>(deferred, std::move(statements));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeTryLabelStatement(
+ ParseResultIterator* child_results) {
+ auto try_block = child_results->NextAs<Statement*>();
+ auto label_blocks = child_results->NextAs<std::vector<LabelBlock*>>();
+ Statement* result =
+ MakeNode<TryLabelStatement>(try_block, std::move(label_blocks));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeForOfLoopStatement(
+ ParseResultIterator* child_results) {
+ auto var_decl = child_results->NextAs<Statement*>();
+ auto iterable = child_results->NextAs<Expression*>();
+ auto range = child_results->NextAs<base::Optional<RangeExpression>>();
+ auto body = child_results->NextAs<Statement*>();
+ Statement* result =
+ MakeNode<ForOfLoopStatement>(var_decl, iterable, range, body);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeForLoopStatement(
+ ParseResultIterator* child_results) {
+ auto var_decl = child_results->NextAs<base::Optional<Statement*>>();
+ auto test = child_results->NextAs<base::Optional<Expression*>>();
+ auto action = child_results->NextAs<base::Optional<Expression*>>();
+ auto body = child_results->NextAs<Statement*>();
+ Statement* result = MakeNode<ForLoopStatement>(var_decl, test, action, body);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
+ auto label = child_results->NextAs<std::string>();
+ auto parameters = child_results->NextAs<ParameterList>();
+ auto body = child_results->NextAs<Statement*>();
+ LabelBlock* result =
+ MakeNode<LabelBlock>(std::move(label), std::move(parameters), body);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeRangeExpression(
+ ParseResultIterator* child_results) {
+ auto begin = child_results->NextAs<base::Optional<Expression*>>();
+ auto end = child_results->NextAs<base::Optional<Expression*>>();
+ RangeExpression result = {begin, end};
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeExpressionWithSource(
+ ParseResultIterator* child_results) {
+ auto e = child_results->NextAs<Expression*>();
+ return ParseResult{
+ ExpressionWithSource{e, child_results->matched_input().ToString()}};
+}
+
+base::Optional<ParseResult> MakeIdentifierExpression(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto generic_arguments =
+ child_results->NextAs<std::vector<TypeExpression*>>();
+ LocationExpression* result = MakeNode<IdentifierExpression>(
+ std::move(name), std::move(generic_arguments));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeFieldAccessExpression(
+ ParseResultIterator* child_results) {
+ auto object = child_results->NextAs<Expression*>();
+ auto field = child_results->NextAs<std::string>();
+ LocationExpression* result =
+ MakeNode<FieldAccessExpression>(object, std::move(field));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeElementAccessExpression(
+ ParseResultIterator* child_results) {
+ auto object = child_results->NextAs<Expression*>();
+ auto field = child_results->NextAs<Expression*>();
+ LocationExpression* result = MakeNode<ElementAccessExpression>(object, field);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeStructExpression(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto expressions = child_results->NextAs<std::vector<Expression*>>();
+ Expression* result =
+ MakeNode<StructExpression>(std::move(name), std::move(expressions));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeAssignmentExpression(
+ ParseResultIterator* child_results) {
+ auto location = child_results->NextAs<LocationExpression*>();
+ auto op = child_results->NextAs<base::Optional<std::string>>();
+ auto value = child_results->NextAs<Expression*>();
+ Expression* result =
+ MakeNode<AssignmentExpression>(location, std::move(op), value);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeNumberLiteralExpression(
+ ParseResultIterator* child_results) {
+ auto number = child_results->NextAs<std::string>();
+ Expression* result = MakeNode<NumberLiteralExpression>(std::move(number));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeStringLiteralExpression(
+ ParseResultIterator* child_results) {
+ auto literal = child_results->NextAs<std::string>();
+ Expression* result = MakeNode<StringLiteralExpression>(std::move(literal));
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeIncrementDecrementExpressionPostfix(
+ ParseResultIterator* child_results) {
+ auto location = child_results->NextAs<LocationExpression*>();
+ auto op = child_results->NextAs<IncrementDecrementOperator>();
+ Expression* result =
+ MakeNode<IncrementDecrementExpression>(location, op, true);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeIncrementDecrementExpressionPrefix(
+ ParseResultIterator* child_results) {
+ auto op = child_results->NextAs<IncrementDecrementOperator>();
+ auto location = child_results->NextAs<LocationExpression*>();
+ Expression* result =
+ MakeNode<IncrementDecrementExpression>(location, op, false);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeLogicalOrExpression(
+ ParseResultIterator* child_results) {
+ auto left = child_results->NextAs<Expression*>();
+ auto right = child_results->NextAs<Expression*>();
+ Expression* result = MakeNode<LogicalOrExpression>(left, right);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeLogicalAndExpression(
+ ParseResultIterator* child_results) {
+ auto left = child_results->NextAs<Expression*>();
+ auto right = child_results->NextAs<Expression*>();
+ Expression* result = MakeNode<LogicalAndExpression>(left, right);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeConditionalExpression(
+ ParseResultIterator* child_results) {
+ auto condition = child_results->NextAs<Expression*>();
+ auto if_true = child_results->NextAs<Expression*>();
+ auto if_false = child_results->NextAs<Expression*>();
+ Expression* result =
+ MakeNode<ConditionalExpression>(condition, if_true, if_false);
+ return ParseResult{result};
+}
+
+base::Optional<ParseResult> MakeLabelAndTypes(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto types = child_results->NextAs<std::vector<TypeExpression*>>();
+ return ParseResult{LabelAndTypes{std::move(name), std::move(types)}};
+}
+
+base::Optional<ParseResult> MakeNameAndType(
+ ParseResultIterator* child_results) {
+ auto name = child_results->NextAs<std::string>();
+ auto type = child_results->NextAs<TypeExpression*>();
+ return ParseResult{NameAndTypeExpression{std::move(name), type}};
+}
+
+base::Optional<ParseResult> ExtractAssignmentOperator(
+ ParseResultIterator* child_results) {
+ auto op = child_results->NextAs<std::string>();
+ base::Optional<std::string> result = std::string(op.begin(), op.end() - 1);
+ return ParseResult(std::move(result));
+}
+
+struct TorqueGrammar : Grammar {
+ static bool MatchWhitespace(InputPosition* pos) {
+ while (true) {
+ if (MatchChar(std::isspace, pos)) continue;
+ if (MatchString("//", pos)) {
+ while (MatchChar([](char c) { return c != '\n'; }, pos)) {
+ }
+ continue;
+ }
+ return true;
+ }
+ }
+
+ static bool MatchIdentifier(InputPosition* pos) {
+ if (!MatchChar(std::isalpha, pos)) return false;
+ while (MatchChar(std::isalnum, pos) || MatchString("_", pos)) {
+ }
+ return true;
+ }
+
+ static bool MatchStringLiteral(InputPosition* pos) {
+ InputPosition current = *pos;
+ if (MatchString("\"", &current)) {
+ while (
+ (MatchString("\\", &current) && MatchAnyChar(&current)) ||
+ MatchChar([](char c) { return c != '"' && c != '\n'; }, &current)) {
+ }
+ if (MatchString("\"", &current)) {
+ *pos = current;
+ return true;
+ }
+ }
+ current = *pos;
+ if (MatchString("'", &current)) {
+ while (
+ (MatchString("\\", &current) && MatchAnyChar(&current)) ||
+ MatchChar([](char c) { return c != '\'' && c != '\n'; }, &current)) {
+ }
+ if (MatchString("'", &current)) {
+ *pos = current;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static bool MatchHexLiteral(InputPosition* pos) {
+ InputPosition current = *pos;
+ MatchString("-", &current);
+ if (MatchString("0x", &current) && MatchChar(std::isxdigit, &current)) {
+ while (MatchChar(std::isxdigit, &current)) {
+ }
+ *pos = current;
+ return true;
+ }
+ return false;
+ }
+
+ static bool MatchDecimalLiteral(InputPosition* pos) {
+ InputPosition current = *pos;
+ bool found_digit = false;
+ MatchString("-", &current);
+ while (MatchChar(std::isdigit, &current)) found_digit = true;
+ MatchString(".", &current);
+ while (MatchChar(std::isdigit, &current)) found_digit = true;
+ if (!found_digit) return false;
+ *pos = current;
+ if ((MatchString("e", &current) || MatchString("E", &current)) &&
+ (MatchString("+", &current) || MatchString("-", &current) || true) &&
+ MatchChar(std::isdigit, &current)) {
+ while (MatchChar(std::isdigit, &current)) {
+ }
+ *pos = current;
+ return true;
+ }
+ return true;
+ }
+
+ TorqueGrammar() : Grammar(&file) { SetWhitespace(MatchWhitespace); }
+
+ // Result: std::string
+ Symbol identifier = {Rule({Pattern(MatchIdentifier)}, YieldMatchedInput)};
+
+ // Result: std::string
+ Symbol stringLiteral = {
+ Rule({Pattern(MatchStringLiteral)}, YieldMatchedInput)};
+
+ // Result: std::string
+ Symbol externalString = {Rule({&stringLiteral}, StringLiteralUnquoteAction)};
+
+ // Result: std::string
+ Symbol decimalLiteral = {
+ Rule({Pattern(MatchDecimalLiteral)}, YieldMatchedInput),
+ Rule({Pattern(MatchHexLiteral)}, YieldMatchedInput)};
+
+ // Result: TypeList
+ Symbol* typeList = List<TypeExpression*>(&type, Token(","));
+
+ // Result: TypeExpression*
+ Symbol simpleType = {
+ Rule({Token("("), &type, Token(")")}),
+ Rule({CheckIf(Token("constexpr")), &identifier}, MakeBasicTypeExpression),
+ Rule({Token("builtin"), Token("("), typeList, Token(")"), Token("=>"),
+ &simpleType},
+ MakeFunctionTypeExpression)};
+
+ // Result: TypeExpression*
+ Symbol type = {Rule({&simpleType}), Rule({&type, Token("|"), &simpleType},
+ MakeUnionTypeExpression)};
+
+ // Result: GenericParameters
+ Symbol genericParameters = {
+ Rule({Token("<"),
+ List<std::string>(
+ Sequence({&identifier, Token(":"), Token("type")}), Token(",")),
+ Token(">")})};
+
+ // Result: TypeList
+ Symbol genericSpecializationTypeList = {
+ Rule({Token("<"), typeList, Token(">")})};
+
+ // Result: base::Optional<TypeList>
+ Symbol* optionalGenericParameters = Optional<TypeList>(&genericParameters);
+
+ // Result: ParameterList
+ Symbol typeListMaybeVarArgs = {
+ Rule({Token("("), List<TypeExpression*>(Sequence({&type, Token(",")})),
+ Token("..."), Token(")")},
+ MakeParameterListFromTypes<true>),
+ Rule({Token("("), typeList, Token(")")},
+ MakeParameterListFromTypes<false>)};
+
+ // Result: LabelAndTypes
+ Symbol labelParameter = {Rule(
+ {&identifier,
+ TryOrDefault<TypeList>(Sequence({Token("("), typeList, Token(")")}))},
+ MakeLabelAndTypes)};
+
+ // Result: TypeExpression*
+ Symbol optionalReturnType = {Rule({Token(":"), &type}),
+ Rule({}, MakeVoidType)};
+
+ // Result: LabelAndTypesVector
+ Symbol* optionalLabelList{TryOrDefault<LabelAndTypesVector>(
+ Sequence({Token("labels"),
+ NonemptyList<LabelAndTypes>(&labelParameter, Token(","))}))};
+
+ // Result: std::vector<std::string>
+ Symbol* optionalOtherwise{TryOrDefault<std::vector<std::string>>(
+ Sequence({Token("otherwise"),
+ NonemptyList<std::string>(&identifier, Token(","))}))};
+
+ // Result: NameAndTypeExpression
+ Symbol nameAndType = {
+ Rule({&identifier, Token(":"), &type}, MakeNameAndType)};
+
+ // Result: ParameterList
+ Symbol parameterListNoVararg = {
+ Rule({Token("("), List<NameAndTypeExpression>(&nameAndType, Token(",")),
+ Token(")")},
+ MakeParameterListFromNameAndTypeList<false>)};
+
+ // Result: ParameterList
+ Symbol parameterListAllowVararg = {
+ Rule({&parameterListNoVararg}),
+ Rule({Token("("),
+ NonemptyList<NameAndTypeExpression>(&nameAndType, Token(",")),
+ Token(","), Token("..."), &identifier, Token(")")},
+ MakeParameterListFromNameAndTypeList<true>)};
+
+ // Result: std::string
+ Symbol* OneOf(std::vector<std::string> alternatives) {
+ Symbol* result = NewSymbol();
+ for (const std::string& s : alternatives) {
+ result->AddRule(Rule({Token(s)}, YieldMatchedInput));
+ }
+ return result;
+ }
+
+ // Result: Expression*
+ Symbol* BinaryOperator(Symbol* nextLevel, Symbol* op) {
+ Symbol* result = NewSymbol();
+ *result = {Rule({nextLevel}),
+ Rule({result, op, nextLevel}, MakeBinaryOperator)};
+ return result;
+ }
+
+ // Result: Expression*
+ Symbol* expression = &assignmentExpression;
+
+ // Result: IncrementDecrementOperator
+ Symbol incrementDecrementOperator = {
+ Rule({Token("++")},
+ YieldIntegralConstant<IncrementDecrementOperator,
+ IncrementDecrementOperator::kIncrement>),
+ Rule({Token("--")},
+ YieldIntegralConstant<IncrementDecrementOperator,
+ IncrementDecrementOperator::kDecrement>)};
+
+ // Result: LocationExpression*
+ Symbol locationExpression = {
+ Rule(
+ {&identifier, TryOrDefault<TypeList>(&genericSpecializationTypeList)},
+ MakeIdentifierExpression),
+ Rule({&primaryExpression, Token("."), &identifier},
+ MakeFieldAccessExpression),
+ Rule({&primaryExpression, Token("["), expression, Token("]")},
+ MakeElementAccessExpression)};
+
+ // Result: std::vector<Expression*>
+ Symbol argumentList = {Rule(
+ {Token("("), List<Expression*>(expression, Token(",")), Token(")")})};
+
+ // Result: Expression*
+ Symbol callExpression = {
+ Rule({&identifier, TryOrDefault<TypeList>(&genericSpecializationTypeList),
+ &argumentList, optionalOtherwise},
+ MakeCall)};
+
+ // Result: Expression*
+ Symbol primaryExpression = {
+ Rule({&callExpression}),
+ Rule({&locationExpression},
+ CastParseResult<LocationExpression*, Expression*>),
+ Rule({&decimalLiteral}, MakeNumberLiteralExpression),
+ Rule({&stringLiteral}, MakeStringLiteralExpression),
+ Rule({&identifier, Token("{"), List<Expression*>(expression, Token(",")),
+ Token("}")},
+ MakeStructExpression),
+ Rule({Token("("), expression, Token(")")})};
+
+ // Result: Expression*
+ Symbol unaryExpression = {
+ Rule({&primaryExpression}),
+ Rule({OneOf({"+", "-", "!", "~"}), &unaryExpression}, MakeUnaryOperator),
+ Rule({&incrementDecrementOperator, &locationExpression},
+ MakeIncrementDecrementExpressionPrefix),
+ Rule({&locationExpression, &incrementDecrementOperator},
+ MakeIncrementDecrementExpressionPostfix)};
+
+ // Result: Expression*
+ Symbol* multiplicativeExpression =
+ BinaryOperator(&unaryExpression, OneOf({"*", "/", "%"}));
+
+ // Result: Expression*
+ Symbol* additiveExpression =
+ BinaryOperator(multiplicativeExpression, OneOf({"+", "-"}));
+
+ // Result: Expression*
+ Symbol* shiftExpression =
+ BinaryOperator(additiveExpression, OneOf({"<<", ">>", ">>>"}));
+
+  // Do not allow expressions like a < b > c because this is never
+  // useful and is ambiguous with template parameters.
+ // Result: Expression*
+ Symbol relationalExpression = {
+ Rule({shiftExpression}),
+ Rule({shiftExpression, OneOf({"<", ">", "<=", ">="}), shiftExpression},
+ MakeBinaryOperator)};
+
+ // Result: Expression*
+ Symbol* equalityExpression =
+ BinaryOperator(&relationalExpression, OneOf({"==", "!="}));
+
+ // Result: Expression*
+ Symbol* bitwiseExpression =
+ BinaryOperator(equalityExpression, OneOf({"&", "|"}));
+
+ // Result: Expression*
+ Symbol logicalAndExpression = {
+ Rule({bitwiseExpression}),
+ Rule({&logicalAndExpression, Token("&&"), bitwiseExpression},
+ MakeLogicalAndExpression)};
+
+ // Result: Expression*
+ Symbol logicalOrExpression = {
+ Rule({&logicalAndExpression}),
+ Rule({&logicalOrExpression, Token("||"), &logicalAndExpression},
+ MakeLogicalOrExpression)};
+
+ // Result: Expression*
+ Symbol conditionalExpression = {
+ Rule({&logicalOrExpression}),
+ Rule({&logicalOrExpression, Token("?"), expression, Token(":"),
+ &conditionalExpression},
+ MakeConditionalExpression)};
+
+ // Result: base::Optional<std::string>
+ Symbol assignmentOperator = {
+ Rule({Token("=")}, YieldDefaultValue<base::Optional<std::string>>),
+ Rule({OneOf({"*=", "/=", "%=", "+=", "-=", "<<=", ">>=", ">>>=", "&=",
+ "^=", "|="})},
+ ExtractAssignmentOperator)};
+
+ // Result: Expression*
+ Symbol assignmentExpression = {
+ Rule({&conditionalExpression}),
+ Rule({&locationExpression, &assignmentOperator, &assignmentExpression},
+ MakeAssignmentExpression)};
+
+ // Result: Statement*
+ Symbol block = {Rule({CheckIf(Token("deferred")), Token("{"),
+ List<Statement*>(&statement), Token("}")},
+ MakeBlockStatement)};
+
+ // Result: LabelBlock*
+ Symbol labelBlock = {
+ Rule({Token("label"), &identifier,
+ TryOrDefault<ParameterList>(&parameterListNoVararg), &block},
+ MakeLabelBlock)};
+
+ // Result: ExpressionWithSource
+ Symbol expressionWithSource = {Rule({expression}, MakeExpressionWithSource)};
+
+ // Result: RangeExpression
+ Symbol rangeSpecifier = {
+ Rule({Token("["), Optional<Expression*>(expression), Token(":"),
+ Optional<Expression*>(expression), Token("]")},
+ MakeRangeExpression)};
+
+ // Result: Statement*
+ Symbol varDeclaration = {
+ Rule({OneOf({"let", "const"}), &identifier, Token(":"), &type},
+ MakeVarDeclarationStatement)};
+
+ // Result: Statement*
+ Symbol varDeclarationWithInitialization = {
+ Rule({OneOf({"let", "const"}), &identifier, Token(":"), &type, Token("="),
+ expression},
+ MakeVarDeclarationStatement)};
+
+ // Disallow ambiguous dangling else by only allowing an {atomarStatement} as
+ // a then-clause. Result: Statement*
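+  // (For example, `if (a) if (b) foo(); else bar();` would otherwise leave it
+  // unclear which `if` the `else` binds to; an if-statement is not an
+  // {atomarStatement}, so such nesting requires explicit blocks.)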
+ Symbol atomarStatement = {
+ Rule({&block}),
+ Rule({expression, Token(";")}, MakeExpressionStatement),
+ Rule({Token("return"), Optional<Expression*>(expression), Token(";")},
+ MakeReturnStatement),
+ Rule({Token("tail"), &callExpression, Token(";")}, MakeTailCallStatement),
+ Rule({Token("break"), Token(";")}, MakeBreakStatement),
+ Rule({Token("continue"), Token(";")}, MakeContinueStatement),
+ Rule({Token("goto"), &identifier,
+ TryOrDefault<std::vector<Expression*>>(&argumentList), Token(";")},
+ MakeGotoStatement),
+ Rule({OneOf({"debug", "unreachable"}), Token(";")}, MakeDebugStatement)};
+
+ // Result: Statement*
+ Symbol statement = {
+ Rule({&atomarStatement}),
+ Rule({&varDeclaration, Token(";")}),
+ Rule({&varDeclarationWithInitialization, Token(";")}),
+ Rule({Token("if"), CheckIf(Token("constexpr")), Token("("), expression,
+ Token(")"), &atomarStatement,
+ Optional<Statement*>(Sequence({Token("else"), &statement}))},
+ MakeIfStatement),
+ Rule(
+ {
+ Token("typeswitch"), Token("("), expression, Token(")"),
+ Token("{"), NonemptyList<TypeswitchCase>(&typeswitchCase),
+ Token("}"),
+ },
+ MakeTypeswitchStatement),
+ Rule({Token("try"), &block, NonemptyList<LabelBlock*>(&labelBlock)},
+ MakeTryLabelStatement),
+ Rule({OneOf({"assert", "check"}), Token("("), &expressionWithSource,
+ Token(")"), Token(";")},
+ MakeAssertStatement),
+ Rule({Token("while"), Token("("), expression, Token(")"),
+ &atomarStatement},
+ MakeWhileStatement),
+ Rule({Token("for"), Token("("), &varDeclaration, Token("of"), expression,
+ Optional<RangeExpression>(&rangeSpecifier), Token(")"),
+ &atomarStatement},
+ MakeForOfLoopStatement),
+ Rule({Token("for"), Token("("),
+ Optional<Statement*>(&varDeclarationWithInitialization), Token(";"),
+ Optional<Expression*>(expression), Token(";"),
+ Optional<Expression*>(expression), Token(")"), &atomarStatement},
+ MakeForLoopStatement)};
+
+ // Result: TypeswitchCase
+ Symbol typeswitchCase = {
+ Rule({Token("case"), Token("("),
+ Optional<std::string>(Sequence({&identifier, Token(":")})), &type,
+ Token(")"), &block},
+ MakeTypeswitchCase)};
+
+ // Result: base::Optional<Statement*>
+ Symbol optionalBody = {
+ Rule({&block}, CastParseResult<Statement*, base::Optional<Statement*>>),
+ Rule({Token(";")}, YieldDefaultValue<base::Optional<Statement*>>)};
+
+ // Result: Declaration*
+ Symbol declaration = {
+ Rule({Token("const"), &identifier, Token(":"), &type, Token("="),
+ expression, Token(";")},
+ MakeConstDeclaration),
+ Rule({Token("const"), &identifier, Token(":"), &type, Token("generates"),
+ &externalString, Token(";")},
+ MakeExternConstDeclaration),
+ Rule({Token("type"), &identifier,
+ Optional<std::string>(Sequence({Token("extends"), &identifier})),
+ Optional<std::string>(
+ Sequence({Token("generates"), &externalString})),
+ Optional<std::string>(
+ Sequence({Token("constexpr"), &externalString})),
+ Token(";")},
+ MakeTypeDeclaration),
+ Rule({Token("type"), &identifier, Token("="), &type, Token(";")},
+ MakeTypeAliasDeclaration),
+ Rule({Token("extern"),
+ Optional<std::string>(
+ Sequence({Token("operator"), &externalString})),
+ Token("macro"), &identifier,
+ TryOrDefault<GenericParameters>(&genericParameters),
+ &typeListMaybeVarArgs, &optionalReturnType, optionalLabelList,
+ Token(";")},
+ MakeExternalMacro),
+ Rule({Token("extern"), CheckIf(Token("javascript")), Token("builtin"),
+ &identifier, TryOrDefault<GenericParameters>(&genericParameters),
+ &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+ MakeExternalBuiltin),
+ Rule({Token("extern"), Token("runtime"), &identifier,
+ &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
+ MakeExternalRuntime),
+ Rule({Optional<std::string>(
+ Sequence({Token("operator"), &externalString})),
+ Token("macro"), &identifier,
+ TryOrDefault<GenericParameters>(&genericParameters),
+ &parameterListNoVararg, &optionalReturnType, optionalLabelList,
+ &optionalBody},
+ MakeTorqueMacroDeclaration),
+ Rule({CheckIf(Token("javascript")), Token("builtin"), &identifier,
+ TryOrDefault<GenericParameters>(&genericParameters),
+ &parameterListAllowVararg, &optionalReturnType, &optionalBody},
+ MakeTorqueBuiltinDeclaration),
+ Rule({&identifier, &genericSpecializationTypeList,
+ &parameterListAllowVararg, &optionalReturnType, optionalLabelList,
+ &block},
+ MakeSpecializationDeclaration),
+ Rule({Token("struct"), &identifier, Token("{"),
+ List<NameAndTypeExpression>(Sequence({&nameAndType, Token(";")})),
+ Token("}")},
+ MakeStructDeclaration)};
+
+ // Result: Declaration*
+ Symbol moduleDeclaration = {
+ Rule({Token("module"), &identifier, Token("{"),
+ List<Declaration*>(&declaration), Token("}")},
+ MakeExplicitModuleDeclaration)};
+
+ Symbol file = {Rule({&file, &moduleDeclaration}, AddGlobalDeclaration),
+ Rule({&file, &declaration}, AddGlobalDeclaration), Rule({})};
+};
+
+} // namespace
+
+void ParseTorque(const std::string& input) { TorqueGrammar().Parse(input); }
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/torque-parser.h b/deps/v8/src/torque/torque-parser.h
new file mode 100644
index 0000000000..99ac7cb75c
--- /dev/null
+++ b/deps/v8/src/torque/torque-parser.h
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_TORQUE_PARSER_H_
+#define V8_TORQUE_TORQUE_PARSER_H_
+
+#include "src/torque/ast.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DECLARE_CONTEXTUAL_VARIABLE(CurrentAst, Ast);
+
+// Adds the parsed input to {CurrentAst}
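+//
+// Typical usage, as in torque.cc (sketch):
+//   CurrentAst::Scope ast_scope;
+//   ParseTorque(file_content);
+//   GlobalContext global_context(std::move(CurrentAst::Get()));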
+void ParseTorque(const std::string& input);
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_TORQUE_PARSER_H_
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index c771623738..8fcd0be6eb 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -2,17 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <fstream>
#include <iostream>
-#include "./antlr4-runtime.h"
-#include "src/torque/TorqueBaseVisitor.h"
-#include "src/torque/TorqueLexer.h"
-#include "src/torque/ast-generator.h"
#include "src/torque/declarable.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/implementation-visitor.h"
#include "src/torque/scope.h"
+#include "src/torque/torque-parser.h"
#include "src/torque/type-oracle.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -21,46 +19,13 @@ namespace v8 {
namespace internal {
namespace torque {
-size_t Label::next_id_ = 0;
-
-class FailedParseErrorStrategy : public antlr4::DefaultErrorStrategy {
- public:
- FailedParseErrorStrategy() : DefaultErrorStrategy(), failed_(false) {}
- void reportError(antlr4::Parser* recognizer,
- const antlr4::RecognitionException& e) override {
- antlr4::DefaultErrorStrategy::reportError(recognizer, e);
- failed_ = true;
- }
-
- bool FailedParse() const { return failed_; }
-
- public:
- bool failed_;
-};
-
-class TorqueErrorListener : public antlr4::BaseErrorListener {
- public:
- TorqueErrorListener() : BaseErrorListener() {}
-
- void syntaxError(antlr4::Recognizer* recognizer,
- antlr4::Token* /*offendingSymbol*/, size_t line,
- size_t charPositionInLine, const std::string& msg,
- std::exception_ptr /*e*/) {
- std::cerr << recognizer->getInputStream()->getSourceName() << ": " << line
- << ":" << charPositionInLine << " " << msg << "\n";
- }
-};
-
int WrappedMain(int argc, const char** argv) {
std::string output_directory;
- std::vector<SourceFileContext> file_contexts;
- AstGenerator ast_generator;
- SourceFileContext context;
- size_t lexer_errors = 0;
- auto error_strategy = std::make_shared<FailedParseErrorStrategy>();
- TorqueErrorListener error_listener;
bool verbose = false;
- SourceFileMap::Scope scope;
+ SourceFileMap::Scope source_file_map_scope;
+ CurrentSourceFile::Scope unknown_sourcefile_scope(
+ SourceFileMap::AddSource("<unknown>"));
+ CurrentAst::Scope ast_scope;
for (int i = 1; i < argc; ++i) {
// Check for options
if (!strcmp("-o", argv[i])) {
@@ -75,31 +40,16 @@ int WrappedMain(int argc, const char** argv) {
// Otherwise it's a .tq
// file, parse it and
// remember the syntax tree
- context.name = argv[i];
- context.stream = std::unique_ptr<antlr4::ANTLRFileStream>(
- new antlr4::ANTLRFileStream(context.name.c_str()));
- context.lexer =
- std::unique_ptr<TorqueLexer>(new TorqueLexer(context.stream.get()));
- context.lexer->removeErrorListeners();
- context.lexer->addErrorListener(&error_listener);
- context.tokens = std::unique_ptr<antlr4::CommonTokenStream>(
- new antlr4::CommonTokenStream(context.lexer.get()));
- context.tokens->fill();
- lexer_errors += context.lexer->getNumberOfSyntaxErrors();
- context.parser =
- std::unique_ptr<TorqueParser>(new TorqueParser(context.tokens.get()));
- context.parser->setErrorHandler(error_strategy);
- context.parser->removeErrorListeners();
- context.parser->addErrorListener(&error_listener);
- context.file = context.parser->file();
- ast_generator.visitSourceFile(&context);
- }
-
- if (lexer_errors != 0 || error_strategy->FailedParse()) {
- return -1;
+ std::string path = argv[i];
+ SourceId source_id = SourceFileMap::AddSource(path);
+ CurrentSourceFile::Scope source_id_scope(source_id);
+ std::ifstream file_stream(path);
+ std::string file_content = {std::istreambuf_iterator<char>(file_stream),
+ std::istreambuf_iterator<char>()};
+ ParseTorque(file_content);
}
- GlobalContext global_context(std::move(ast_generator).GetAst());
+ GlobalContext global_context(std::move(CurrentAst::Get()));
if (verbose) global_context.SetVerbose();
TypeOracle::Scope type_oracle(global_context.declarations());
diff --git a/deps/v8/src/torque/type-oracle.cc b/deps/v8/src/torque/type-oracle.cc
new file mode 100644
index 0000000000..f1c29cc13c
--- /dev/null
+++ b/deps/v8/src/torque/type-oracle.cc
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/type-oracle.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+DEFINE_CONTEXTUAL_VARIABLE(TypeOracle)
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 3ba7846678..261f085edb 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <fstream>
#include <iostream>
#include "src/torque/declarable.h"
@@ -33,6 +32,7 @@ std::string Type::ToString() const {
}
bool Type::IsSubtypeOf(const Type* supertype) const {
+ if (IsNever()) return true;
if (const UnionType* union_type = UnionType::DynamicCast(supertype)) {
return union_type->IsSupertypeOf(this);
}
@@ -154,6 +154,36 @@ const Type* UnionType::NonConstexprVersion() const {
return this;
}
+void UnionType::RecomputeParent() {
+ const Type* parent = nullptr;
+ for (const Type* t : types_) {
+ if (parent == nullptr) {
+ parent = t;
+ } else {
+ parent = CommonSupertype(parent, t);
+ }
+ }
+ set_parent(parent);
+}
+
+void UnionType::Subtract(const Type* t) {
+ for (auto it = types_.begin(); it != types_.end();) {
+ if ((*it)->IsSubtypeOf(t)) {
+ it = types_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ if (types_.size() == 0) types_.insert(TypeOracle::GetNeverType());
+ RecomputeParent();
+}
+
+const Type* SubtractType(const Type* a, const Type* b) {
+ UnionType result = UnionType::FromType(a);
+ result.Subtract(b);
+ return TypeOracle::GetUnionType(result);
+}
+
std::string StructType::ToExplicitString() const {
std::stringstream result;
result << "{";
@@ -271,6 +301,7 @@ std::string VisitResult::LValue() const {
}
std::string VisitResult::RValue() const {
+ std::string result;
if (declarable()) {
auto value = *declarable();
if (value->IsVariable() && !Variable::cast(value)->IsDefined()) {
@@ -278,10 +309,12 @@ std::string VisitResult::RValue() const {
s << "\"" << value->name() << "\" is used before it is defined";
ReportError(s.str());
}
- return value->RValue();
+ result = value->RValue();
} else {
- return value_;
+ result = value_;
}
+ return "implicit_cast<" + type()->GetGeneratedTypeName() + ">(" + result +
+ ")";
}
} // namespace torque
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index f1b6cd9c7e..24acaea5c7 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -34,6 +34,7 @@ static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
class Label;
class Value;
+class Module;
class TypeBase {
public:
@@ -247,13 +248,6 @@ class UnionType final : public Type {
return base::nullopt;
}
- const Type* Normalize() const {
- if (types_.size() == 1) {
- return parent();
- }
- return this;
- }
-
bool IsSubtypeOf(const Type* other) const override {
for (const Type* member : types_) {
if (!member->IsSubtypeOf(other)) return false;
@@ -287,6 +281,8 @@ class UnionType final : public Type {
}
}
+ void Subtract(const Type* t);
+
static UnionType FromType(const Type* t) {
const UnionType* union_type = UnionType::DynamicCast(t);
return union_type ? UnionType(*union_type) : UnionType(t);
@@ -294,10 +290,13 @@ class UnionType final : public Type {
private:
explicit UnionType(const Type* t) : Type(Kind::kUnionType, t), types_({t}) {}
+ void RecomputeParent();
std::set<const Type*, TypeLess> types_;
};
+const Type* SubtractType(const Type* a, const Type* b);
+
class StructType final : public Type {
public:
DECLARE_TYPE_BOILERPLATE(StructType);
@@ -310,6 +309,15 @@ class StructType final : public Type {
bool IsConstexpr() const override { return false; }
const std::vector<NameAndType>& fields() const { return fields_; }
+ const Type* GetFieldType(const std::string& fieldname) const {
+ for (const NameAndType& field : fields()) {
+ if (field.name == fieldname) return field.type;
+ }
+ std::stringstream s;
+ s << "\"" << fieldname << "\" is not a field of struct type \"" << name()
+ << "\"";
+ ReportError(s.str());
+ }
const std::string& name() const { return name_; }
Module* module() const { return module_; }
@@ -341,14 +349,13 @@ class VisitResult {
: type_(type), value_(value), declarable_{} {}
VisitResult(const Type* type, const Value* declarable);
const Type* type() const { return type_; }
- // const std::string& variable() const { return variable_; }
base::Optional<const Value*> declarable() const { return declarable_; }
std::string LValue() const;
std::string RValue() const;
void SetType(const Type* new_type) { type_ = new_type; }
private:
- const Type* type_;
+ const Type* type_ = nullptr;
std::string value_;
base::Optional<const Value*> declarable_;
};
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 24aeb6fc11..fb3f66ab02 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -14,13 +14,71 @@ namespace v8 {
namespace internal {
namespace torque {
+std::string StringLiteralUnquote(const std::string& s) {
+ DCHECK(('"' == s.front() && '"' == s.back()) ||
+ ('\'' == s.front() && '\'' == s.back()));
+ std::stringstream result;
+ for (size_t i = 1; i < s.length() - 1; ++i) {
+ if (s[i] == '\\') {
+ switch (s[++i]) {
+ case 'n':
+ result << '\n';
+ break;
+ case 'r':
+ result << '\r';
+ break;
+ case 't':
+ result << '\t';
+ break;
+ case '\'':
+ case '"':
+ case '\\':
+ result << s[i];
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ result << s[i];
+ }
+ }
+ return result.str();
+}
+
+std::string StringLiteralQuote(const std::string& s) {
+ std::stringstream result;
+ result << '"';
+  for (size_t i = 0; i < s.length(); ++i) {
+ switch (s[i]) {
+ case '\n':
+ result << "\\n";
+ break;
+ case '\r':
+ result << "\\r";
+ break;
+ case '\t':
+ result << "\\t";
+ break;
+ case '\'':
+ case '"':
+ case '\\':
+ result << "\\" << s[i];
+ break;
+ default:
+ result << s[i];
+ }
+ }
+ result << '"';
+ return result.str();
+}
+
std::string CurrentPositionAsString() {
return PositionAsString(CurrentSourcePosition::Get());
}
[[noreturn]] void ReportError(const std::string& error) {
std::cerr << CurrentPositionAsString() << ": Torque error: " << error << "\n";
- throw(-1);
+ std::abort();
}
std::string CamelifyString(const std::string& underscore_string) {
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 59379fa526..0612048589 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -17,6 +17,9 @@ namespace torque {
typedef std::vector<std::string> NameVector;
+std::string StringLiteralUnquote(const std::string& s);
+std::string StringLiteralQuote(const std::string& s);
+
[[noreturn]] void ReportError(const std::string& error);
std::string CamelifyString(const std::string& underscore_string);
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index b22e48ef34..2ca28d9321 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -131,8 +131,8 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
int index =
is_special_transition
? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array->Search(isolate_, details.kind(), *name,
- details.attributes(), &insertion_index);
+ : array->Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
// If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
array->SetRawTarget(index, HeapObjectReference::Weak(*target));
@@ -178,8 +178,8 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
int index =
is_special_transition
? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array->Search(isolate_, details.kind(), *name,
- details.attributes(), &insertion_index);
+ : array->Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
if (index == kNotFound) {
++new_nof;
} else {
@@ -220,7 +220,7 @@ Map* TransitionsAccessor::SearchTransition(Name* name, PropertyKind kind,
return map;
}
case kFullTransitionArray: {
- int transition = transitions()->Search(isolate_, kind, name, attributes);
+ int transition = transitions()->Search(kind, name, attributes);
if (transition == kNotFound) return nullptr;
return transitions()->GetTarget(transition);
}
@@ -544,8 +544,8 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
new_target_index = new_transitions->SearchSpecial(Symbol::cast(key));
} else {
PropertyDetails details = GetTargetDetails(key, target);
- new_target_index = new_transitions->Search(isolate_, details.kind(),
- key, details.attributes());
+ new_target_index =
+ new_transitions->Search(details.kind(), key, details.attributes());
}
DCHECK_NE(TransitionArray::kNotFound, new_target_index);
DCHECK_EQ(target, new_transitions->GetTarget(new_target_index));
@@ -556,8 +556,7 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
// Private non-static helper functions (operating on full transition arrays).
-int TransitionArray::SearchDetails(Isolate* isolate, int transition,
- PropertyKind kind,
+int TransitionArray::SearchDetails(int transition, PropertyKind kind,
PropertyAttributes attributes,
int* out_insertion_index) {
int nof_transitions = number_of_transitions();
@@ -581,13 +580,12 @@ int TransitionArray::SearchDetails(Isolate* isolate, int transition,
return kNotFound;
}
-int TransitionArray::Search(Isolate* isolate, PropertyKind kind, Name* name,
+int TransitionArray::Search(PropertyKind kind, Name* name,
PropertyAttributes attributes,
int* out_insertion_index) {
int transition = SearchName(name, out_insertion_index);
if (transition == kNotFound) return kNotFound;
- return SearchDetails(isolate, transition, kind, attributes,
- out_insertion_index);
+ return SearchDetails(transition, kind, attributes, out_insertion_index);
}
void TransitionArray::Sort() {
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 99fba563ea..9684815239 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -297,8 +297,8 @@ class TransitionArray : public WeakFixedArray {
}
// Search a transition for a given kind, property name and attributes.
- int Search(Isolate* isolate, PropertyKind kind, Name* name,
- PropertyAttributes attributes, int* out_insertion_index = nullptr);
+ int Search(PropertyKind kind, Name* name, PropertyAttributes attributes,
+ int* out_insertion_index = nullptr);
// Search a non-property transition (like elements kind, observe or frozen
// transitions).
@@ -307,7 +307,7 @@ class TransitionArray : public WeakFixedArray {
}
// Search a first transition for a given property name.
inline int SearchName(Name* name, int* out_insertion_index = nullptr);
- int SearchDetails(Isolate* isolate, int transition, PropertyKind kind,
+ int SearchDetails(int transition, PropertyKind kind,
PropertyAttributes attributes, int* out_insertion_index);
inline int number_of_transitions() const;
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 635326dcf1..2d75d2d7e4 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -134,7 +134,6 @@ CodeProtectionInfo* CreateHandlerData(
int RegisterHandlerData(
Address base, size_t size, size_t num_protected_instructions,
const ProtectedInstructionData* protected_instructions) {
- // TODO(eholk): in debug builds, make sure this data isn't already registered.
CodeProtectionInfo* data = CreateHandlerData(
base, size, num_protected_instructions, protected_instructions);
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index bd4ccc1f65..c25de9c1e9 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -77,6 +77,13 @@ inline bool IsTrapHandlerEnabled() {
extern THREAD_LOCAL int g_thread_in_wasm_code;
+// Returns the address of the thread-local {g_thread_in_wasm_code} variable.
+// This pointer can be accessed and modified as long as the thread calling
+// this function exists. Only use it from the same thread to avoid race
+// conditions.
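+// For example (sketch), a caller on its own thread could write:
+//   int* in_wasm = GetThreadInWasmThreadLocalAddress();
+//   *in_wasm = 1;  // IsThreadInWasm() now returns true on this thread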
+inline int* GetThreadInWasmThreadLocalAddress() {
+ return &g_thread_in_wasm_code;
+}
+
inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
inline void SetThreadInWasm() {
diff --git a/deps/v8/src/turbo-assembler.cc b/deps/v8/src/turbo-assembler.cc
index 079feabb2e..d6134806fa 100644
--- a/deps/v8/src/turbo-assembler.cc
+++ b/deps/v8/src/turbo-assembler.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins.h"
#include "src/builtins/constants-table-builder.h"
#include "src/heap/heap-inl.h"
+#include "src/lsan.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
@@ -25,7 +26,6 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
void TurboAssemblerBase::IndirectLoadConstant(Register destination,
Handle<HeapObject> object) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// Before falling back to the (fairly slow) lookup from the constants table,
@@ -47,6 +47,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
LoadRootRelative(destination,
RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
} else {
+ CHECK(isolate()->ShouldLoadConstantsFromRootList());
// Ensure the given object is in the builtins constants table and fetch its
// index.
BuiltinsConstantsTableBuilder* builder =
@@ -60,7 +61,6 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
void TurboAssemblerBase::IndirectLoadExternalReference(
Register destination, ExternalReference reference) {
- CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
if (IsAddressableThroughRootRegister(isolate(), reference)) {
@@ -118,5 +118,17 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltinIndex(
builtin_index * kPointerSize;
}
+void TurboAssemblerBase::RecordCommentForOffHeapTrampoline(int builtin_index) {
+ if (!FLAG_code_comments) return;
+ size_t len = strlen("-- Inlined Trampoline to --") +
+ strlen(Builtins::name(builtin_index)) + 1;
+ Vector<char> buffer = Vector<char>::New(static_cast<int>(len));
+ char* buffer_start = buffer.start();
+ LSAN_IGNORE_OBJECT(buffer_start);
+ SNPrintF(buffer, "-- Inlined Trampoline to %s --",
+ Builtins::name(builtin_index));
+ RecordComment(buffer_start);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/turbo-assembler.h b/deps/v8/src/turbo-assembler.h
index 67f895b1ca..44fbbca64c 100644
--- a/deps/v8/src/turbo-assembler.h
+++ b/deps/v8/src/turbo-assembler.h
@@ -6,6 +6,7 @@
#define V8_TURBO_ASSEMBLER_H_
#include "src/assembler-arch.h"
+#include "src/base/template-utils.h"
#include "src/heap/heap.h"
namespace v8 {
@@ -13,7 +14,7 @@ namespace internal {
// Common base class for platform-specific TurboAssemblers containing
// platform-independent bits.
-class TurboAssemblerBase : public Assembler {
+class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
public:
Isolate* isolate() const { return isolate_; }
@@ -26,7 +27,9 @@ class TurboAssemblerBase : public Assembler {
void set_root_array_available(bool v) { root_array_available_ = v; }
bool trap_on_abort() const { return trap_on_abort_; }
- void set_trap_on_abort(bool v) { trap_on_abort_ = v; }
+
+ bool should_abort_hard() const { return hard_abort_; }
+ void set_abort_hard(bool v) { hard_abort_ = v; }
void set_builtin_index(int i) { maybe_builtin_index_ = i; }
@@ -67,6 +70,8 @@ class TurboAssemblerBase : public Assembler {
void* buffer, int buffer_size,
CodeObjectRequired create_code_object);
+ void RecordCommentForOffHeapTrampoline(int builtin_index);
+
Isolate* const isolate_ = nullptr;
// This handle will be patched with the code object on installation.
@@ -78,6 +83,9 @@ class TurboAssemblerBase : public Assembler {
// Immediately trap instead of calling {Abort} when debug code fails.
bool trap_on_abort_ = FLAG_trap_on_abort;
+ // Emit a C call to abort instead of a runtime call.
+ bool hard_abort_ = false;
+
// May be set while generating builtins.
int maybe_builtin_index_ = Builtins::kNoBuiltinId;
@@ -89,13 +97,13 @@ class TurboAssemblerBase : public Assembler {
// Avoids emitting calls to the {Builtins::kAbort} builtin when emitting debug
// code during the lifetime of this scope object. For disabling debug code
// entirely use the {DontEmitDebugCodeScope} instead.
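+// Usage sketch (illustrative): `HardAbortScope hard_abort(&masm);` makes
+// failing debug checks in the enclosing scope abort via a C call instead of a
+// runtime call for the lifetime of the scope object.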
-class TrapOnAbortScope BASE_EMBEDDED {
+class HardAbortScope BASE_EMBEDDED {
public:
- explicit TrapOnAbortScope(TurboAssemblerBase* assembler)
- : assembler_(assembler), old_value_(assembler->trap_on_abort()) {
- assembler_->set_trap_on_abort(true);
+ explicit HardAbortScope(TurboAssemblerBase* assembler)
+ : assembler_(assembler), old_value_(assembler->should_abort_hard()) {
+ assembler_->set_abort_hard(true);
}
- ~TrapOnAbortScope() { assembler_->set_trap_on_abort(old_value_); }
+ ~HardAbortScope() { assembler_->set_abort_hard(old_value_); }
private:
TurboAssemblerBase* assembler_;
@@ -108,6 +116,19 @@ class TrapOnAbortScope BASE_EMBEDDED {
// - WebAssembly: Call native {WasmCode} stub via {RelocInfo::WASM_STUB_CALL}.
enum class StubCallMode { kCallOnHeapBuiltin, kCallWasmRuntimeStub };
+#ifdef DEBUG
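+// Returns true if any two of the given registers alias each other. Typical
+// use (register names are illustrative): `DCHECK(!AreAliased(dst, src,
+// scratch))`.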
+template <typename RegType, typename... RegTypes,
+ // All arguments must be either Register or DoubleRegister.
+ typename = typename std::enable_if<
+ base::is_same<Register, RegType, RegTypes...>::value ||
+ base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
+inline bool AreAliased(RegType first_reg, RegTypes... regs) {
+ int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
+ int num_given_regs = sizeof...(regs) + 1;
+ return num_different_regs < num_given_regs;
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/unoptimized-compilation-info.cc b/deps/v8/src/unoptimized-compilation-info.cc
index 3e5d4cb9f9..b58fe97a2c 100644
--- a/deps/v8/src/unoptimized-compilation-info.cc
+++ b/deps/v8/src/unoptimized-compilation-info.cc
@@ -4,7 +4,6 @@
#include "src/unoptimized-compilation-info.h"
-#include "src/api.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/debug/debug.h"
@@ -50,10 +49,6 @@ int UnoptimizedCompilationInfo::num_parameters_including_this() const {
return scope()->num_parameters() + 1;
}
-bool UnoptimizedCompilationInfo::has_simple_parameters() {
- return scope()->has_simple_parameters();
-}
-
SourcePositionTableBuilder::RecordingMode
UnoptimizedCompilationInfo::SourcePositionRecordingMode() const {
return is_native() ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
diff --git a/deps/v8/src/unoptimized-compilation-info.h b/deps/v8/src/unoptimized-compilation-info.h
index 6df6d78c2c..53295819bb 100644
--- a/deps/v8/src/unoptimized-compilation-info.h
+++ b/deps/v8/src/unoptimized-compilation-info.h
@@ -55,8 +55,6 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
DeclarationScope* scope() const;
- bool has_simple_parameters();
-
int num_parameters() const;
int num_parameters_including_this() const;
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index 72fada759c..54566bb32e 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -7,7 +7,6 @@
#include <vector>
#include "src/char-predicates-inl.h"
-#include "src/handles.h"
#include "src/isolate-inl.h"
#include "src/string-search.h"
#include "src/unicode-inl.h"
diff --git a/deps/v8/src/uri.h b/deps/v8/src/uri.h
index dfa057fd09..cc861e93da 100644
--- a/deps/v8/src/uri.h
+++ b/deps/v8/src/uri.h
@@ -6,6 +6,7 @@
#define V8_URI_H_
#include "src/allocation.h"
+#include "src/maybe-handles.h"
#include "src/objects.h"
namespace v8 {
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 64aa446ef2..052664f87f 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -6,6 +6,7 @@
#include <stdarg.h>
#include <sys/stat.h>
+#include <vector>
#include "src/base/functional.h"
#include "src/base/logging.h"
@@ -200,82 +201,61 @@ char* ReadLine(const char* prompt) {
return result;
}
+namespace {
-char* ReadCharsFromFile(FILE* file,
- int* size,
- int extra_space,
- bool verbose,
- const char* filename) {
+std::vector<char> ReadCharsFromFile(FILE* file, bool* exists, bool verbose,
+ const char* filename) {
if (file == nullptr || fseek(file, 0, SEEK_END) != 0) {
if (verbose) {
base::OS::PrintError("Cannot read from file %s.\n", filename);
}
- return nullptr;
+ *exists = false;
+ return std::vector<char>();
}
// Get the size of the file and rewind it.
- *size = static_cast<int>(ftell(file));
+ ptrdiff_t size = ftell(file);
rewind(file);
- char* result = NewArray<char>(*size + extra_space);
- for (int i = 0; i < *size && feof(file) == 0;) {
- int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
- if (read != (*size - i) && ferror(file) != 0) {
+ std::vector<char> result(size);
+ for (ptrdiff_t i = 0; i < size && feof(file) == 0;) {
+ ptrdiff_t read = fread(result.data() + i, 1, size - i, file);
+ if (read != (size - i) && ferror(file) != 0) {
fclose(file);
- DeleteArray(result);
- return nullptr;
+ *exists = false;
+ return std::vector<char>();
}
i += read;
}
+ *exists = true;
return result;
}
-
-char* ReadCharsFromFile(const char* filename,
- int* size,
- int extra_space,
- bool verbose) {
+std::vector<char> ReadCharsFromFile(const char* filename, bool* exists,
+ bool verbose) {
FILE* file = base::OS::FOpen(filename, "rb");
- char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
+ std::vector<char> result = ReadCharsFromFile(file, exists, verbose, filename);
if (file != nullptr) fclose(file);
return result;
}
-
-byte* ReadBytes(const char* filename, int* size, bool verbose) {
- char* chars = ReadCharsFromFile(filename, size, 0, verbose);
- return reinterpret_cast<byte*>(chars);
-}
-
-
-static Vector<const char> SetVectorContents(char* chars,
- int size,
- bool* exists) {
- if (!chars) {
- *exists = false;
- return Vector<const char>::empty();
+std::string VectorToString(const std::vector<char>& chars) {
+ if (chars.size() == 0) {
+ return std::string();
}
- chars[size] = '\0';
- *exists = true;
- return Vector<const char>(chars, size);
+ return std::string(chars.begin(), chars.end());
}
+} // namespace
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(filename, &size, 1, verbose);
- return SetVectorContents(result, size, exists);
+std::string ReadFile(const char* filename, bool* exists, bool verbose) {
+ std::vector<char> result = ReadCharsFromFile(filename, exists, verbose);
+ return VectorToString(result);
}
-
-Vector<const char> ReadFile(FILE* file,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(file, &size, 1, verbose, "");
- return SetVectorContents(result, size, exists);
+std::string ReadFile(FILE* file, bool* exists, bool verbose) {
+ std::vector<char> result = ReadCharsFromFile(file, exists, verbose, "");
+ return VectorToString(result);
}
@@ -405,7 +385,6 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
static bool g_memcopy_functions_initialized = false;
-
void init_memcopy_functions(Isolate* isolate) {
if (g_memcopy_functions_initialized) return;
g_memcopy_functions_initialized = true;
@@ -442,6 +421,14 @@ bool DoubleToBoolean(double d) {
return true;
}
+uintptr_t GetCurrentStackPosition() {
+#if V8_CC_MSVC
+ return reinterpret_cast<uintptr_t>(_AddressOfReturnAddress());
+#else
+ return reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+#endif
+}
+
// The filter is a pattern that matches function names in this way:
// "*" all; the default
// "-" all but the top-level function
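
The new out-of-line GetCurrentStackPosition() above reads the current frame address via _AddressOfReturnAddress() on MSVC and __builtin_frame_address(0) elsewhere. A portable sketch of the same idea, together with the kind of stack-headroom check such a value typically feeds, is shown below; the 128 KiB limit is a hypothetical number chosen only for illustration.

    #include <cstdint>
    #include <cstdio>

    // Portable stand-in for the helper above; V8 additionally marks it
    // V8_NOINLINE so the frame it inspects is the caller's frame rather than
    // a large frame the call was inlined into.
    #if defined(_MSC_VER)
    #include <intrin.h>
    uintptr_t GetCurrentStackPosition() {
      return reinterpret_cast<uintptr_t>(_AddressOfReturnAddress());
    }
    #else
    __attribute__((noinline)) uintptr_t GetCurrentStackPosition() {
      return reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
    }
    #endif

    int main() {
      // Hypothetical limit 128 KiB below the current position, to show how
      // such a value is usually consumed (stack overflow checks).
      uintptr_t limit = GetCurrentStackPosition() - 128 * 1024;
      std::printf("headroom: %ju bytes\n",
                  static_cast<uintmax_t>(GetCurrentStackPosition() - limit));
    }
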
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index b922332172..9f4bb08614 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -9,6 +9,7 @@
#include <stdlib.h>
#include <string.h>
#include <cmath>
+#include <string>
#include <type_traits>
#include "include/v8.h"
@@ -1079,12 +1080,6 @@ inline void Flush() {
char* ReadLine(const char* prompt);
-// Read and return the raw bytes in a file. the size of the buffer is returned
-// in size.
-// The returned buffer must be freed by the caller.
-byte* ReadBytes(const char* filename, int* size, bool verbose = true);
-
-
// Append size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int AppendChars(const char* filename,
@@ -1228,16 +1223,11 @@ inline void MemsetPointer(T** dest, U* value, int counter) {
#undef STOS
}
-
-// Simple support to read a file into a 0-terminated C-string.
-// The returned buffer must be freed by the caller.
+// Simple support to read a file into std::string.
// On return, *exists tells whether the file existed.
-V8_EXPORT_PRIVATE Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose = true);
-Vector<const char> ReadFile(FILE* file,
- bool* exists,
- bool verbose = true);
+V8_EXPORT_PRIVATE std::string ReadFile(const char* filename, bool* exists,
+ bool verbose = true);
+std::string ReadFile(FILE* file, bool* exists, bool verbose = true);
template <typename sourcechar, typename sinkchar>
V8_INLINE static void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src,
@@ -1572,92 +1562,11 @@ bool DoubleToBoolean(double d);
template <typename Stream>
bool StringToArrayIndex(Stream* stream, uint32_t* index);
-// Returns current value of top of the stack. Works correctly with ASAN.
-DISABLE_ASAN
-inline uintptr_t GetCurrentStackPosition() {
- // Takes the address of the limit variable in order to find out where
- // the top of stack is right now.
- uintptr_t limit = reinterpret_cast<uintptr_t>(&limit);
- return limit;
-}
-
-template <typename V>
-static inline V ReadUnalignedValue(Address p) {
- ASSERT_TRIVIALLY_COPYABLE(V);
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
- return *reinterpret_cast<const V*>(p);
-#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
- V r;
- memmove(&r, reinterpret_cast<void*>(p), sizeof(V));
- return r;
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
-}
-
-template <typename V>
-static inline void WriteUnalignedValue(Address p, V value) {
- ASSERT_TRIVIALLY_COPYABLE(V);
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
- *(reinterpret_cast<V*>(p)) = value;
-#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
- memmove(reinterpret_cast<void*>(p), &value, sizeof(V));
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
-}
-
-static inline double ReadFloatValue(Address p) {
- return ReadUnalignedValue<float>(p);
-}
-
-static inline double ReadDoubleValue(Address p) {
- return ReadUnalignedValue<double>(p);
-}
-
-static inline void WriteDoubleValue(Address p, double value) {
- WriteUnalignedValue(p, value);
-}
-
-static inline uint16_t ReadUnalignedUInt16(Address p) {
- return ReadUnalignedValue<uint16_t>(p);
-}
-
-static inline void WriteUnalignedUInt16(Address p, uint16_t value) {
- WriteUnalignedValue(p, value);
-}
-
-static inline uint32_t ReadUnalignedUInt32(Address p) {
- return ReadUnalignedValue<uint32_t>(p);
-}
-
-static inline void WriteUnalignedUInt32(Address p, uint32_t value) {
- WriteUnalignedValue(p, value);
-}
-
-template <typename V>
-static inline V ReadLittleEndianValue(Address p) {
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- return ReadUnalignedValue<V>(p);
-#elif defined(V8_TARGET_BIG_ENDIAN)
- V ret{};
- const byte* src = reinterpret_cast<const byte*>(p);
- byte* dst = reinterpret_cast<byte*>(&ret);
- for (size_t i = 0; i < sizeof(V); i++) {
- dst[i] = src[sizeof(V) - i - 1];
- }
- return ret;
-#endif // V8_TARGET_LITTLE_ENDIAN
-}
-
-template <typename V>
-static inline void WriteLittleEndianValue(Address p, V value) {
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- WriteUnalignedValue<V>(p, value);
-#elif defined(V8_TARGET_BIG_ENDIAN)
- byte* src = reinterpret_cast<byte*>(&value);
- byte* dst = reinterpret_cast<byte*>(p);
- for (size_t i = 0; i < sizeof(V); i++) {
- dst[i] = src[sizeof(V) - i - 1];
- }
-#endif // V8_TARGET_LITTLE_ENDIAN
-}
+// Returns the current stack top. Works correctly with ASAN and SafeStack.
+// GetCurrentStackPosition() should not be inlined, because it works on stack
+// frames: if it were inlined into a function with a huge stack frame, it would
+// return an address significantly above the actual current stack position.
+V8_NOINLINE uintptr_t GetCurrentStackPosition();
template <typename V>
static inline V ByteReverse(V value) {
@@ -1807,6 +1716,15 @@ class ThreadedList final {
V8_EXPORT_PRIVATE bool PassesFilter(Vector<const char> name,
Vector<const char> filter);
+// Zap the specified area with a specific byte pattern. This currently defaults
+// to int3 on x64 and ia32. On other architectures this will produce unspecified
+// instruction sequences.
+// TODO(jgruber): Better support for other architectures.
+V8_INLINE void ZapCode(Address addr, size_t size_in_bytes) {
+ static constexpr int kZapByte = 0xCC;
+ std::memset(reinterpret_cast<void*>(addr), kZapByte, size_in_bytes);
+}
+
} // namespace internal
} // namespace v8
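
The ZapCode() helper added above fills a region with 0xCC, which decodes as int3 on x64 and ia32, so stale or freed code traps if it is ever executed. A minimal standalone sketch of the same memset-based zap on an ordinary buffer:

    #include <cstdio>
    #include <cstring>

    // Same idea as ZapCode() above: fill a dead code region with 0xCC so that
    // accidentally executing it traps (int3) on x64/ia32.
    void ZapRegion(void* start, size_t size_in_bytes) {
      static constexpr int kZapByte = 0xCC;
      std::memset(start, kZapByte, size_in_bytes);
    }

    int main() {
      unsigned char buffer[16] = {0};  // stand-in for a dead code region
      ZapRegion(buffer, sizeof(buffer));
      std::printf("first byte after zap: 0x%02X\n", buffer[0]);  // 0xCC
    }
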
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index e8f1f0e846..4d152d4d4e 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -5,7 +5,6 @@
#include "src/v8.h"
#include "src/api.h"
-#include "src/assembler.h"
#include "src/base/atomicops.h"
#include "src/base/once.h"
#include "src/base/platform/platform.h"
@@ -19,11 +18,13 @@
#include "src/libsampler/sampler.h"
#include "src/objects-inl.h"
#include "src/profiler/heap-profiler.h"
+#include "src/reloc-info.h"
#include "src/runtime-profiler.h"
#include "src/simulator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/tracing-category-observer.h"
+#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
@@ -47,6 +48,7 @@ void V8::TearDown() {
#if defined(USE_SIMULATOR)
Simulator::GlobalTearDown();
#endif
+ wasm::WasmEngine::GlobalTearDown();
CallDescriptors::TearDown();
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
@@ -84,6 +86,7 @@ void V8::InitializeOncePerProcessImpl() {
ElementsAccessor::InitializeOncePerProcess();
Bootstrapper::InitializeOncePerProcess();
CallDescriptors::InitializeOncePerProcess();
+ wasm::WasmEngine::InitializeOncePerProcess();
}
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index 2c38437b9e..bf62e3b9e6 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -5,78 +5,111 @@
#ifndef V8_V8MEMORY_H_
#define V8_V8MEMORY_H_
+#include "src/globals.h"
+
namespace v8 {
namespace internal {
// Memory provides an interface to 'raw' memory. It encapsulates the casts
// that typically are needed when incompatible pointer types are used.
-
-class Memory {
- public:
- static uint8_t& uint8_at(Address addr) {
- return *reinterpret_cast<uint8_t*>(addr);
- }
-
- static uint16_t& uint16_at(Address addr) {
- return *reinterpret_cast<uint16_t*>(addr);
- }
-
- static uint32_t& uint32_at(Address addr) {
- return *reinterpret_cast<uint32_t*>(addr);
- }
-
- static int32_t& int32_at(Address addr) {
- return *reinterpret_cast<int32_t*>(addr);
- }
-
- static uint64_t& uint64_at(Address addr) {
- return *reinterpret_cast<uint64_t*>(addr);
- }
-
- static int64_t& int64_at(Address addr) {
- return *reinterpret_cast<int64_t*>(addr);
- }
-
- static int& int_at(Address addr) {
- return *reinterpret_cast<int*>(addr);
- }
-
- static unsigned& unsigned_at(Address addr) {
- return *reinterpret_cast<unsigned*>(addr);
- }
-
- static intptr_t& intptr_at(Address addr) {
- return *reinterpret_cast<intptr_t*>(addr);
- }
-
- static uintptr_t& uintptr_at(Address addr) {
- return *reinterpret_cast<uintptr_t*>(addr);
- }
-
- static float& float_at(Address addr) {
- return *reinterpret_cast<float*>(addr);
- }
-
- static double& double_at(Address addr) {
- return *reinterpret_cast<double*>(addr);
- }
-
- static Address& Address_at(Address addr) {
- return *reinterpret_cast<Address*>(addr);
- }
-
- static Object*& Object_at(Address addr) {
- return *reinterpret_cast<Object**>(addr);
- }
-
- static Handle<Object>& Object_Handle_at(Address addr) {
- return *reinterpret_cast<Handle<Object>*>(addr);
- }
-
- static bool IsAddressInRange(Address base, Address address, uint32_t size) {
- return base <= address && address < base + size;
- }
-};
+// Note that this code currently relies on undefined behaviour. There is a
+// proposal (http://wg21.link/p0593r2) to make it defined behaviour though.
+template <class T>
+T& Memory(Address addr) {
+ return *reinterpret_cast<T*>(addr);
+}
+template <class T>
+T& Memory(byte* addr) {
+ return Memory<T>(reinterpret_cast<Address>(addr));
+}
+
+template <typename V>
+static inline V ReadUnalignedValue(Address p) {
+ ASSERT_TRIVIALLY_COPYABLE(V);
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
+ return *reinterpret_cast<const V*>(p);
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
+ V r;
+ memmove(&r, reinterpret_cast<void*>(p), sizeof(V));
+ return r;
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
+}
+
+template <typename V>
+static inline void WriteUnalignedValue(Address p, V value) {
+ ASSERT_TRIVIALLY_COPYABLE(V);
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
+ *(reinterpret_cast<V*>(p)) = value;
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
+ memmove(reinterpret_cast<void*>(p), &value, sizeof(V));
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
+}
+
+static inline double ReadFloatValue(Address p) {
+ return ReadUnalignedValue<float>(p);
+}
+
+static inline double ReadDoubleValue(Address p) {
+ return ReadUnalignedValue<double>(p);
+}
+
+static inline void WriteDoubleValue(Address p, double value) {
+ WriteUnalignedValue(p, value);
+}
+
+static inline uint16_t ReadUnalignedUInt16(Address p) {
+ return ReadUnalignedValue<uint16_t>(p);
+}
+
+static inline void WriteUnalignedUInt16(Address p, uint16_t value) {
+ WriteUnalignedValue(p, value);
+}
+
+static inline uint32_t ReadUnalignedUInt32(Address p) {
+ return ReadUnalignedValue<uint32_t>(p);
+}
+
+static inline void WriteUnalignedUInt32(Address p, uint32_t value) {
+ WriteUnalignedValue(p, value);
+}
+
+template <typename V>
+static inline V ReadLittleEndianValue(Address p) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ return ReadUnalignedValue<V>(p);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ V ret{};
+ const byte* src = reinterpret_cast<const byte*>(p);
+ byte* dst = reinterpret_cast<byte*>(&ret);
+ for (size_t i = 0; i < sizeof(V); i++) {
+ dst[i] = src[sizeof(V) - i - 1];
+ }
+ return ret;
+#endif // V8_TARGET_LITTLE_ENDIAN
+}
+
+template <typename V>
+static inline void WriteLittleEndianValue(Address p, V value) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ WriteUnalignedValue<V>(p, value);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ byte* src = reinterpret_cast<byte*>(&value);
+ byte* dst = reinterpret_cast<byte*>(p);
+ for (size_t i = 0; i < sizeof(V); i++) {
+ dst[i] = src[sizeof(V) - i - 1];
+ }
+#endif // V8_TARGET_LITTLE_ENDIAN
+}
+
+template <typename V>
+static inline V ReadLittleEndianValue(V* p) {
+ return ReadLittleEndianValue<V>(reinterpret_cast<Address>(p));
+}
+
+template <typename V>
+static inline void WriteLittleEndianValue(V* p, V value) {
+ WriteLittleEndianValue<V>(reinterpret_cast<Address>(p), value);
+}
} // namespace internal
} // namespace v8
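
The ReadLittleEndianValue()/WriteLittleEndianValue() helpers moved into v8memory.h above byte-swap on big-endian targets so that values are always stored in little-endian order. A standalone sketch of the byte-reversal loop, using memcpy instead of reinterpret_cast so it stays well-defined on any host:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Sketch of the big-endian branch of WriteLittleEndianValue: store `value`
    // at `dst` with its bytes reversed.
    template <typename V>
    void WriteReversed(unsigned char* dst, V value) {
      unsigned char src[sizeof(V)];
      std::memcpy(src, &value, sizeof(V));
      for (std::size_t i = 0; i < sizeof(V); i++) {
        dst[i] = src[sizeof(V) - i - 1];
      }
    }

    int main() {
      unsigned char buf[4];
      WriteReversed<uint32_t>(buf, 0x11223344u);
      std::printf("%02X %02X %02X %02X\n", buf[0], buf[1], buf[2], buf[3]);
    }
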
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 4c9c9a9aa2..26ab746e8c 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -7,14 +7,16 @@
#include <type_traits>
#include "include/v8-value-serializer-version.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/logging.h"
#include "src/conversions.h"
#include "src/flags.h"
#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
+#include "src/maybe-handles-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -521,11 +523,13 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
}
break;
- case WASM_MEMORY_TYPE:
- if (FLAG_experimental_wasm_threads) {
+ case WASM_MEMORY_TYPE: {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if (enabled_features.threads) {
return WriteWasmMemory(Handle<WasmMemoryObject>::cast(receiver));
}
break;
+ }
default:
break;
}
@@ -849,9 +853,9 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
ArrayBufferViewTag tag = ArrayBufferViewTag::kInt8Array;
if (view->IsJSTypedArray()) {
switch (JSTypedArray::cast(view)->type()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- tag = ArrayBufferViewTag::k##Type##Array; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: \
+ tag = ArrayBufferViewTag::k##Type##Array; \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -894,14 +898,13 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
memcpy(destination, wire_bytes.start(), wire_bytes.size());
}
- size_t module_size =
- wasm::GetSerializedNativeModuleSize(isolate_, native_module);
+ wasm::WasmSerializer wasm_serializer(isolate_, native_module);
+ size_t module_size = wasm_serializer.GetSerializedNativeModuleSize();
CHECK_GE(std::numeric_limits<uint32_t>::max(), module_size);
WriteVarint<uint32_t>(static_cast<uint32_t>(module_size));
uint8_t* module_buffer;
if (ReserveRawBytes(module_size).To(&module_buffer)) {
- if (!wasm::SerializeNativeModule(isolate_, native_module,
- {module_buffer, module_size})) {
+ if (!wasm_serializer.SerializeNativeModule({module_buffer, module_size})) {
return Nothing<bool>();
}
}
@@ -1730,10 +1733,10 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
AddObjectWithID(id, data_view);
return data_view;
}
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case ArrayBufferViewTag::k##Type##Array: \
- external_array_type = kExternal##Type##Array; \
- element_size = size; \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case ArrayBufferViewTag::k##Type##Array: \
+ external_array_type = kExternal##Type##Array; \
+ element_size = sizeof(ctype); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -1805,8 +1808,11 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
wasm::DeserializeNativeModule(isolate_, compiled_bytes, wire_bytes);
if (result.is_null()) {
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
+ // TODO(titzer): are the current features appropriate for deserializing?
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
result = isolate_->wasm_engine()->SyncCompile(
- isolate_, &thrower, wasm::ModuleWireBytes(wire_bytes));
+ isolate_, enabled_features, &thrower,
+ wasm::ModuleWireBytes(wire_bytes));
}
uint32_t id = next_id_++;
if (!result.is_null()) {
@@ -1818,7 +1824,8 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
uint32_t id = next_id_++;
- if (!FLAG_experimental_wasm_threads) {
+ auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
+ if (!enabled_features.threads) {
return MaybeHandle<WasmMemoryObject>();
}
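
The TYPED_ARRAY_CASE change above drops the explicit size parameter and derives the element size with sizeof(ctype) inside the macro body. The sketch below shows that X-macro pattern with a trimmed, purely illustrative type list; V8's real TYPED_ARRAYS list is longer.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Element size is derived via sizeof(ctype) instead of being threaded
    // through the macro as a separate argument.
    #define DEMO_TYPED_ARRAYS(V) \
      V(Int8, int8_t)            \
      V(Uint32, uint32_t)        \
      V(Float64, double)

    enum class Tag {
    #define DECLARE_TAG(Type, ctype) k##Type##Array,
      DEMO_TYPED_ARRAYS(DECLARE_TAG)
    #undef DECLARE_TAG
    };

    std::size_t ElementSize(Tag tag) {
      switch (tag) {
    #define TYPED_ARRAY_CASE(Type, ctype) \
      case Tag::k##Type##Array:           \
        return sizeof(ctype);
        DEMO_TYPED_ARRAYS(TYPED_ARRAY_CASE)
    #undef TYPED_ARRAY_CASE
      }
      return 0;
    }

    int main() {
      std::printf("%zu %zu %zu\n", ElementSize(Tag::kInt8Array),
                  ElementSize(Tag::kUint32Array), ElementSize(Tag::kFloat64Array));
    }
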
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index 91f8ecc7dd..ac683e8c75 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -12,6 +12,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/identity-map.h"
+#include "src/maybe-handles.h"
#include "src/messages.h"
#include "src/vector.h"
#include "src/zone/zone.h"
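
The value-serializer changes above replace the global FLAG_experimental_wasm_threads check with a per-isolate feature set obtained from WasmFeaturesFromIsolate(). The sketch below only illustrates that gating pattern; the struct and field names are simplified stand-ins, not V8's actual definitions.

    #include <cstdio>

    // Per-isolate feature set instead of a process-wide flag.
    struct WasmFeatures {
      bool threads = false;
    };

    struct Isolate {
      WasmFeatures wasm_features;
    };

    WasmFeatures WasmFeaturesFromIsolate(const Isolate* isolate) {
      return isolate->wasm_features;
    }

    bool CanSerializeSharedWasmMemory(const Isolate* isolate) {
      // Gate the WASM_MEMORY_TYPE case on the per-isolate feature, as in the diff.
      return WasmFeaturesFromIsolate(isolate).threads;
    }

    int main() {
      Isolate isolate;
      isolate.wasm_features.threads = true;
      std::printf("%d\n", CanSerializeSharedWasmMemory(&isolate));
    }
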
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index ca55fe5d52..725bed590f 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
BAILOUT("Store");
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessStore");
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -249,7 +238,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+ // This is a nop on arm.
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a8928210bb..cdc2dc2a45 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -281,17 +281,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index ae8c9e012f..1fef62542a 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -227,9 +227,10 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
// Wasm memory is limited to a size <2GB, so all offsets can be encoded as
// immediate value (in 31 bits, interpreted as signed value).
// If the offset is bigger, we always trap and this code is not reached.
- DCHECK(is_uint31(offset_imm));
+ // Note: We shouldn't have memories larger than 2GiB on 32-bit, but if we
+  // did, we encode {offset_imm} as signed, and it will simply wrap around.
Operand src_op = offset_reg == no_reg
- ? Operand(src_addr, offset_imm)
+ ? Operand(src_addr, bit_cast<int32_t>(offset_imm))
: Operand(src_addr, offset_reg, times_1, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
@@ -278,10 +279,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI64Load: {
// Compute the operand for the load of the upper half.
- DCHECK(is_uint31(offset_imm + 4));
Operand upper_src_op =
offset_reg == no_reg
- ? Operand(src_addr, offset_imm + 4)
+ ? Operand(src_addr, bit_cast<int32_t>(offset_imm + 4))
: Operand(src_addr, offset_reg, times_1, offset_imm + 4);
// The high word has to be mov'ed first, such that this is the protected
// instruction. The mov of the low word cannot segfault.
@@ -308,9 +308,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
// Wasm memory is limited to a size <2GB, so all offsets can be encoded as
// immediate value (in 31 bits, interpreted as signed value).
// If the offset is bigger, we always trap and this code is not reached.
- DCHECK(is_uint31(offset_imm));
Operand dst_op = offset_reg == no_reg
- ? Operand(dst_addr, offset_imm)
+ ? Operand(dst_addr, bit_cast<int32_t>(offset_imm))
: Operand(dst_addr, offset_reg, times_1, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
@@ -342,10 +341,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kI64Store: {
// Compute the operand for the store of the upper half.
- DCHECK(is_uint31(offset_imm + 4));
Operand upper_dst_op =
offset_reg == no_reg
- ? Operand(dst_addr, offset_imm + 4)
+ ? Operand(dst_addr, bit_cast<int32_t>(offset_imm + 4))
: Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
// The high word has to be mov'ed first, such that this is the protected
// instruction. The mov of the low word cannot segfault.
@@ -364,17 +362,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -893,7 +880,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+ // This is a nop on ia32.
}
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
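
The ia32 Liftoff changes above replace the is_uint31 DCHECKs with bit_cast<int32_t>(offset_imm): the ia32 Operand displacement is a signed 32-bit field, so an offset of 2 GiB or more is reinterpreted as a negative displacement and the address computation simply wraps modulo 2^32. A small sketch of why the resulting address is unchanged; the concrete offsets are hypothetical.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // What bit_cast<int32_t>(offset_imm) does: reinterpret the unsigned
    // offset's bits as a signed displacement. memcpy keeps the sketch
    // well-defined, unlike a reinterpret_cast through pointers.
    int32_t BitCastToInt32(uint32_t v) {
      int32_t out;
      std::memcpy(&out, &v, sizeof(out));
      return out;
    }

    int main() {
      uint32_t base = 0x10000000u;
      uint32_t offset = 0x90000000u;              // >= 2 GiB, hypothetical
      uint32_t via_unsigned = base + offset;      // modulo-2^32 arithmetic
      uint32_t via_signed =
          base + static_cast<uint32_t>(BitCastToInt32(offset));
      std::printf("0x%08X 0x%08X\n", via_unsigned, via_signed);  // identical
    }
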
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index b7fdf5fe60..c8d8dab1d9 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -5,18 +5,9 @@
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
+#include "src/assembler-arch.h"
#include "src/reglist.h"
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64.h"
-#endif
-
namespace v8 {
namespace internal {
namespace wasm {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 0e913c19dc..1d604925cc 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -349,7 +349,7 @@ constexpr AssemblerOptions DefaultLiftoffOptions() {
LiftoffAssembler::LiftoffAssembler()
: TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
CodeObjectRequired::kNo) {
- set_trap_on_abort(true); // Avoid calls to Abort.
+ set_abort_hard(true); // Avoid calls to Abort.
}
LiftoffAssembler::~LiftoffAssembler() {
@@ -446,7 +446,7 @@ void LiftoffAssembler::SpillAllRegisters() {
cache_state_.reset_used_registers();
}
-void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
+void LiftoffAssembler::PrepareCall(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
LiftoffRegister* target_instance) {
@@ -555,7 +555,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
}
-void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
+void LiftoffAssembler::FinishCall(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) {
const size_t return_count = sig->return_count();
if (return_count != 0) {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 822c620b82..cfc412d671 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -314,11 +314,11 @@ class LiftoffAssembler : public TurboAssembler {
// Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
- void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
+ void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr,
LiftoffRegister* target_instance = nullptr);
// Process return values of the call.
- void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ void FinishCall(FunctionSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different.
void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
@@ -362,10 +362,6 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src, StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc = nullptr,
bool is_store_mem = false);
- inline void ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned);
- inline void ChangeEndiannessStore(LiftoffRegister src, StoreType type,
- LiftoffRegList pinned);
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
@@ -448,6 +444,14 @@ class LiftoffAssembler : public TurboAssembler {
emit_i32_add(dst, lhs, rhs);
}
}
+ inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
+ if (kPointerSize == 8) {
+ emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
+ LiftoffRegister(rhs));
+ } else {
+ emit_i32_sub(dst, lhs, rhs);
+ }
+ }
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
@@ -532,13 +536,13 @@ class LiftoffAssembler : public TurboAssembler {
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
- inline void CallC(wasm::FunctionSig* sig, const LiftoffRegister* args,
+ inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
const LiftoffRegister* rets, ValueType out_argument_type,
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
- inline void CallIndirect(wasm::FunctionSig* sig,
+ inline void CallIndirect(FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
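
The new emit_ptrsize_sub() above mirrors emit_ptrsize_add(): it dispatches to the 64-bit or 32-bit subtraction depending on kPointerSize, so pointer-sized arithmetic such as the bounds-check size computation compiles on both 32- and 64-bit targets. The sketch below shows only the width dispatch, with plain integer subtraction standing in for the emitted instructions.

    #include <cstdint>
    #include <cstdio>

    constexpr int kPointerSize = sizeof(void*);

    uint64_t Sub64(uint64_t a, uint64_t b) { return a - b; }
    uint32_t Sub32(uint32_t a, uint32_t b) { return a - b; }

    // Pick the pointer-width operation at compile time, as emit_ptrsize_sub does.
    uintptr_t PtrSizeSub(uintptr_t lhs, uintptr_t rhs) {
      if (kPointerSize == 8) {
        return static_cast<uintptr_t>(Sub64(lhs, rhs));
      }
      return static_cast<uintptr_t>(
          Sub32(static_cast<uint32_t>(lhs), static_cast<uint32_t>(rhs)));
    }

    int main() {
      std::printf("%zu\n", static_cast<size_t>(PtrSizeSub(1000, 24)));
    }
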
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 1130cf0cdd..dbd106d481 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -16,6 +16,7 @@
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -62,9 +63,11 @@ constexpr LoadType::LoadTypeValue kPointerLoadType =
// thus store the label on the heap and keep a unique_ptr.
class MovableLabel {
public:
- Label* get() { return label_.get(); }
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
MovableLabel() : label_(new Label()) {}
+ Label* get() { return label_.get(); }
+
private:
std::unique_ptr<Label> label_;
};
@@ -72,6 +75,8 @@ class MovableLabel {
// On all other platforms, just store the Label directly.
class MovableLabel {
public:
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
+
Label* get() { return &label_; }
private:
@@ -93,8 +98,7 @@ class LiftoffCompiler {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
// TODO(clemensh): Make this a template parameter.
- static constexpr wasm::Decoder::ValidateFlag validate =
- wasm::Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using Value = ValueBase;
@@ -111,7 +115,7 @@ class LiftoffCompiler {
MovableLabel label;
};
- using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
struct OutOfLineCode {
MovableLabel label;
@@ -137,11 +141,6 @@ class LiftoffCompiler {
: descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
- min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
- max_size_(uint64_t{env_->module->has_maximum_pages
- ? env_->module->maximum_pages
- : wasm::kV8MaxWasmMemoryPages} *
- wasm::kWasmPageSize),
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_) {}
@@ -165,20 +164,20 @@ class LiftoffCompiler {
return __ GetTotalFrameSlotCount();
}
- void unsupported(Decoder* decoder, const char* reason) {
+ void unsupported(FullDecoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
BindUnboundLabels(decoder);
}
- bool DidAssemblerBailout(Decoder* decoder) {
+ bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
unsupported(decoder, __ bailout_reason());
return true;
}
- bool CheckSupportedType(Decoder* decoder,
+ bool CheckSupportedType(FullDecoder* decoder,
Vector<const ValueType> supported_types,
ValueType type, const char* context) {
char buffer[128];
@@ -195,7 +194,7 @@ class LiftoffCompiler {
return safepoint_table_builder_.GetCodeOffset();
}
- void BindUnboundLabels(Decoder* decoder) {
+ void BindUnboundLabels(FullDecoder* decoder) {
#ifdef DEBUG
// Bind all labels now, otherwise their destructor will fire a DCHECK error
    // if they were referenced before.
@@ -215,7 +214,7 @@ class LiftoffCompiler {
#endif
}
- void StartFunction(Decoder* decoder) {
+ void StartFunction(FullDecoder* decoder) {
int num_locals = decoder->NumLocals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
@@ -306,7 +305,7 @@ class LiftoffCompiler {
__ bind(ool.continuation.get());
}
- void StartFunctionBody(Decoder* decoder, Control* block) {
+ void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
return;
@@ -422,7 +421,7 @@ class LiftoffCompiler {
}
}
- void FinishFunction(Decoder* decoder) {
+ void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(ool);
@@ -435,23 +434,23 @@ class LiftoffCompiler {
DidAssemblerBailout(decoder);
}
- void OnFirstError(Decoder* decoder) {
+ void OnFirstError(FullDecoder* decoder) {
ok_ = false;
BindUnboundLabels(decoder);
asm_.AbortCompilation();
}
- void NextInstruction(Decoder* decoder, WasmOpcode opcode) {
+ void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
TraceCacheState(decoder);
SLOW_DCHECK(__ ValidateCacheState());
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
}
- void Block(Decoder* decoder, Control* block) {
+ void Block(FullDecoder* decoder, Control* block) {
block->label_state.stack_base = __ cache_state()->stack_height();
}
- void Loop(Decoder* decoder, Control* loop) {
+ void Loop(FullDecoder* decoder, Control* loop) {
loop->label_state.stack_base = __ cache_state()->stack_height();
// Before entering a loop, spill all locals to the stack, in order to free
@@ -471,9 +470,11 @@ class LiftoffCompiler {
StackCheck(decoder->position());
}
- void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
+ void Try(FullDecoder* decoder, Control* block) {
+ unsupported(decoder, "try");
+ }
- void If(Decoder* decoder, const Value& cond, Control* if_block) {
+ void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
@@ -493,7 +494,7 @@ class LiftoffCompiler {
if_block->else_state->state.Split(*__ cache_state());
}
- void FallThruTo(Decoder* decoder, Control* c) {
+ void FallThruTo(FullDecoder* decoder, Control* c) {
if (c->end_merge.reached) {
__ MergeFullStackWith(c->label_state);
} else if (c->is_onearmed_if()) {
@@ -506,7 +507,7 @@ class LiftoffCompiler {
TraceCacheState(decoder);
}
- void PopControl(Decoder* decoder, Control* c) {
+ void PopControl(FullDecoder* decoder, Control* c) {
if (!c->is_loop() && c->end_merge.reached) {
__ cache_state()->Steal(c->label_state);
}
@@ -515,7 +516,7 @@ class LiftoffCompiler {
}
}
- void EndControl(Decoder* decoder, Control* c) {}
+ void EndControl(FullDecoder* decoder, Control* c) {}
enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
@@ -588,7 +589,7 @@ class LiftoffCompiler {
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
: __ GetUnusedRegister(dst_rc);
- DCHECK_EQ(can_trap, trap_position > 0);
+ DCHECK_EQ(!!can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
WasmCode::kThrowWasmTrapFloatUnrepresentable)
@@ -614,7 +615,7 @@ class LiftoffCompiler {
__ PushRegister(dst_type, dst);
}
- void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
@@ -747,7 +748,7 @@ class LiftoffCompiler {
}
}
- void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
@@ -994,11 +995,11 @@ class LiftoffCompiler {
#undef CASE_CCALL_BINOP
}
- void I32Const(Decoder* decoder, Value* result, int32_t value) {
+ void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
__ cache_state()->stack_state.emplace_back(kWasmI32, value);
}
- void I64Const(Decoder* decoder, Value* result, int64_t value) {
+ void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
// The {VarState} stores constant values as int32_t, thus we only store
// 64-bit constants in this field if it fits in an int32_t. Larger values
// cannot be used as immediate value anyway, so we can also just put them in
@@ -1013,30 +1014,30 @@ class LiftoffCompiler {
}
}
- void F32Const(Decoder* decoder, Value* result, float value) {
+ void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}
- void F64Const(Decoder* decoder, Value* result, double value) {
+ void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
- void RefNull(Decoder* decoder, Value* result) {
+ void RefNull(FullDecoder* decoder, Value* result) {
unsupported(decoder, "ref_null");
}
- void Drop(Decoder* decoder, const Value& value) {
+ void Drop(FullDecoder* decoder, const Value& value) {
auto& slot = __ cache_state()->stack_state.back();
    // If the dropped slot contains a register, decrement its use count.
if (slot.is_reg()) __ cache_state()->dec_used(slot.reg());
__ cache_state()->stack_state.pop_back();
}
- void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
+ void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
if (implicit) {
DCHECK_EQ(1, decoder->control_depth());
Control* func_block = decoder->control_at(0);
@@ -1060,7 +1061,7 @@ class LiftoffCompiler {
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
- void GetLocal(Decoder* decoder, Value* result,
+ void GetLocal(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto& slot = __ cache_state()->stack_state[imm.index];
DCHECK_EQ(slot.type(), imm.type);
@@ -1123,12 +1124,12 @@ class LiftoffCompiler {
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
- void SetLocal(Decoder* decoder, const Value& value,
+ void SetLocal(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
SetLocal(imm.index, false);
}
- void TeeLocal(Decoder* decoder, const Value& value, Value* result,
+ void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
SetLocal(imm.index, true);
}
@@ -1138,7 +1139,6 @@ class LiftoffCompiler {
uint32_t* offset) {
LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
if (global->mutability && global->imported) {
- DCHECK(FLAG_experimental_wasm_mut_global);
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kPointerLoadType);
__ Load(addr, addr.gp(), no_reg, global->index * sizeof(Address),
kPointerLoadType, pinned);
@@ -1150,7 +1150,7 @@ class LiftoffCompiler {
return addr;
}
- void GetGlobal(Decoder* decoder, Value* result,
+ void GetGlobal(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
@@ -1165,7 +1165,7 @@ class LiftoffCompiler {
__ PushRegister(global->type, value);
}
- void SetGlobal(Decoder* decoder, const Value& value,
+ void SetGlobal(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
@@ -1178,14 +1178,14 @@ class LiftoffCompiler {
__ Store(addr.gp(), no_reg, offset, reg, type, pinned);
}
- void Unreachable(Decoder* decoder) {
+ void Unreachable(FullDecoder* decoder) {
Label* unreachable_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapUnreachable);
__ emit_jump(unreachable_label);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
- void Select(Decoder* decoder, const Value& cond, const Value& fval,
+ void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
@@ -1219,11 +1219,9 @@ class LiftoffCompiler {
__ jmp(target->label.get());
}
- void Br(Decoder* decoder, Control* target) {
- Br(target);
- }
+ void Br(FullDecoder* decoder, Control* target) { Br(target); }
- void BrIf(Decoder* decoder, const Value& cond, Control* target) {
+ void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
@@ -1234,7 +1232,7 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
- void GenerateBrCase(Decoder* decoder, uint32_t br_depth,
+ void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
std::map<uint32_t, MovableLabel>& br_targets) {
MovableLabel& label = br_targets[br_depth];
if (label.get()->is_bound()) {
@@ -1247,7 +1245,7 @@ class LiftoffCompiler {
// Generate a branch table for input in [min, max).
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
- void GenerateBrTable(Decoder* decoder, LiftoffRegister tmp,
+ void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
BranchTableIterator<validate>& table_iterator,
std::map<uint32_t, MovableLabel>& br_targets) {
@@ -1273,7 +1271,7 @@ class LiftoffCompiler {
br_targets);
}
- void BrTable(Decoder* decoder, const BranchTableImmediate<validate>& imm,
+ void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
const Value& key) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
@@ -1298,7 +1296,7 @@ class LiftoffCompiler {
DCHECK(!table_iterator.has_next());
}
- void Else(Decoder* decoder, Control* if_block) {
+ void Else(FullDecoder* decoder, Control* if_block) {
if (if_block->reachable()) __ emit_jump(if_block->label.get());
__ bind(if_block->else_state->label.get());
__ cache_state()->Steal(if_block->else_state->state);
@@ -1318,17 +1316,17 @@ class LiftoffCompiler {
// Returns true if the memory access is statically known to be out of bounds
// (a jump to the trap was generated then); return false otherwise.
- bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
- Register index, LiftoffRegList pinned) {
- const bool statically_oob =
- access_size > max_size_ || offset > max_size_ - access_size;
+ bool BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
+ uint32_t offset, Register index, LiftoffRegList pinned) {
+ const bool statically_oob = access_size > env_->max_memory_size ||
+ offset > env_->max_memory_size - access_size;
if (!statically_oob &&
(FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
return false;
}
- // TODO(eholk): This adds protected instruction information for the jump
+ // TODO(wasm): This adds protected instruction information for the jump
// instruction we are about to generate. It would be better to just not add
// protected instruction info when the pc is 0.
Label* trap_label = AddOutOfLineTrap(
@@ -1347,7 +1345,7 @@ class LiftoffCompiler {
DCHECK(!env_->use_trap_handler);
DCHECK(!FLAG_wasm_no_bounds_checks);
- uint32_t end_offset = offset + access_size - 1;
+ uint64_t end_offset = uint64_t{offset} + access_size - 1u;
// If the end offset is larger than the smallest memory, dynamically check
// the end offset against the actual memory size, which is not known at
@@ -1355,19 +1353,30 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
- __ LoadConstant(end_offset_reg, WasmValue(end_offset));
- if (end_offset >= min_size_) {
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
- end_offset_reg.gp(), mem_size.gp());
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerLoadType);
+
+ if (kPointerSize == 8) {
+ __ LoadConstant(end_offset_reg, WasmValue(end_offset));
+ } else {
+ __ LoadConstant(end_offset_reg,
+ WasmValue(static_cast<uint32_t>(end_offset)));
+ }
+
+ if (end_offset >= env_->min_memory_size) {
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+ LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
+ mem_size.gp());
}
// Just reuse the end_offset register for computing the effective size.
LiftoffRegister effective_size_reg = end_offset_reg;
- __ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
- end_offset_reg.gp());
+ __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
+ end_offset_reg.gp());
+
+ __ emit_i32_to_intptr(index, index);
- __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
+ __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+ LiftoffAssembler::kWasmIntPtr, index,
effective_size_reg.gp());
return false;
}
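
The BoundsCheckMem() change above widens end_offset to 64 bits (uint64_t{offset} + access_size - 1) so that a large static offset plus the access size cannot wrap around in 32-bit arithmetic and slip past the comparison against the memory size. A small arithmetic sketch with hypothetical values:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Values chosen so that the 32-bit sum overflows.
      uint32_t offset = 0xFFFFFFF8u;   // static offset of the memory access
      uint32_t access_size = 16;       // bytes accessed

      uint32_t end32 = offset + access_size - 1;            // wraps to a tiny value
      uint64_t end64 = uint64_t{offset} + access_size - 1;  // what the patch computes

      // With the wrapped 32-bit value, an "end < memory size" check would pass
      // spuriously; the widened value keeps the comparison meaningful.
      std::printf("end32=0x%08X end64=0x%016llX\n", end32,
                  static_cast<unsigned long long>(end64));
    }
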
@@ -1385,27 +1394,27 @@ class LiftoffCompiler {
__ LoadConstant(address, WasmValue(offset));
__ emit_i32_add(address.gp(), address.gp(), index);
- // Get a register to hold the stack slot for wasm::MemoryTracingInfo.
+ // Get a register to hold the stack slot for MemoryTracingInfo.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- // Allocate stack slot for wasm::MemoryTracingInfo.
- __ AllocateStackSlot(info.gp(), sizeof(wasm::MemoryTracingInfo));
+ // Allocate stack slot for MemoryTracingInfo.
+ __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
- // Now store all information into the wasm::MemoryTracingInfo struct.
- __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, address),
- address, StoreType::kI32Store, pinned);
+ // Now store all information into the MemoryTracingInfo struct.
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, address), address,
+ StoreType::kI32Store, pinned);
__ LoadConstant(address, WasmValue(is_store ? 1 : 0));
- __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, is_store),
- address, StoreType::kI32Store8, pinned);
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), address,
+ StoreType::kI32Store8, pinned);
__ LoadConstant(address, WasmValue(static_cast<int>(rep)));
- __ Store(info.gp(), no_reg, offsetof(wasm::MemoryTracingInfo, mem_rep),
- address, StoreType::kI32Store8, pinned);
+ __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), address,
+ StoreType::kI32Store8, pinned);
source_position_table_builder_.AddPosition(__ pc_offset(),
SourcePosition(position), false);
Register args[] = {info.gp()};
GenerateRuntimeCall(Runtime::kWasmTraceMemory, arraysize(args), args);
- __ DeallocateStackSlot(sizeof(wasm::MemoryTracingInfo));
+ __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
void GenerateRuntimeCall(Runtime::FunctionId runtime_function, int num_args,
@@ -1462,7 +1471,7 @@ class LiftoffCompiler {
return index;
}
- void LoadMem(Decoder* decoder, LoadType type,
+ void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
ValueType value_type = type.value_type();
@@ -1495,7 +1504,7 @@ class LiftoffCompiler {
}
}
- void StoreMem(Decoder* decoder, StoreType type,
+ void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
@@ -1525,7 +1534,7 @@ class LiftoffCompiler {
}
}
- void CurrentMemoryPages(Decoder* decoder, Value* result) {
+ void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
LiftoffRegList pinned;
LiftoffRegister mem_size = pinned.set(__ GetUnusedRegister(kGpReg));
LiftoffRegister tmp_const =
@@ -1533,12 +1542,12 @@ class LiftoffCompiler {
LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
// TODO(clemensh): Shift by immediate directly.
__ LoadConstant(tmp_const,
- WasmValue(int32_t{WhichPowerOf2(wasm::kWasmPageSize)}));
+ WasmValue(int32_t{WhichPowerOf2(kWasmPageSize)}));
__ emit_i32_shr(mem_size.gp(), mem_size.gp(), tmp_const.gp(), pinned);
__ PushRegister(kWasmI32, mem_size);
}
- void GrowMemory(Decoder* decoder, const Value& value, Value* result_val) {
+ void GrowMemory(FullDecoder* decoder, const Value& value, Value* result_val) {
// Pop the input, then spill all cache registers to make the runtime call.
LiftoffRegList pinned;
LiftoffRegister input = pinned.set(__ PopToRegister());
@@ -1559,7 +1568,7 @@ class LiftoffCompiler {
Register param_reg = descriptor.GetRegisterParameter(0);
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
- __ CallRuntimeStub(wasm::WasmCode::kWasmGrowMemory);
+ __ CallRuntimeStub(WasmCode::kWasmGrowMemory);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
@@ -1570,7 +1579,8 @@ class LiftoffCompiler {
__ PushRegister(kWasmI32, result);
}
- void CallDirect(Decoder* decoder, const CallFunctionImmediate<validate>& imm,
+ void CallDirect(FullDecoder* decoder,
+ const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
@@ -1634,7 +1644,7 @@ class LiftoffCompiler {
}
}
- void CallIndirect(Decoder* decoder, const Value& index_val,
+ void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) {
@@ -1758,36 +1768,36 @@ class LiftoffCompiler {
__ FinishCall(imm.sig, call_descriptor);
}
- void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
}
- void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
+ void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
unsupported(decoder, "simd");
}
- void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
+ void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate>& imm, const Value& input,
Value* result) {
unsupported(decoder, "simd");
}
- void Simd8x16ShuffleOp(Decoder* decoder,
+ void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
unsupported(decoder, "simd");
}
- void Throw(Decoder* decoder, const ExceptionIndexImmediate<validate>&,
+ void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
Control* block, const Vector<Value>& args) {
unsupported(decoder, "throw");
}
- void CatchException(Decoder* decoder,
+ void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> caught_values) {
unsupported(decoder, "catch");
}
- void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
unsupported(decoder, "atomicop");
}
@@ -1796,9 +1806,6 @@ class LiftoffCompiler {
LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
ModuleEnv* const env_;
- // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
- const uint64_t min_size_;
- const uint64_t max_size_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
@@ -1812,7 +1819,7 @@ class LiftoffCompiler {
// patch the actually needed stack size in the end.
uint32_t pc_offset_stack_frame_construction_ = 0;
- void TraceCacheState(Decoder* decoder) const {
+ void TraceCacheState(FullDecoder* decoder) const {
#ifdef DEBUG
if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
StdoutStream os;
@@ -1832,7 +1839,7 @@ class LiftoffCompiler {
} // namespace
-bool LiftoffCompilationUnit::ExecuteCompilation() {
+bool LiftoffCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"ExecuteLiftoffCompilation");
base::ElapsedTimer compile_timer;
@@ -1841,18 +1848,18 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
}
Zone zone(wasm_unit_->wasm_engine_->allocator(), "LiftoffCompilationZone");
- const wasm::WasmModule* module =
+ const WasmModule* module =
wasm_unit_->env_ ? wasm_unit_->env_->module : nullptr;
auto call_descriptor =
compiler::GetWasmCallDescriptor(&zone, wasm_unit_->func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, wasm_unit_->counters_->liftoff_compile_time());
- wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
- decoder(&zone, module, wasm_unit_->func_body_, call_descriptor,
- wasm_unit_->env_, &zone);
+ WasmFullDecoder<Decoder::kValidate, LiftoffCompiler> decoder(
+ &zone, module, wasm_unit_->native_module_->enabled_features(), detected,
+ wasm_unit_->func_body_, call_descriptor, wasm_unit_->env_, &zone);
decoder.Decode();
liftoff_compile_time_scope.reset();
- wasm::LiftoffCompiler* compiler = &decoder.interface();
+ LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) return false; // validation error
if (!compiler->ok()) {
// Liftoff compilation failed.
@@ -1883,13 +1890,13 @@ bool LiftoffCompilationUnit::ExecuteCompilation() {
code_ = wasm_unit_->native_module_->AddCode(
wasm_unit_->func_index_, desc, frame_slot_count, safepoint_table_offset,
0, std::move(protected_instructions), std::move(source_positions),
- wasm::WasmCode::kLiftoff);
+ WasmCode::kLiftoff);
wasm_unit_->native_module_->PublishCode(code_);
return true;
}
-wasm::WasmCode* LiftoffCompilationUnit::FinishCompilation(wasm::ErrorThrower*) {
+WasmCode* LiftoffCompilationUnit::FinishCompilation(ErrorThrower*) {
return code_;
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index ce828c459b..c7696cbb56 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -11,6 +11,7 @@ namespace v8 {
namespace internal {
namespace wasm {
+struct WasmFeatures;
class ErrorThrower;
class WasmCode;
class WasmCompilationUnit;
@@ -20,8 +21,8 @@ class LiftoffCompilationUnit final {
explicit LiftoffCompilationUnit(WasmCompilationUnit* wasm_unit)
: wasm_unit_(wasm_unit) {}
- bool ExecuteCompilation();
- wasm::WasmCode* FinishCompilation(wasm::ErrorThrower*);
+ bool ExecuteCompilation(WasmFeatures* detected);
+ WasmCode* FinishCompilation(ErrorThrower*);
private:
WasmCompilationUnit* const wasm_unit_;
diff --git a/deps/v8/src/wasm/baseline/mips/OWNERS b/deps/v8/src/wasm/baseline/mips/OWNERS
index cf2df277c9..c653ce404d 100644
--- a/deps/v8/src/wasm/baseline/mips/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index d2ea65211b..bb18994618 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -98,6 +98,135 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
}
}
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+ LoadType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = dst;
+ switch (type.value()) {
+ case LoadType::kI64Load8U:
+ case LoadType::kI64Load8S:
+ // Swap low and high registers.
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
+ assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
+ V8_FALLTHROUGH;
+ case LoadType::kI32Load8U:
+ case LoadType::kI32Load8S:
+ // No need to change endianness for byte size.
+ return;
+ case LoadType::kF32Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI32Load:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case LoadType::kI32Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kI32Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kF64Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load:
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
+ break;
+ case LoadType::kI64Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
+ assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
+ break;
+ case LoadType::kI64Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
+ assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+ break;
+ case LoadType::kI64Load32U:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
+ break;
+ case LoadType::kI64Load32S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->sra(tmp.high_gp(), tmp.high_gp(), 31);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case LoadType::kF32Load:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+ break;
+ case LoadType::kF64Load:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = src;
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ // Swap low and high registers.
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
+ assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store8:
+ // No need to change endianness for byte size.
+ return;
+ case StoreType::kF32Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kF64Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI64Store:
+ case StoreType::kI64Store32:
+ case StoreType::kI64Store16:
+ assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
+ assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
+ assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case StoreType::kF32Store:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+ break;
+ case StoreType::kF64Store:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+#endif // V8_TARGET_BIG_ENDIAN
+
} // namespace liftoff
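On big-endian MIPS the helpers above byte-swap the value after a little-endian wasm load (and before a store), then sign- or zero-extend as the load type requires. A host-side sketch of the arithmetic for a 16-bit signed load, purely illustrative and independent of the MIPS instructions actually emitted:

    // Host-side illustration of a byte-swapped i32.load16_s; this is the value
    // transformation the assembler code implements, not the generated code.
    #include <cstdint>
    #include <cstdio>

    int32_t SwapAndSignExtend16(uint32_t raw_halfword) {
      uint16_t swapped = static_cast<uint16_t>(((raw_halfword & 0xFF) << 8) |
                                               ((raw_halfword >> 8) & 0xFF));
      return static_cast<int16_t>(swapped);  // sign-extend after the swap
    }

    int main() {
      // The little-endian encoding of -2 (0xFFFE) reads as 0xFEFF on a
      // big-endian machine; swapping the bytes restores -2.
      std::printf("%d\n", SwapAndSignExtend16(0xFEFF));  // prints -2
      return 0;
    }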
int LiftoffAssembler::PrepareStackFrame() {
@@ -248,7 +377,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) {
- ChangeEndiannessLoad(dst, type, pinned);
+ liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
}
#endif
}
@@ -273,7 +402,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
src = tmp;
pinned.set(tmp);
- ChangeEndiannessStore(src, type, pinned);
+ liftoff::ChangeEndiannessStore(this, src, type, pinned);
}
#endif
@@ -316,134 +445,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = dst;
- switch (type.value()) {
- case LoadType::kI64Load8U:
- case LoadType::kI64Load8S:
- // Swap low and high registers.
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
- TurboAssembler::Move(tmp.high_gp(), kScratchReg);
- V8_FALLTHROUGH;
- case LoadType::kI32Load8U:
- case LoadType::kI32Load8S:
- // No need to change endianness for byte size.
- return;
- case LoadType::kF32Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI32Load:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- break;
- case LoadType::kI32Load16S:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
- break;
- case LoadType::kI32Load16U:
- TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
- break;
- case LoadType::kF64Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpRegPair, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI64Load:
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
- break;
- case LoadType::kI64Load16U:
- TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
- TurboAssembler::Move(tmp.high_gp(), zero_reg);
- break;
- case LoadType::kI64Load16S:
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2);
- sra(tmp.high_gp(), tmp.high_gp(), 31);
- break;
- case LoadType::kI64Load32U:
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- TurboAssembler::Move(tmp.high_gp(), zero_reg);
- break;
- case LoadType::kI64Load32S:
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- sra(tmp.high_gp(), tmp.high_gp(), 31);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case LoadType::kF32Load:
- emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
- break;
- case LoadType::kF64Load:
- emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = src;
- switch (type.value()) {
- case StoreType::kI64Store8:
- // Swap low and high registers.
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
- TurboAssembler::Move(tmp.high_gp(), kScratchReg);
- V8_FALLTHROUGH;
- case StoreType::kI32Store8:
- // No need to change endianness for byte size.
- return;
- case StoreType::kF32Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI32Store:
- case StoreType::kI32Store16:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- break;
- case StoreType::kF64Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpRegPair, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI64Store:
- case StoreType::kI64Store32:
- case StoreType::kI64Store16:
- TurboAssembler::Move(kScratchReg, tmp.low_gp());
- TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
- TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case StoreType::kF32Store:
- emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
- break;
- case StoreType::kF64Store:
- emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -745,7 +746,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+ // This is a nop on mips32.
}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
diff --git a/deps/v8/src/wasm/baseline/mips64/OWNERS b/deps/v8/src/wasm/baseline/mips64/OWNERS
index cf2df277c9..c653ce404d 100644
--- a/deps/v8/src/wasm/baseline/mips64/OWNERS
+++ b/deps/v8/src/wasm/baseline/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index fdbbe0f7d4..4bbfc18251 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -88,6 +88,115 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
}
}
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+ LoadType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = dst;
+ switch (type.value()) {
+ case LoadType::kI64Load8U:
+ case LoadType::kI64Load8S:
+ case LoadType::kI32Load8U:
+ case LoadType::kI32Load8S:
+ // No need to change endianness for byte size.
+ return;
+ case LoadType::kF32Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load32U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
+ assm->dsrl32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ assm->dsra32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ assm->dsra32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+ assm->dsrl32(tmp.gp(), tmp.gp(), 0);
+ break;
+ case LoadType::kF64Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case LoadType::kF32Load:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+ break;
+ case LoadType::kF64Load:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = src;
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ // No need to change endianness for byte size.
+ return;
+ case StoreType::kF32Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kF64Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI64Store:
+ case StoreType::kI64Store32:
+ case StoreType::kI64Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case StoreType::kF32Store:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+ break;
+ case StoreType::kF64Store:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+#endif // V8_TARGET_BIG_ENDIAN
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -212,7 +321,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) {
- ChangeEndiannessLoad(dst, type, pinned);
+ liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
}
#endif
}
@@ -237,7 +346,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
src = tmp;
pinned.set(tmp);
- ChangeEndiannessStore(src, type, pinned);
+ liftoff::ChangeEndiannessStore(this, src, type, pinned);
}
#endif
@@ -269,114 +378,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = dst;
- switch (type.value()) {
- case LoadType::kI64Load8U:
- case LoadType::kI64Load8S:
- case LoadType::kI32Load8U:
- case LoadType::kI32Load8S:
- // No need to change endianness for byte size.
- return;
- case LoadType::kF32Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI64Load32U:
- TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
- dsrl32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kI32Load:
- case LoadType::kI64Load32S:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- dsra32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kI32Load16S:
- case LoadType::kI64Load16S:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
- dsra32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kI32Load16U:
- case LoadType::kI64Load16U:
- TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
- dsrl32(tmp.gp(), tmp.gp(), 0);
- break;
- case LoadType::kF64Load:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
- V8_FALLTHROUGH;
- case LoadType::kI64Load:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case LoadType::kF32Load:
- emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
- break;
- case LoadType::kF64Load:
- emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- bool is_float = false;
- LiftoffRegister tmp = src;
- switch (type.value()) {
- case StoreType::kI64Store8:
- case StoreType::kI32Store8:
- // No need to change endianness for byte size.
- return;
- case StoreType::kF32Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI32Store:
- case StoreType::kI32Store16:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
- break;
- case StoreType::kF64Store:
- is_float = true;
- tmp = GetUnusedRegister(kGpReg, pinned);
- emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
- V8_FALLTHROUGH;
- case StoreType::kI64Store:
- case StoreType::kI64Store32:
- case StoreType::kI64Store16:
- TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
- break;
- default:
- UNREACHABLE();
- }
-
- if (is_float) {
- switch (type.value()) {
- case StoreType::kF32Store:
- emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
- break;
- case StoreType::kF64Store:
- emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
- break;
- default:
- UNREACHABLE();
- }
- }
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index a4bd20622e..9164db2188 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
BAILOUT("Store");
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessStore");
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -258,7 +247,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+#ifdef V8_TARGET_ARCH_PPC64
+ BAILOUT("emit_i32_to_intptr");
+#else
+// This is a nop on ppc32.
+#endif
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index ee142c7be4..e39dd90166 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -59,17 +59,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
BAILOUT("Store");
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessLoad");
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- BAILOUT("ChangeEndiannessStore");
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -258,7 +247,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
- UNREACHABLE();
+#ifdef V8_TARGET_ARCH_S390X
+ BAILOUT("emit_i32_to_intptr");
+#else
+// This is a nop on s390.
+#endif
}
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index b8d08c56aa..f6a8e09b4e 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -23,6 +23,17 @@ namespace wasm {
namespace liftoff {
+static_assert((kLiftoffAssemblerGpCacheRegs &
+ Register::ListOf<kScratchRegister>()) == 0,
+              "scratch register must not be used as a cache register");
+
+constexpr DoubleRegister kScratchDoubleReg2 = xmm14;
+static_assert(kScratchDoubleReg != kScratchDoubleReg2, "collision");
+static_assert(
+ (kLiftoffAssemblerFpCacheRegs &
+ DoubleRegister::ListOf<kScratchDoubleReg, kScratchDoubleReg2>()) == 0,
+ "scratch registers must not be used as cache registers");
+
// rbp-8 holds the stack marker, rbp-16 is the instance parameter, and the
// first stack slot is located at rbp-24.
constexpr int32_t kConstantStackSpace = 16;
@@ -38,13 +49,18 @@ inline Operand GetStackSlot(uint32_t index) {
inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
- uint32_t offset_imm, LiftoffRegList pinned) {
- // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
- // immediate value (in 31 bits, interpreted as signed value).
- // If the offset is bigger, we always trap and this code is not reached.
- DCHECK(is_uint31(offset_imm));
- if (offset == no_reg) return Operand(addr, offset_imm);
- return Operand(addr, offset, times_1, offset_imm);
+ uint32_t offset_imm) {
+ if (is_uint31(offset_imm)) {
+ if (offset == no_reg) return Operand(addr, offset_imm);
+ return Operand(addr, offset, times_1, offset_imm);
+ }
+ // Offset immediate does not fit in 31 bits.
+ Register scratch = kScratchRegister;
+ assm->movl(scratch, Immediate(offset_imm));
+ if (offset != no_reg) {
+ assm->addq(scratch, offset);
+ }
+ return Operand(addr, scratch, times_1, 0);
}
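The rewritten GetMemOp no longer assumes the offset immediate fits in a signed 31-bit displacement: when it does not, the offset is materialized in the scratch register and folded into the address. A rough host-side model of that decision, with registers reduced to plain integers (both branches compute the same address; the difference is only how the x64 operand would be encoded):

    // Simplified model of the addressing decision above; not the emitted code.
    #include <cstdint>
    #include <cstdio>

    uint64_t EffectiveAddress(uint64_t addr, uint64_t offset_reg, uint32_t offset_imm) {
      if (offset_imm < (1u << 31)) {            // same predicate as is_uint31()
        return addr + offset_reg + offset_imm;  // Operand(addr, offset, times_1, imm)
      }
      uint64_t scratch = offset_imm;            // movl(scratch, Immediate(offset_imm))
      scratch += offset_reg;                    // addq(scratch, offset)
      return addr + scratch;                    // Operand(addr, scratch, times_1, 0)
    }

    int main() {
      std::printf("%llu\n", static_cast<unsigned long long>(
                                EffectiveAddress(0x1000, 8, 0x80000000u)));
      return 0;
    }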
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
@@ -192,8 +208,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
- Operand src_op =
- liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned);
+ Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
case LoadType::kI32Load8U:
@@ -244,8 +259,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
if (emit_debug_code() && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
- Operand dst_op =
- liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned);
+ Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
if (protected_store_pc) *protected_store_pc = pc_offset();
switch (type.value()) {
case StoreType::kI32Store8:
@@ -274,17 +288,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
-void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
-void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
- StoreType type,
- LiftoffRegList pinned) {
- // Nop.
-}
-
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
@@ -296,9 +299,8 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register(kGpReg)) {
- LiftoffRegister reg = GetUnusedRegister(kGpReg);
- Fill(reg, src_index, type);
- Spill(dst_index, reg, type);
+ Fill(LiftoffRegister{kScratchRegister}, src_index, type);
+ Spill(dst_index, LiftoffRegister{kScratchRegister}, type);
} else {
pushq(liftoff::GetStackSlot(src_index));
popq(liftoff::GetStackSlot(dst_index));
@@ -465,10 +467,8 @@ void EmitIntDivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
// unconditionally, as the cache state will also be modified unconditionally.
liftoff::SpillRegisters(assm, rdx, rax);
if (rhs == rax || rhs == rdx) {
- LiftoffRegList unavailable = LiftoffRegList::ForRegs(rax, rdx, lhs);
- Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
- iop(mov, tmp, rhs);
- rhs = tmp;
+ iop(mov, kScratchRegister, rhs);
+ rhs = kScratchRegister;
}
// Check for division by zero.
@@ -1098,10 +1098,8 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
}
CpuFeatureScope feature(assm, SSE4_1);
- LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
- DoubleRegister rounded =
- pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
- DoubleRegister converted_back = assm->GetUnusedRegister(kFpReg, pinned).fp();
+ DoubleRegister rounded = kScratchDoubleReg;
+ DoubleRegister converted_back = kScratchDoubleReg2;
if (std::is_same<double, src_type>::value) { // f64
assm->Roundsd(rounded, src, kRoundToZero);
@@ -1380,14 +1378,8 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
}
DCHECK_LE(arg_bytes, stack_bytes);
-// Pass a pointer to the buffer with the arguments to the C function.
-// On win64, the first argument is in {rcx}, otherwise it is {rdi}.
-#ifdef _WIN64
- constexpr Register kFirstArgReg = rcx;
-#else
- constexpr Register kFirstArgReg = rdi;
-#endif
- movp(kFirstArgReg, rsp);
+ // Pass a pointer to the buffer with the arguments to the C function.
+ movp(arg_reg_1, rsp);
constexpr int kNumCCallArgs = 1;
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index cca823b84d..3dd9aff9c6 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -11,7 +11,7 @@
#include "src/base/compiler-specific.h"
#include "src/flags.h"
#include "src/signature.h"
-#include "src/utils.h"
+#include "src/v8memory.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 621f905d44..3e0a0da46e 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -8,9 +8,11 @@
// Only include this header when implementing a new interface of the
// WasmFullDecoder.
+#include "src/base/platform/elapsed-timer.h"
#include "src/bit-vector.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -37,17 +39,21 @@ struct WasmException;
return true; \
}())
-#define RET_ON_PROTOTYPE_OPCODE(flag) \
+#define RET_ON_PROTOTYPE_OPCODE(feat) \
DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
- if (!FLAG_experimental_wasm_##flag) { \
- this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
+ if (!this->enabled_.feat) { \
+ this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \
+ } else { \
+ this->detected_->feat = true; \
}
-#define CHECK_PROTOTYPE_OPCODE(flag) \
+#define CHECK_PROTOTYPE_OPCODE(feat) \
DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
- if (!FLAG_experimental_wasm_##flag) { \
- this->error("Invalid opcode (enable with --experimental-wasm-" #flag ")"); \
+ if (!this->enabled_.feat) { \
+ this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \
break; \
+ } else { \
+ this->detected_->feat = true; \
}
#define OPCODE_ERROR(opcode, message) \
@@ -208,14 +214,16 @@ struct BlockTypeImmediate {
uint32_t sig_index = 0;
FunctionSig* sig = nullptr;
- inline BlockTypeImmediate(Decoder* decoder, const byte* pc) {
+ inline BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
+ const byte* pc) {
uint8_t val = decoder->read_u8<validate>(pc + 1, "block type");
if (!decode_local_type(val, &type)) {
// Handle multi-value blocks.
- if (!VALIDATE(FLAG_experimental_wasm_mv)) {
+ if (!VALIDATE(enabled.mv)) {
decoder->error(pc + 1, "invalid block type");
return;
}
+ if (!VALIDATE(decoder->ok())) return;
int32_t index =
decoder->read_i32v<validate>(pc + 1, &length, "block arity");
if (!VALIDATE(length > 0 && index >= 0)) {
@@ -660,13 +668,18 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
template <Decoder::ValidateFlag validate>
class WasmDecoder : public Decoder {
public:
- WasmDecoder(const WasmModule* module, FunctionSig* sig, const byte* start,
+ WasmDecoder(const WasmModule* module, const WasmFeatures& enabled,
+ WasmFeatures* detected, FunctionSig* sig, const byte* start,
const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, end, buffer_offset),
module_(module),
+ enabled_(enabled),
+ detected_(detected),
sig_(sig),
local_types_(nullptr) {}
const WasmModule* module_;
+ const WasmFeatures enabled_;
+ WasmFeatures* detected_;
FunctionSig* sig_;
ZoneVector<ValueType>* local_types_;
@@ -677,7 +690,8 @@ class WasmDecoder : public Decoder {
: static_cast<uint32_t>(local_types_->size());
}
- static bool DecodeLocals(Decoder* decoder, const FunctionSig* sig,
+ static bool DecodeLocals(const WasmFeatures& enabled, Decoder* decoder,
+ const FunctionSig* sig,
ZoneVector<ValueType>* type_list) {
DCHECK_NOT_NULL(type_list);
DCHECK_EQ(0, type_list->size());
@@ -717,14 +731,14 @@ class WasmDecoder : public Decoder {
type = kWasmF64;
break;
case kLocalAnyRef:
- if (FLAG_experimental_wasm_anyref) {
+ if (enabled.anyref) {
type = kWasmAnyRef;
break;
}
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
case kLocalS128:
- if (FLAG_experimental_wasm_simd) {
+ if (enabled.simd) {
type = kWasmS128;
break;
}
@@ -1007,7 +1021,7 @@ class WasmDecoder : public Decoder {
case kExprIf: // fall through
case kExprLoop:
case kExprBlock: {
- BlockTypeImmediate<validate> imm(decoder, pc);
+ BlockTypeImmediate<validate> imm(kAllWasmFeatures, decoder, pc);
return 1 + imm.length;
}
@@ -1213,10 +1227,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
public:
template <typename... InterfaceArgs>
- WasmFullDecoder(Zone* zone, const wasm::WasmModule* module,
+ WasmFullDecoder(Zone* zone, const WasmModule* module,
+ const WasmFeatures& enabled, WasmFeatures* detected,
const FunctionBody& body, InterfaceArgs&&... interface_args)
- : WasmDecoder<validate>(module, body.sig, body.start, body.end,
- body.offset),
+ : WasmDecoder<validate>(module, enabled, detected, body.sig, body.start,
+ body.end, body.offset),
zone_(zone),
interface_(std::forward<InterfaceArgs>(interface_args)...),
local_type_vec_(zone),
@@ -1244,7 +1259,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DCHECK_EQ(0, this->local_types_->size());
- WasmDecoder<validate>::DecodeLocals(this, this->sig_, this->local_types_);
+ WasmDecoder<validate>::DecodeLocals(this->enabled_, this, this->sig_,
+ this->local_types_);
CALL_INTERFACE(StartFunction);
DecodeFunctionBody();
if (!this->failed()) CALL_INTERFACE(FinishFunction);
@@ -1300,7 +1316,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return local_type_vec_[index];
}
- inline wasm::WasmCodePosition position() {
+ inline WasmCodePosition position() {
int offset = static_cast<int>(this->pc_ - this->start_);
DCHECK_EQ(this->pc_ - this->start_, offset); // overflows cannot happen
return offset;
@@ -1432,7 +1448,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprNop:
break;
case kExprBlock: {
- BlockTypeImmediate<validate> imm(this, this->pc_);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
PopArgs(imm.sig);
auto* block = PushBlock();
@@ -1461,7 +1477,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprTry: {
CHECK_PROTOTYPE_OPCODE(eh);
- BlockTypeImmediate<validate> imm(this, this->pc_);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
PopArgs(imm.sig);
auto* try_block = PushTry();
@@ -1514,7 +1530,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprLoop: {
- BlockTypeImmediate<validate> imm(this, this->pc_);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
PopArgs(imm.sig);
auto* block = PushLoop();
@@ -1525,7 +1541,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprIf: {
- BlockTypeImmediate<validate> imm(this, this->pc_);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
auto cond = Pop(0, kWasmI32);
PopArgs(imm.sig);
@@ -2475,14 +2491,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
class EmptyInterface {
public:
- static constexpr wasm::Decoder::ValidateFlag validate =
- wasm::Decoder::kValidate;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
using Value = ValueBase;
using Control = ControlBase<Value>;
- using Decoder = WasmFullDecoder<validate, EmptyInterface>;
+ using FullDecoder = WasmFullDecoder<validate, EmptyInterface>;
#define DEFINE_EMPTY_CALLBACK(name, ...) \
- void name(Decoder* decoder, ##__VA_ARGS__) {}
+ void name(FullDecoder* decoder, ##__VA_ARGS__) {}
INTERFACE_FUNCTIONS(DEFINE_EMPTY_CALLBACK)
#undef DEFINE_EMPTY_CALLBACK
};
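EmptyInterface keeps relying on an X-macro list (INTERFACE_FUNCTIONS) to stamp out one no-op callback per decoder event; only the parameter type changes to FullDecoder. A tiny self-contained version of that generation trick, with an invented callback list:

    // Illustration of the X-macro callback generation; CALLBACK_LIST is made up.
    #include <cstdio>

    #define CALLBACK_LIST(V) \
      V(StartFunction)       \
      V(FinishFunction)      \
      V(NextInstruction)

    struct NoOpInterface {
    #define DEFINE_EMPTY_CALLBACK(name) \
      void name() {}
      CALLBACK_LIST(DEFINE_EMPTY_CALLBACK)
    #undef DEFINE_EMPTY_CALLBACK
    };

    int main() {
      NoOpInterface iface;
      iface.StartFunction();    // every listed callback exists and does nothing
      iface.NextInstruction();
      std::printf("ok\n");
      return 0;
    }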
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 41398eba25..beb8716d9a 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -61,9 +61,8 @@ constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
class WasmGraphBuildingInterface {
public:
- static constexpr wasm::Decoder::ValidateFlag validate =
- wasm::Decoder::kValidate;
- using Decoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
+ static constexpr Decoder::ValidateFlag validate = Decoder::kValidate;
+ using FullDecoder = WasmFullDecoder<validate, WasmGraphBuildingInterface>;
struct Value : public ValueWithNamedConstructors<Value> {
TFNode* node;
@@ -85,7 +84,7 @@ class WasmGraphBuildingInterface {
explicit WasmGraphBuildingInterface(TFBuilder* builder) : builder_(builder) {}
- void StartFunction(Decoder* decoder) {
+ void StartFunction(FullDecoder* decoder) {
SsaEnv* ssa_env =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
uint32_t num_locals = decoder->NumLocals();
@@ -101,8 +100,7 @@ class WasmGraphBuildingInterface {
TFNode* start = builder_->Start(
static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
// Initialize the instance parameter (index 0).
- builder_->set_instance_node(
- builder_->Param(wasm::kWasmInstanceParameterIndex));
+ builder_->set_instance_node(builder_->Param(kWasmInstanceParameterIndex));
// Initialize local variables. Parameters are shifted by 1 because of the
// instance parameter.
uint32_t index = 0;
@@ -132,25 +130,25 @@ class WasmGraphBuildingInterface {
builder_->InitInstanceCache(&ssa_env->instance_cache);
}
- void StartFunctionBody(Decoder* decoder, Control* block) {
+ void StartFunctionBody(FullDecoder* decoder, Control* block) {
SsaEnv* break_env = ssa_env_;
SetEnv(Steal(decoder->zone(), break_env));
block->end_env = break_env;
}
- void FinishFunction(Decoder*) { builder_->PatchInStackCheckIfNeeded(); }
+ void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
- void OnFirstError(Decoder*) {}
+ void OnFirstError(FullDecoder*) {}
- void NextInstruction(Decoder*, WasmOpcode) {}
+ void NextInstruction(FullDecoder*, WasmOpcode) {}
- void Block(Decoder* decoder, Control* block) {
+ void Block(FullDecoder* decoder, Control* block) {
// The break environment is the outer environment.
block->end_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
}
- void Loop(Decoder* decoder, Control* block) {
+ void Loop(FullDecoder* decoder, Control* block) {
SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
block->end_env = finish_try_env;
// The continue environment is the inner environment.
@@ -164,7 +162,7 @@ class WasmGraphBuildingInterface {
}
}
- void Try(Decoder* decoder, Control* block) {
+ void Try(FullDecoder* decoder, Control* block) {
SsaEnv* outer_env = ssa_env_;
SsaEnv* catch_env = Split(decoder, outer_env);
// Mark catch environment as unreachable, since only accessible
@@ -179,7 +177,7 @@ class WasmGraphBuildingInterface {
current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
}
- void If(Decoder* decoder, const Value& cond, Control* if_block) {
+ void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
if (ssa_env_->go()) BUILD(BranchNoHint, cond.node, &if_true, &if_false);
@@ -193,51 +191,51 @@ class WasmGraphBuildingInterface {
SetEnv(true_env);
}
- void FallThruTo(Decoder* decoder, Control* c) {
+ void FallThruTo(FullDecoder* decoder, Control* c) {
DCHECK(!c->is_loop());
MergeValuesInto(decoder, c, &c->end_merge);
}
- void PopControl(Decoder* decoder, Control* block) {
+ void PopControl(FullDecoder* decoder, Control* block) {
if (!block->is_loop()) SetEnv(block->end_env);
}
- void EndControl(Decoder* decoder, Control* block) { ssa_env_->Kill(); }
+ void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); }
- void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
+ void UnOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& value, Value* result) {
result->node = BUILD(Unop, opcode, value.node, decoder->position());
}
- void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig* sig,
+ void BinOp(FullDecoder* decoder, WasmOpcode opcode, FunctionSig* sig,
const Value& lhs, const Value& rhs, Value* result) {
auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
if (result) result->node = node;
}
- void I32Const(Decoder* decoder, Value* result, int32_t value) {
+ void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
result->node = builder_->Int32Constant(value);
}
- void I64Const(Decoder* decoder, Value* result, int64_t value) {
+ void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
result->node = builder_->Int64Constant(value);
}
- void F32Const(Decoder* decoder, Value* result, float value) {
+ void F32Const(FullDecoder* decoder, Value* result, float value) {
result->node = builder_->Float32Constant(value);
}
- void F64Const(Decoder* decoder, Value* result, double value) {
+ void F64Const(FullDecoder* decoder, Value* result, double value) {
result->node = builder_->Float64Constant(value);
}
- void RefNull(Decoder* decoder, Value* result) {
+ void RefNull(FullDecoder* decoder, Value* result) {
result->node = builder_->RefNull();
}
- void Drop(Decoder* decoder, const Value& value) {}
+ void Drop(FullDecoder* decoder, const Value& value) {}
- void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
+ void DoReturn(FullDecoder* decoder, Vector<Value> values, bool implicit) {
if (implicit) {
DCHECK_EQ(1, decoder->control_depth());
SetEnv(decoder->control_at(0)->end_env);
@@ -250,40 +248,40 @@ class WasmGraphBuildingInterface {
BUILD(Return, static_cast<unsigned>(values.size()), buffer);
}
- void GetLocal(Decoder* decoder, Value* result,
+ void GetLocal(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
result->node = ssa_env_->locals[imm.index];
}
- void SetLocal(Decoder* decoder, const Value& value,
+ void SetLocal(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
- void TeeLocal(Decoder* decoder, const Value& value, Value* result,
+ void TeeLocal(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
result->node = value.node;
if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
- void GetGlobal(Decoder* decoder, Value* result,
+ void GetGlobal(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
result->node = BUILD(GetGlobal, imm.index);
}
- void SetGlobal(Decoder* decoder, const Value& value,
+ void SetGlobal(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
BUILD(SetGlobal, imm.index, value.node);
}
- void Unreachable(Decoder* decoder) {
+ void Unreachable(FullDecoder* decoder) {
BUILD(Unreachable, decoder->position());
}
- void Select(Decoder* decoder, const Value& cond, const Value& fval,
+ void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
TFNode* controls[2];
BUILD(BranchNoHint, cond.node, &controls[0], &controls[1]);
@@ -294,11 +292,11 @@ class WasmGraphBuildingInterface {
ssa_env_->control = merge;
}
- void Br(Decoder* decoder, Control* target) {
+ void Br(FullDecoder* decoder, Control* target) {
MergeValuesInto(decoder, target, target->br_merge());
}
- void BrIf(Decoder* decoder, const Value& cond, Control* target) {
+ void BrIf(FullDecoder* decoder, const Value& cond, Control* target) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder, fenv);
fenv->SetNotMerged();
@@ -308,7 +306,7 @@ class WasmGraphBuildingInterface {
ssa_env_ = fenv;
}
- void BrTable(Decoder* decoder, const BranchTableImmediate<validate>& imm,
+ void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
const Value& key) {
if (imm.table_count == 0) {
// Only a default target. Do the equivalent of br.
@@ -336,11 +334,11 @@ class WasmGraphBuildingInterface {
ssa_env_ = break_env;
}
- void Else(Decoder* decoder, Control* if_block) {
+ void Else(FullDecoder* decoder, Control* if_block) {
SetEnv(if_block->false_env);
}
- void LoadMem(Decoder* decoder, LoadType type,
+ void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
Value* result) {
result->node =
@@ -348,56 +346,57 @@ class WasmGraphBuildingInterface {
imm.offset, imm.alignment, decoder->position());
}
- void StoreMem(Decoder* decoder, StoreType type,
+ void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm, const Value& index,
const Value& value) {
BUILD(StoreMem, type.mem_rep(), index.node, imm.offset, imm.alignment,
value.node, decoder->position(), type.value_type());
}
- void CurrentMemoryPages(Decoder* decoder, Value* result) {
+ void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
result->node = BUILD(CurrentMemoryPages);
}
- void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
+ void GrowMemory(FullDecoder* decoder, const Value& value, Value* result) {
result->node = BUILD(GrowMemory, value.node);
// Always reload the instance cache after growing memory.
LoadContextIntoSsa(ssa_env_);
}
- void CallDirect(Decoder* decoder, const CallFunctionImmediate<validate>& imm,
+ void CallDirect(FullDecoder* decoder,
+ const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
DoCall(decoder, nullptr, imm.sig, imm.index, args, returns);
}
- void CallIndirect(Decoder* decoder, const Value& index,
+ void CallIndirect(FullDecoder* decoder, const Value& index,
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
DoCall(decoder, index.node, imm.sig, imm.sig_index, args, returns);
}
- void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
TFNode** inputs = GetNodes(args);
TFNode* node = BUILD(SimdOp, opcode, inputs);
if (result) result->node = node;
}
- void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
+ void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate> imm, Vector<Value> inputs,
Value* result) {
TFNode** nodes = GetNodes(inputs);
result->node = BUILD(SimdLaneOp, opcode, imm.lane, nodes);
}
- void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
+ void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate> imm, const Value& input,
Value* result) {
TFNode* inputs[] = {input.node};
result->node = BUILD(SimdShiftOp, opcode, imm.shift, inputs);
}
- void Simd8x16ShuffleOp(Decoder* decoder,
+ void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
@@ -405,14 +404,14 @@ class WasmGraphBuildingInterface {
result->node = BUILD(Simd8x16ShuffleOp, imm.shuffle, input_nodes);
}
- TFNode* GetExceptionTag(Decoder* decoder,
+ TFNode* GetExceptionTag(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm) {
// TODO(kschimpf): Need to get runtime exception tag values. This
// code only handles non-imported/exported exceptions.
return BUILD(Int32Constant, imm.index);
}
- void Throw(Decoder* decoder, const ExceptionIndexImmediate<validate>& imm,
+ void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
Control* block, const Vector<Value>& value_args) {
int count = value_args.length();
ZoneVector<TFNode*> args(count, decoder->zone());
@@ -424,7 +423,7 @@ class WasmGraphBuildingInterface {
EndControl(decoder, block);
}
- void CatchException(Decoder* decoder,
+ void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
@@ -483,7 +482,7 @@ class WasmGraphBuildingInterface {
}
}
- void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
+ void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
TFNode** inputs = GetNodes(args);
TFNode* node = BUILD(AtomicOp, opcode, inputs, imm.alignment, imm.offset,
@@ -496,7 +495,7 @@ class WasmGraphBuildingInterface {
TFBuilder* builder_;
uint32_t current_catch_ = kNullCatch;
- TryInfo* current_try_info(Decoder* decoder) {
+ TryInfo* current_try_info(FullDecoder* decoder) {
return decoder->control_at(decoder->control_depth() - 1 - current_catch_)
->try_info;
}
@@ -548,7 +547,7 @@ class WasmGraphBuildingInterface {
builder_->set_instance_cache(&env->instance_cache);
}
- TFNode* CheckForException(Decoder* decoder, TFNode* node) {
+ TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
if (node == nullptr) return nullptr;
const bool inside_try_scope = current_catch_ != kNullCatch;
@@ -600,7 +599,7 @@ class WasmGraphBuildingInterface {
}
}
- void MergeValuesInto(Decoder* decoder, Control* c, Merge<Value>* merge) {
+ void MergeValuesInto(FullDecoder* decoder, Control* c, Merge<Value>* merge) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
if (!ssa_env_->go()) return;
@@ -623,7 +622,7 @@ class WasmGraphBuildingInterface {
}
}
- void Goto(Decoder* decoder, SsaEnv* from, SsaEnv* to) {
+ void Goto(FullDecoder* decoder, SsaEnv* from, SsaEnv* to) {
DCHECK_NOT_NULL(to);
if (!from->go()) return;
switch (to->state) {
@@ -685,7 +684,7 @@ class WasmGraphBuildingInterface {
return from->Kill();
}
- SsaEnv* PrepareForLoop(Decoder* decoder, SsaEnv* env) {
+ SsaEnv* PrepareForLoop(FullDecoder* decoder, SsaEnv* env) {
if (!env->go()) return Split(decoder, env);
env->state = SsaEnv::kMerged;
@@ -732,7 +731,7 @@ class WasmGraphBuildingInterface {
}
// Create a complete copy of {from}.
- SsaEnv* Split(Decoder* decoder, SsaEnv* from) {
+ SsaEnv* Split(FullDecoder* decoder, SsaEnv* from) {
DCHECK_NOT_NULL(from);
SsaEnv* result =
reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
@@ -782,9 +781,8 @@ class WasmGraphBuildingInterface {
return result;
}
- void DoCall(WasmFullDecoder<validate, WasmGraphBuildingInterface>* decoder,
- TFNode* index_node, FunctionSig* sig, uint32_t index,
- const Value args[], Value returns[]) {
+ void DoCall(FullDecoder* decoder, TFNode* index_node, FunctionSig* sig,
+ uint32_t index, const Value args[], Value returns[]) {
int param_count = static_cast<int>(sig->parameter_count());
TFNode** arg_nodes = builder_->Buffer(param_count + 1);
TFNode** return_nodes = nullptr;
@@ -811,10 +809,10 @@ class WasmGraphBuildingInterface {
} // namespace
-bool DecodeLocalDecls(BodyLocalDecls* decls, const byte* start,
- const byte* end) {
+bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
+ const byte* start, const byte* end) {
Decoder decoder(start, end);
- if (WasmDecoder<Decoder::kValidate>::DecodeLocals(&decoder, nullptr,
+ if (WasmDecoder<Decoder::kValidate>::DecodeLocals(enabled, &decoder, nullptr,
&decls->type_list)) {
DCHECK(decoder.ok());
decls->encoded_size = decoder.pc_offset();
@@ -827,7 +825,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
BodyLocalDecls* decls)
: Decoder(start, end) {
if (decls != nullptr) {
- if (DecodeLocalDecls(decls, start, end)) {
+ if (DecodeLocalDecls(kAllWasmFeatures, decls, start, end)) {
pc_ += decls->encoded_size;
if (pc_ > end_) pc_ = end_;
}
@@ -835,33 +833,24 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
}
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
- const wasm::WasmModule* module,
+ const WasmFeatures& enabled,
+ const WasmModule* module, WasmFeatures* detected,
FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(&zone, module,
- body);
+ WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
+ &zone, module, enabled, detected, body);
decoder.Decode();
return decoder.toResult(nullptr);
}
-DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
- const wasm::WasmModule* module,
- FunctionBody& body, ModuleOrigin origin,
- Counters* counters) {
- CHECK_LE(0, body.end - body.start);
- auto time_counter = origin == kWasmOrigin
- ? counters->wasm_decode_wasm_function_time()
- : counters->wasm_decode_asm_function_time();
- TimedHistogramScope wasm_decode_function_time_scope(time_counter);
- return VerifyWasmCode(allocator, module, body);
-}
-
-DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
- FunctionBody& body,
+DecodeResult BuildTFGraph(AccountingAllocator* allocator,
+ const WasmFeatures& enabled,
+ const wasm::WasmModule* module, TFBuilder* builder,
+ WasmFeatures* detected, FunctionBody& body,
compiler::NodeOriginTable* node_origins) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kValidate, WasmGraphBuildingInterface> decoder(
- &zone, builder->module(), body, builder);
+ &zone, module, enabled, detected, body, builder);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
@@ -880,7 +869,9 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
FunctionSig* sig, const byte* pc,
const byte* end) {
- WasmDecoder<Decoder::kNoValidate> decoder(module, sig, pc, end);
+ WasmFeatures unused_detected_features;
+ WasmDecoder<Decoder::kNoValidate> decoder(
+ module, kAllWasmFeatures, &unused_detected_features, sig, pc, end);
return decoder.StackEffect(pc);
}
@@ -906,18 +897,19 @@ const char* RawOpcodeName(WasmOpcode opcode) {
} // namespace
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module,
- PrintLocals print_locals) {
+ const WasmModule* module, PrintLocals print_locals) {
StdoutStream os;
return PrintRawWasmCode(allocator, body, module, print_locals, os);
}
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module, PrintLocals print_locals,
+ const WasmModule* module, PrintLocals print_locals,
std::ostream& os, std::vector<int>* line_numbers) {
Zone zone(allocator, ZONE_NAME);
- WasmDecoder<Decoder::kNoValidate> decoder(module, body.sig, body.start,
- body.end);
+ WasmFeatures unused_detected_features;
+ WasmDecoder<Decoder::kNoValidate> decoder(module, kAllWasmFeatures,
+ &unused_detected_features, body.sig,
+ body.start, body.end);
int line_nr = 0;
constexpr int kNoByteCode = -1;
@@ -1015,7 +1007,8 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+ i.pc());
os << " // @" << i.pc_offset();
if (decoder.Complete(imm)) {
for (unsigned i = 0; i < imm.out_arity(); i++) {
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 7dbb800399..13a3ae2d0c 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -17,7 +17,6 @@ namespace v8 {
namespace internal {
class BitVector; // forward declaration
-class Counters;
namespace compiler { // external declarations from compiler.
class NodeOriginTable;
@@ -26,9 +25,10 @@ class WasmGraphBuilder;
namespace wasm {
-typedef compiler::WasmGraphBuilder TFBuilder;
struct WasmModule; // forward declaration of module interface.
-enum ModuleOrigin : uint8_t;
+struct WasmFeatures;
+
+typedef compiler::WasmGraphBuilder TFBuilder;
// A wrapper around the signature and bytes of a function.
struct FunctionBody {
@@ -43,47 +43,30 @@ struct FunctionBody {
};
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
- const wasm::WasmModule* module,
+ const WasmFeatures& enabled,
+ const WasmModule* module,
+ WasmFeatures* detected,
FunctionBody& body);
-// Note: If run in the background thread, must follow protocol using
-// isolate::async_counters() to guarantee usability of counters argument.
-DecodeResult VerifyWasmCodeWithStats(AccountingAllocator* allocator,
- const wasm::WasmModule* module,
- FunctionBody& body, ModuleOrigin origin,
- Counters* counters);
-
-DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
+DecodeResult BuildTFGraph(AccountingAllocator* allocator,
+ const WasmFeatures& enabled, const WasmModule* module,
+ TFBuilder* builder, WasmFeatures* detected,
FunctionBody& body,
compiler::NodeOriginTable* node_origins);
enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module, PrintLocals print_locals);
+ const WasmModule* module, PrintLocals print_locals);
V8_EXPORT_PRIVATE
bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
- const wasm::WasmModule* module, PrintLocals print_locals,
+ const WasmModule* module, PrintLocals print_locals,
std::ostream& out,
std::vector<int>* line_numbers = nullptr);
// A simplified form of AST printing, e.g. from a debugger.
void PrintRawWasmCode(const byte* start, const byte* end);
-inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
- const WasmModule* module, FunctionSig* sig,
- const byte* start, const byte* end) {
- FunctionBody body(sig, 0, start, end);
- return VerifyWasmCode(allocator, module, body);
-}
-
-inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
- TFBuilder* builder, FunctionSig* sig,
- const byte* start, const byte* end) {
- FunctionBody body(sig, 0, start, end);
- return BuildTFGraph(allocator, builder, body, nullptr);
-}
-
struct BodyLocalDecls {
// The size of the encoded declarations.
uint32_t encoded_size = 0; // size of encoded declarations
@@ -93,7 +76,8 @@ struct BodyLocalDecls {
explicit BodyLocalDecls(Zone* zone) : type_list(zone) {}
};
-V8_EXPORT_PRIVATE bool DecodeLocalDecls(BodyLocalDecls* decls,
+V8_EXPORT_PRIVATE bool DecodeLocalDecls(const WasmFeatures& enabled,
+ BodyLocalDecls* decls,
const byte* start, const byte* end);
V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index 2e47f82ca3..c4209d8c9c 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -15,48 +15,56 @@ namespace internal {
namespace wasm {
namespace {
-const char* GetCompilationModeAsString(
- WasmCompilationUnit::CompilationMode mode) {
+
+const char* GetExecutionTierAsString(ExecutionTier mode) {
switch (mode) {
- case WasmCompilationUnit::CompilationMode::kLiftoff:
+ case ExecutionTier::kBaseline:
return "liftoff";
- case WasmCompilationUnit::CompilationMode::kTurbofan:
+ case ExecutionTier::kOptimized:
return "turbofan";
+ case ExecutionTier::kInterpreter:
+ return "interpreter";
}
UNREACHABLE();
}
+
+void RecordStats(const WasmCode* code, Counters* counters) {
+ counters->wasm_generated_code_size()->Increment(
+ static_cast<int>(code->instructions().size()));
+ counters->wasm_reloc_size()->Increment(
+ static_cast<int>(code->reloc_info().size()));
+}
+
} // namespace
// static
-WasmCompilationUnit::CompilationMode
-WasmCompilationUnit::GetDefaultCompilationMode() {
- return FLAG_liftoff ? CompilationMode::kLiftoff : CompilationMode::kTurbofan;
+ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier() {
+ return FLAG_liftoff ? ExecutionTier::kBaseline : ExecutionTier::kOptimized;
}
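The rename from CompilationMode to ExecutionTier adds an explicit interpreter tier alongside the Liftoff baseline and TurboFan optimizing tiers. A compact sketch of the selection and naming logic, with the flag plumbing reduced to a plain bool:

    // Sketch of tier selection after the rename; FLAG_liftoff is modeled as a bool.
    #include <cstdio>

    enum class ExecutionTier { kInterpreter, kBaseline, kOptimized };

    ExecutionTier DefaultTier(bool liftoff_enabled) {
      return liftoff_enabled ? ExecutionTier::kBaseline : ExecutionTier::kOptimized;
    }

    const char* TierName(ExecutionTier tier) {
      switch (tier) {
        case ExecutionTier::kBaseline:    return "liftoff";
        case ExecutionTier::kOptimized:   return "turbofan";
        case ExecutionTier::kInterpreter: return "interpreter";
      }
      return "unknown";
    }

    int main() {
      std::printf("%s\n", TierName(DefaultTier(true)));   // liftoff
      std::printf("%s\n", TierName(DefaultTier(false)));  // turbofan
      return 0;
    }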
-WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env,
- wasm::NativeModule* native_module,
- wasm::FunctionBody body,
- wasm::WasmName name, int index,
- CompilationMode mode,
- Counters* counters, bool lower_simd)
+WasmCompilationUnit::WasmCompilationUnit(WasmEngine* wasm_engine,
+ ModuleEnv* env,
+ NativeModule* native_module,
+ FunctionBody body, WasmName name,
+ int index, Counters* counters,
+ ExecutionTier mode)
: env_(env),
- wasm_engine_(isolate->wasm_engine()),
+ wasm_engine_(wasm_engine),
func_body_(body),
func_name_(name),
- counters_(counters ? counters : isolate->counters()),
+ counters_(counters),
func_index_(index),
native_module_(native_module),
- lower_simd_(lower_simd),
mode_(mode) {
DCHECK_GE(index, env->module->num_imported_functions);
DCHECK_LT(index, env->module->functions.size());
// Always disable Liftoff for asm.js, for two reasons:
// 1) asm-specific opcodes are not implemented, and
// 2) tier-up does not work with lazy compilation.
- if (env->module->origin == kAsmJsOrigin) mode = CompilationMode::kTurbofan;
+ if (env->module->origin == kAsmJsOrigin) mode = ExecutionTier::kOptimized;
if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 &&
(FLAG_wasm_tier_mask_for_testing & (1 << index))) {
- mode = CompilationMode::kTurbofan;
+ mode = ExecutionTier::kOptimized;
}
SwitchMode(mode);
}
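For illustration, the tier-mask override above is a plain bit test on the function index:

    // --wasm-tier-mask-for-testing=5  (binary 0b101)
    // => functions 0 and 2 (of the first 32) are forced to ExecutionTier::kOptimized;
    //    all other functions keep the tier requested by the caller.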
@@ -65,7 +73,7 @@ WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate, ModuleEnv* env,
// {TurbofanWasmCompilationUnit} can be opaque in the header file.
WasmCompilationUnit::~WasmCompilationUnit() {}
-void WasmCompilationUnit::ExecuteCompilation() {
+void WasmCompilationUnit::ExecuteCompilation(WasmFeatures* detected) {
auto size_histogram = SELECT_WASM_COUNTER(counters_, env_->module->origin,
wasm, function_size_bytes);
size_histogram->AddSample(
@@ -76,75 +84,80 @@ void WasmCompilationUnit::ExecuteCompilation() {
if (FLAG_trace_wasm_compiler) {
PrintF("Compiling wasm function %d with %s\n\n", func_index_,
- GetCompilationModeAsString(mode_));
+ GetExecutionTierAsString(mode_));
}
switch (mode_) {
- case WasmCompilationUnit::CompilationMode::kLiftoff:
- if (liftoff_unit_->ExecuteCompilation()) break;
+ case ExecutionTier::kBaseline:
+ if (liftoff_unit_->ExecuteCompilation(detected)) break;
// Otherwise, fall back to turbofan.
- SwitchMode(CompilationMode::kTurbofan);
+ SwitchMode(ExecutionTier::kOptimized);
V8_FALLTHROUGH;
- case WasmCompilationUnit::CompilationMode::kTurbofan:
- turbofan_unit_->ExecuteCompilation();
+ case ExecutionTier::kOptimized:
+ turbofan_unit_->ExecuteCompilation(detected);
break;
+ case ExecutionTier::kInterpreter:
+ UNREACHABLE(); // TODO(titzer): compile interpreter entry stub.
}
}
-wasm::WasmCode* WasmCompilationUnit::FinishCompilation(
- wasm::ErrorThrower* thrower) {
- wasm::WasmCode* ret;
+WasmCode* WasmCompilationUnit::FinishCompilation(ErrorThrower* thrower) {
+ WasmCode* ret;
switch (mode_) {
- case CompilationMode::kLiftoff:
+ case ExecutionTier::kBaseline:
ret = liftoff_unit_->FinishCompilation(thrower);
break;
- case CompilationMode::kTurbofan:
+ case ExecutionTier::kOptimized:
ret = turbofan_unit_->FinishCompilation(thrower);
break;
- default:
- UNREACHABLE();
+ case ExecutionTier::kInterpreter:
+ UNREACHABLE(); // TODO(titzer): finish interpreter entry stub.
}
if (ret == nullptr) {
thrower->RuntimeError("Error finalizing code.");
+ } else {
+ RecordStats(ret, counters_);
}
return ret;
}
-void WasmCompilationUnit::SwitchMode(CompilationMode new_mode) {
+void WasmCompilationUnit::SwitchMode(ExecutionTier new_mode) {
// This method is being called in the constructor, where neither
// {liftoff_unit_} nor {turbofan_unit_} are set, or to switch mode from
// kLiftoff to kTurbofan, in which case {liftoff_unit_} is already set.
mode_ = new_mode;
switch (new_mode) {
- case CompilationMode::kLiftoff:
+ case ExecutionTier::kBaseline:
DCHECK(!turbofan_unit_);
DCHECK(!liftoff_unit_);
liftoff_unit_.reset(new LiftoffCompilationUnit(this));
return;
- case CompilationMode::kTurbofan:
+ case ExecutionTier::kOptimized:
DCHECK(!turbofan_unit_);
liftoff_unit_.reset();
turbofan_unit_.reset(new compiler::TurbofanWasmCompilationUnit(this));
return;
+ case ExecutionTier::kInterpreter:
+ UNREACHABLE(); // TODO(titzer): allow compiling interpreter entry stub.
}
UNREACHABLE();
}
// static
-wasm::WasmCode* WasmCompilationUnit::CompileWasmFunction(
- wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
- Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function,
- CompilationMode mode) {
+WasmCode* WasmCompilationUnit::CompileWasmFunction(
+ Isolate* isolate, NativeModule* native_module, WasmFeatures* detected,
+ ErrorThrower* thrower, ModuleEnv* env, const WasmFunction* function,
+ ExecutionTier mode) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
- wasm::FunctionBody function_body{
- function->sig, function->code.offset(),
- wire_bytes.start() + function->code.offset(),
- wire_bytes.start() + function->code.end_offset()};
+ FunctionBody function_body{function->sig, function->code.offset(),
+ wire_bytes.start() + function->code.offset(),
+ wire_bytes.start() + function->code.end_offset()};
- WasmCompilationUnit unit(isolate, env, native_module, function_body,
+ WasmCompilationUnit unit(isolate->wasm_engine(), env, native_module,
+ function_body,
wire_bytes.GetNameOrNull(function, env->module),
- function->func_index, mode);
- unit.ExecuteCompilation();
+ function->func_index, isolate->counters(), mode);
+ unit.ExecuteCompilation(detected);
return unit.FinishCompilation(thrower);
}
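A usage sketch of the reordered static helper, matching the updated call site in CompileSequentially further below:

    WasmFeatures detected = kNoWasmFeatures;
    WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
        isolate, native_module, &detected, thrower, module_env, &func);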
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index a270d36f78..7e19f4d12f 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -6,10 +6,15 @@
#define V8_WASM_FUNCTION_COMPILER_H_
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-tier.h"
namespace v8 {
namespace internal {
+class Counters;
+
namespace compiler {
class TurbofanWasmCompilationUnit;
} // namespace compiler
@@ -30,6 +35,8 @@ enum RuntimeExceptionSupport : bool {
enum UseTrapHandler : bool { kUseTrapHandler = true, kNoTrapHandler = false };
+enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };
+
// The {ModuleEnv} encapsulates the module data that is used during compilation.
// ModuleEnvs are shareable across multiple compilations.
struct ModuleEnv {
@@ -45,40 +52,55 @@ struct ModuleEnv {
// be generated differently.
const RuntimeExceptionSupport runtime_exception_support;
+ // The smallest size of any memory that could be used with this module, in
+ // bytes.
+ const uint64_t min_memory_size;
+
+ // The largest size of any memory that could be used with this module, in
+ // bytes.
+ const uint64_t max_memory_size;
+
+ const LowerSimd lower_simd;
+
constexpr ModuleEnv(const WasmModule* module, UseTrapHandler use_trap_handler,
- RuntimeExceptionSupport runtime_exception_support)
+ RuntimeExceptionSupport runtime_exception_support,
+ LowerSimd lower_simd = kNoLowerSimd)
: module(module),
use_trap_handler(use_trap_handler),
- runtime_exception_support(runtime_exception_support) {}
+ runtime_exception_support(runtime_exception_support),
+ min_memory_size(module ? module->initial_pages * uint64_t{kWasmPageSize}
+ : 0),
+ max_memory_size(module && module->has_maximum_pages
+ ? (module->maximum_pages * uint64_t{kWasmPageSize})
+ : kSpecMaxWasmMemoryBytes),
+ lower_simd(lower_simd) {}
};
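A quick worked example of the new memory bounds, assuming the standard 64 KiB wasm page for {kWasmPageSize}: a module declaring {initial_pages} = 16 and no maximum yields

    min_memory_size = 16 * 65536 = 1048576     // 1 MiB
    max_memory_size = kSpecMaxWasmMemoryBytes  // no declared maximum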
class WasmCompilationUnit final {
public:
- enum class CompilationMode : uint8_t { kLiftoff, kTurbofan };
- static CompilationMode GetDefaultCompilationMode();
+ static ExecutionTier GetDefaultExecutionTier();
// If constructing from a background thread, pass in a Counters*, and ensure
// that the Counters live at least as long as this compilation unit (which
// typically means to hold a std::shared_ptr<Counters>).
- // If no such pointer is passed, Isolate::counters() will be called. This is
- // only allowed to happen on the foreground thread.
- WasmCompilationUnit(Isolate*, ModuleEnv*, wasm::NativeModule*,
- wasm::FunctionBody, wasm::WasmName, int index,
- CompilationMode = GetDefaultCompilationMode(),
- Counters* = nullptr, bool lower_simd = false);
+ // If used exclusively from a foreground thread, callers may pass
+ // Isolate::counters() as the Counters argument.
+ WasmCompilationUnit(WasmEngine* wasm_engine, ModuleEnv*, NativeModule*,
+ FunctionBody, WasmName, int index, Counters*,
+ ExecutionTier = GetDefaultExecutionTier());
~WasmCompilationUnit();
- void ExecuteCompilation();
- wasm::WasmCode* FinishCompilation(wasm::ErrorThrower* thrower);
+ void ExecuteCompilation(WasmFeatures* detected);
+ WasmCode* FinishCompilation(ErrorThrower* thrower);
- static wasm::WasmCode* CompileWasmFunction(
- wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
- Isolate* isolate, ModuleEnv* env, const wasm::WasmFunction* function,
- CompilationMode = GetDefaultCompilationMode());
+ static WasmCode* CompileWasmFunction(
+ Isolate* isolate, NativeModule* native_module, WasmFeatures* detected,
+ ErrorThrower* thrower, ModuleEnv* env, const WasmFunction* function,
+ ExecutionTier = GetDefaultExecutionTier());
- wasm::NativeModule* native_module() const { return native_module_; }
- CompilationMode mode() const { return mode_; }
+ NativeModule* native_module() const { return native_module_; }
+ ExecutionTier mode() const { return mode_; }
private:
friend class LiftoffCompilationUnit;
@@ -86,20 +108,18 @@ class WasmCompilationUnit final {
ModuleEnv* env_;
WasmEngine* wasm_engine_;
- wasm::FunctionBody func_body_;
- wasm::WasmName func_name_;
+ FunctionBody func_body_;
+ WasmName func_name_;
Counters* counters_;
int func_index_;
- wasm::NativeModule* native_module_;
- // TODO(wasm): Put {lower_simd_} inside the {ModuleEnv}.
- bool lower_simd_;
- CompilationMode mode_;
+ NativeModule* native_module_;
+ ExecutionTier mode_;
// LiftoffCompilationUnit, set if {mode_ == kLiftoff}.
std::unique_ptr<LiftoffCompilationUnit> liftoff_unit_;
// TurbofanWasmCompilationUnit, set if {mode_ == kTurbofan}.
std::unique_ptr<compiler::TurbofanWasmCompilationUnit> turbofan_unit_;
- void SwitchMode(CompilationMode new_mode);
+ void SwitchMode(ExecutionTier new_mode);
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 3ac9d13e89..029044c005 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -36,6 +36,8 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
pushq(Immediate(func_index)); // max 5 bytes
movq(kScratchRegister, uint64_t{lazy_compile_target}); // max 10 bytes
jmp(kScratchRegister); // 3 bytes
+
+ PatchConstPool(); // force patching entries for partial const pool
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
@@ -80,18 +82,9 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
- int offset =
- target - reinterpret_cast<Address>(pc_) - Instruction::kPcLoadDelta;
- DCHECK_EQ(0, offset % kInstrSize);
- // If the offset is within 64 MB, emit a direct jump. Otherwise jump
- // indirectly.
- if (is_int26(offset)) {
- b(offset); // 1 instr
- } else {
- // {Move32BitImmediate} emits either [movw, movt, mov] or [ldr, constant].
- Move32BitImmediate(pc, Operand(target));
- }
-
+ // Note that {Move32BitImmediate} emits [ldr, constant] for the relocation
+ // mode used below; we need this to allow concurrent patching of this slot.
+ Move32BitImmediate(pc, Operand(target, RelocInfo::WASM_CALL));
CheckConstPool(true, false); // force emit of const pool
}
@@ -111,13 +104,16 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
+ // TODO(wasm): Currently this is guaranteed to be a {near_call} and hence is
+ // patchable concurrently. Once {kMaxWasmCodeMemory} is raised on ARM64, make
+ // sure concurrent patching is still supported.
Jump(target, RelocInfo::NONE);
}
void JumpTableAssembler::NopBytes(int bytes) {
DCHECK_LE(0, bytes);
- DCHECK_EQ(0, bytes % kInstructionSize);
- for (; bytes > 0; bytes -= kInstructionSize) {
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
nop();
}
}
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 1ef1a82f41..a83a7d5b21 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -12,38 +12,122 @@ namespace v8 {
namespace internal {
namespace wasm {
+// The jump table is the central dispatch point for all (direct and indirect)
+// invocations in WebAssembly. It holds one slot per function in a module, with
+// each slot containing a dispatch to the currently published {WasmCode} that
+// corresponds to the function.
+//
+// Note that the table is split into lines of fixed size, with lines laid out
+// consecutively within the executable memory of the {NativeModule}. The slots
+// in turn are consecutive within a line, but do not cross line boundaries.
+//
+// +- L1 -------------------+ +- L2 -------------------+ +- L3 ...
+// | S1 | S2 | ... | Sn | x | | S1 | S2 | ... | Sn | x | | S1 ...
+// +------------------------+ +------------------------+ +---- ...
+//
+// The above illustrates jump table lines {Li} containing slots {Si} with each
+// line containing {n} slots and some padding {x} for alignment purposes.
class JumpTableAssembler : public TurboAssembler {
public:
+ // Translate an offset into the continuous jump table to a jump table index.
+ static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
+ uint32_t line_index = slot_offset / kJumpTableLineSize;
+ uint32_t line_offset = slot_offset % kJumpTableLineSize;
+ DCHECK_EQ(0, line_offset % kJumpTableSlotSize);
+ return line_index * kJumpTableSlotsPerLine +
+ line_offset / kJumpTableSlotSize;
+ }
+
+ // Translate a jump table index to an offset into the continuous jump table.
+ static uint32_t SlotIndexToOffset(uint32_t slot_index) {
+ uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
+ uint32_t line_offset =
+ (slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
+ return line_index * kJumpTableLineSize + line_offset;
+ }
+
+ // Determine the size of a jump table containing the given number of slots.
+ static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
+ // TODO(wasm): Once the {RoundUp} utility handles non-power-of-two values,
+ // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize}
+ return ((slot_count + kJumpTableSlotsPerLine - 1) /
+ kJumpTableSlotsPerLine) *
+ kJumpTableLineSize;
+ }
+
+ static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
+ uint32_t func_index,
+ Address lazy_compile_target,
+ WasmCode::FlushICache flush_i_cache) {
+ Address slot = base + SlotIndexToOffset(slot_index);
+ JumpTableAssembler jtasm(slot);
+ jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
+ jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
+ if (flush_i_cache) {
+ Assembler::FlushICache(slot, kJumpTableSlotSize);
+ }
+ }
+
+ static void PatchJumpTableSlot(Address base, uint32_t slot_index,
+ Address new_target,
+ WasmCode::FlushICache flush_i_cache) {
+ Address slot = base + SlotIndexToOffset(slot_index);
+ JumpTableAssembler jtasm(slot);
+ jtasm.EmitJumpSlot(new_target);
+ jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
+ if (flush_i_cache) {
+ Assembler::FlushICache(slot, kJumpTableSlotSize);
+ }
+ }
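A hypothetical call site for the static patching helper above (the base/target variables and the {WasmCode::kFlushICache} enumerator name are illustrative assumptions):

    // Repoint the jump-table slot of function #7 at freshly published code.
    JumpTableAssembler::PatchJumpTableSlot(jump_table_base, /*slot_index=*/7,
                                           new_code_target, WasmCode::kFlushICache);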
+
+ private:
// Instantiate a {JumpTableAssembler} for patching.
explicit JumpTableAssembler(Address slot_addr, int size = 256)
: TurboAssembler(nullptr, JumpTableAssemblerOptions(),
reinterpret_cast<void*>(slot_addr), size,
CodeObjectRequired::kNo) {}
+// To allow concurrent patching of the jump table entries, we need to ensure
+// that the instruction containing the call target does not cross cache-line
+// boundaries. The jump table line size has been chosen to satisfy this.
#if V8_TARGET_ARCH_X64
+ static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
+ static constexpr int kJumpTableLineSize = 64;
static constexpr int kJumpTableSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
+ static constexpr int kJumpTableLineSize = 5 * kInstrSize;
static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
#elif V8_TARGET_ARCH_ARM64
- static constexpr int kJumpTableSlotSize = 3 * kInstructionSize;
+ static constexpr int kJumpTableLineSize = 3 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
+ static constexpr int kJumpTableLineSize = 20;
static constexpr int kJumpTableSlotSize = 20;
#elif V8_TARGET_ARCH_S390
+ static constexpr int kJumpTableLineSize = 14;
static constexpr int kJumpTableSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
+ static constexpr int kJumpTableLineSize = 48;
static constexpr int kJumpTableSlotSize = 48;
#elif V8_TARGET_ARCH_PPC
+ static constexpr int kJumpTableLineSize = 24;
static constexpr int kJumpTableSlotSize = 24;
#elif V8_TARGET_ARCH_MIPS
+ static constexpr int kJumpTableLineSize = 6 * kInstrSize;
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
+ static constexpr int kJumpTableLineSize = 8 * kInstrSize;
static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
#else
+ static constexpr int kJumpTableLineSize = 1;
static constexpr int kJumpTableSlotSize = 1;
#endif
+ static constexpr int kJumpTableSlotsPerLine =
+ kJumpTableLineSize / kJumpTableSlotSize;
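A worked example of the line/slot translation, using the x64 constants above (line size 64, slot size 18, hence 3 slots per line):

    SizeForNumberOfSlots(10);  // ((10 + 2) / 3) * 64 = 4 * 64 = 256 bytes
    SlotIndexToOffset(4);      // (4 / 3) * 64 + (4 % 3) * 18 = 64 + 18 = 82
    SlotOffsetToIndex(82);     // (82 / 64) * 3 + (82 % 64) / 18 = 3 + 1 = 4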
+
// {JumpTableAssembler} is never used during snapshot generation, and its code
// must be independent of the code range of any isolate anyway. Just ensure
// that no relocation information is recorded, there is no buffer to store it
@@ -60,16 +144,6 @@ class JumpTableAssembler : public TurboAssembler {
void EmitJumpSlot(Address target);
void NopBytes(int bytes);
-
- static void PatchJumpTableSlot(Address slot, Address new_target,
- WasmCode::FlushICache flush_i_cache) {
- JumpTableAssembler jsasm(slot);
- jsasm.EmitJumpSlot(new_target);
- jsasm.NopBytes(kJumpTableSlotSize - jsasm.pc_offset());
- if (flush_i_cache) {
- Assembler::FlushICache(slot, kJumpTableSlotSize);
- }
- }
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 6fa7219cda..885a534198 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -5,12 +5,13 @@
#include "src/wasm/memory-tracing.h"
#include "src/utils.h"
+#include "src/v8memory.h"
namespace v8 {
namespace internal {
namespace wasm {
-void TraceMemoryOperation(ExecutionEngine engine, const MemoryTracingInfo* info,
+void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
int func_index, int position, uint8_t* mem_start) {
EmbeddedVector<char, 64> value;
auto mem_rep = static_cast<MachineRepresentation>(info->mem_rep);
@@ -33,20 +34,21 @@ void TraceMemoryOperation(ExecutionEngine engine, const MemoryTracingInfo* info,
default:
SNPrintF(value, "???");
}
- char eng_c = '?';
- switch (engine) {
- case ExecutionEngine::kTurbofan:
- eng_c = 'T';
+ const char* eng = "?";
+ switch (tier) {
+ case ExecutionTier::kOptimized:
+ eng = "turbofan";
break;
- case ExecutionEngine::kLiftoff:
- eng_c = 'L';
+ case ExecutionTier::kBaseline:
+ eng = "liftoff";
break;
- case ExecutionEngine::kInterpreter:
- eng_c = 'I';
+ case ExecutionTier::kInterpreter:
+ eng = "interpreter";
break;
}
- printf("%c %8d+0x%-6x %s @%08x %s\n", eng_c, func_index, position,
- info->is_store ? "store" : "load ", info->address, value.start());
+ printf("%-11s func:%6d+0x%-6x%s %08x val: %s\n", eng, func_index, position,
+ info->is_store ? " store to" : "load from", info->address,
+ value.start());
}
} // namespace wasm
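With the new format string, a traced load now prints roughly as follows (the value string "i32:42" is illustrative; its exact form comes from the SNPrintF cases elided above):

    liftoff     func:    12+0x4a    load from 0000001c val: i32:42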
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index 33170aefbe..b5105c5327 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -8,13 +8,12 @@
#include <cstdint>
#include "src/machine-type.h"
+#include "src/wasm/wasm-tier.h"
namespace v8 {
namespace internal {
namespace wasm {
-enum class ExecutionEngine { kTurbofan, kLiftoff, kInterpreter };
-
// This struct is created in generated code, hence use low-level types.
struct MemoryTracingInfo {
uint32_t address;
@@ -31,7 +30,7 @@ struct MemoryTracingInfo {
// Callback for tracing a memory operation for debugging.
// Triggered by --wasm-trace-memory.
-void TraceMemoryOperation(ExecutionEngine, const MemoryTracingInfo* info,
+void TraceMemoryOperation(ExecutionTier, const MemoryTracingInfo* info,
int func_index, int position, uint8_t* mem_start);
} // namespace wasm
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 59c7bedbc1..b143b631a1 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -52,8 +52,7 @@ namespace wasm {
enum class CompilationEvent : uint8_t {
kFinishedBaselineCompilation,
kFinishedTopTierCompilation,
- kFailedCompilation,
- kDestroyed
+ kFailedCompilation
};
enum class CompileMode : uint8_t { kRegular, kTiering };
@@ -67,10 +66,14 @@ class CompilationState {
CompilationState(internal::Isolate*, const ModuleEnv&);
~CompilationState();
- // Needs to be set before {AddCompilationUnits} is run, which triggers
- // background compilation.
+ // Set the number of compilation units expected to be executed. Needs to be
+ // set before {AddCompilationUnits} is run, which triggers background
+ // compilation.
void SetNumberOfFunctionsToCompile(size_t num_functions);
- void AddCallback(
+
+ // Set the callback function to be called on compilation events. Needs to be
+ // set before {AddCompilationUnits} is run.
+ void SetCallback(
std::function<void(CompilationEvent, ErrorThrower*)> callback);
// Inserts new functions to compile and kicks off compilation.
@@ -85,10 +88,10 @@ class CompilationState {
void OnError(ErrorThrower* thrower);
void OnFinishedUnit();
void ScheduleUnitForFinishing(std::unique_ptr<WasmCompilationUnit> unit,
- WasmCompilationUnit::CompilationMode mode);
+ ExecutionTier mode);
- void CancelAndWait();
- void OnBackgroundTaskStopped();
+ void OnBackgroundTaskStopped(const WasmFeatures& detected);
+ void PublishDetectedFeatures(Isolate* isolate, const WasmFeatures& detected);
void RestartBackgroundTasks(size_t max = std::numeric_limits<size_t>::max());
// Only one foreground thread (finisher) is allowed to run at a time.
// {SetFinisherIsRunning} returns whether the flag changed its state.
@@ -108,8 +111,10 @@ class CompilationState {
return baseline_compilation_finished_;
}
+ WasmEngine* wasm_engine() const { return wasm_engine_; }
CompileMode compile_mode() const { return compile_mode_; }
ModuleEnv* module_env() { return &module_env_; }
+ WasmFeatures* detected_features() { return &detected_features_; }
private:
void NotifyOnEvent(CompilationEvent event, ErrorThrower* thrower);
@@ -145,16 +150,16 @@ class CompilationState {
std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_finish_units_;
std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_finish_units_;
+ // Features detected to be used in this module. Features can be detected
+ // as a module is being compiled.
+ WasmFeatures detected_features_ = kNoWasmFeatures;
+
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
- // TODO(mstarzinger): We should make sure this allows at most one callback
- // to exist for each {CompilationState} because reifying the error object on
- // the given {ErrorThrower} can be done at most once.
- std::vector<std::function<void(CompilationEvent, ErrorThrower*)>> callbacks_;
+ // Callback function to be called on compilation events.
+ std::function<void(CompilationEvent, ErrorThrower*)> callback_;
- // When canceling the background_task_manager_, use {CancelAndWait} on
- // the CompilationState in order to cleanly clean up.
CancelableTaskManager background_task_manager_;
CancelableTaskManager foreground_task_manager_;
std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
@@ -167,15 +172,22 @@ class CompilationState {
namespace {
+void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
+ if (detected.threads) {
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes);
+ }
+}
+
class JSToWasmWrapperCache {
public:
- Handle<Code> GetOrCompileJSToWasmWrapper(
- Isolate* isolate, const wasm::NativeModule* native_module,
- uint32_t func_index, wasm::UseTrapHandler use_trap_handler) {
- const wasm::WasmModule* module = native_module->module();
- const wasm::WasmFunction* func = &module->functions[func_index];
+ Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate,
+ const NativeModule* native_module,
+ uint32_t func_index,
+ UseTrapHandler use_trap_handler) {
+ const WasmModule* module = native_module->module();
+ const WasmFunction* func = &module->functions[func_index];
bool is_import = func_index < module->num_imported_functions;
- std::pair<bool, wasm::FunctionSig> key(is_import, *func->sig);
+ std::pair<bool, FunctionSig> key(is_import, *func->sig);
Handle<Code>& cached = cache_[key];
if (!cached.is_null()) return cached;
@@ -190,7 +202,7 @@ class JSToWasmWrapperCache {
private:
// We generate different code for calling imports than calling wasm functions
// in this module. Both are cached separately.
- using CacheKey = std::pair<bool, wasm::FunctionSig>;
+ using CacheKey = std::pair<bool, FunctionSig>;
std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
};
@@ -224,6 +236,7 @@ class InstanceBuilder {
};
Isolate* isolate_;
+ const WasmFeatures enabled_;
const WasmModule* const module_;
ErrorThrower* thrower_;
Handle<WasmModuleObject> module_object_;
@@ -236,7 +249,7 @@ class InstanceBuilder {
JSToWasmWrapperCache js_to_wasm_cache_;
std::vector<SanitizedImport> sanitized_imports_;
- wasm::UseTrapHandler use_trap_handler() const {
+ UseTrapHandler use_trap_handler() const {
return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
: kNoTrapHandler;
}
@@ -327,9 +340,8 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
return {};
}
-wasm::WasmCode* LazyCompileFunction(Isolate* isolate,
- NativeModule* native_module,
- int func_index) {
+WasmCode* LazyCompileFunction(Isolate* isolate, NativeModule* native_module,
+ int func_index) {
base::ElapsedTimer compilation_timer;
DCHECK(!native_module->has_code(static_cast<uint32_t>(func_index)));
@@ -357,12 +369,13 @@ wasm::WasmCode* LazyCompileFunction(Isolate* isolate,
module_start + func->code.end_offset()};
ErrorThrower thrower(isolate, "WasmLazyCompile");
- WasmCompilationUnit unit(isolate, module_env, native_module, body, func_name,
- func_index);
- unit.ExecuteCompilation();
- wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower);
+ WasmCompilationUnit unit(isolate->wasm_engine(), module_env, native_module,
+ body, func_name, func_index, isolate->counters());
+ unit.ExecuteCompilation(
+ native_module->compilation_state()->detected_features());
+ WasmCode* wasm_code = unit.FinishCompilation(&thrower);
- if (wasm::WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate);
+ if (WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate);
// If there is a pending error, something really went wrong. The module was
// verified before starting execution with lazy compilation.
@@ -378,11 +391,6 @@ wasm::WasmCode* LazyCompileFunction(Isolate* isolate,
auto counters = isolate->counters();
counters->wasm_lazily_compiled_functions()->Increment();
- counters->wasm_generated_code_size()->Increment(
- static_cast<int>(wasm_code->instructions().size()));
- counters->wasm_reloc_size()->Increment(
- static_cast<int>(wasm_code->reloc_info().size()));
-
counters->wasm_lazy_compilation_throughput()->AddSample(
compilation_time != 0 ? static_cast<int>(func_size / compilation_time)
: 0);
@@ -399,8 +407,7 @@ Address CompileLazy(Isolate* isolate, NativeModule* native_module,
NativeModuleModificationScope native_module_modification_scope(native_module);
- wasm::WasmCode* result =
- LazyCompileFunction(isolate, native_module, func_index);
+ WasmCode* result = LazyCompileFunction(isolate, native_module, func_index);
DCHECK_NOT_NULL(result);
DCHECK_EQ(func_index, result->index());
@@ -422,22 +429,6 @@ void RecordStats(const Code* code, Counters* counters) {
counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
-void RecordStats(const wasm::WasmCode* code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(
- static_cast<int>(code->instructions().size()));
- counters->wasm_reloc_size()->Increment(
- static_cast<int>(code->reloc_info().size()));
-}
-
-void RecordStats(const wasm::NativeModule* native_module, Counters* counters) {
- for (uint32_t i = native_module->num_imported_functions(),
- e = native_module->num_functions();
- i < e; ++i) {
- const wasm::WasmCode* code = native_module->code(i);
- if (code != nullptr) RecordStats(code, counters);
- }
-}
-
bool in_bounds(uint32_t offset, size_t size, size_t upper) {
return offset + size <= upper && offset + size >= offset;
}
@@ -472,17 +463,15 @@ class CompilationUnitBuilder {
Vector<const uint8_t> bytes, WasmName name) {
switch (compilation_state_->compile_mode()) {
case CompileMode::kTiering:
- tiering_units_.emplace_back(
- CreateUnit(function, buffer_offset, bytes, name,
- WasmCompilationUnit::CompilationMode::kTurbofan));
- baseline_units_.emplace_back(
- CreateUnit(function, buffer_offset, bytes, name,
- WasmCompilationUnit::CompilationMode::kLiftoff));
+ tiering_units_.emplace_back(CreateUnit(
+ function, buffer_offset, bytes, name, ExecutionTier::kOptimized));
+ baseline_units_.emplace_back(CreateUnit(
+ function, buffer_offset, bytes, name, ExecutionTier::kBaseline));
return;
case CompileMode::kRegular:
baseline_units_.emplace_back(
CreateUnit(function, buffer_offset, bytes, name,
- WasmCompilationUnit::GetDefaultCompilationMode()));
+ WasmCompilationUnit::GetDefaultExecutionTier()));
return;
}
UNREACHABLE();
@@ -501,17 +490,17 @@ class CompilationUnitBuilder {
}
private:
- std::unique_ptr<WasmCompilationUnit> CreateUnit(
- const WasmFunction* function, uint32_t buffer_offset,
- Vector<const uint8_t> bytes, WasmName name,
- WasmCompilationUnit::CompilationMode mode) {
+ std::unique_ptr<WasmCompilationUnit> CreateUnit(const WasmFunction* function,
+ uint32_t buffer_offset,
+ Vector<const uint8_t> bytes,
+ WasmName name,
+ ExecutionTier mode) {
return base::make_unique<WasmCompilationUnit>(
- compilation_state_->isolate(), compilation_state_->module_env(),
+ compilation_state_->wasm_engine(), compilation_state_->module_env(),
native_module_,
- wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
- bytes.end()},
- name, function->func_index, mode,
- compilation_state_->isolate()->async_counters().get());
+ FunctionBody{function->sig, buffer_offset, bytes.begin(), bytes.end()},
+ name, function->func_index,
+ compilation_state_->isolate()->async_counters().get(), mode);
}
NativeModule* native_module_;
@@ -524,11 +513,9 @@ class CompilationUnitBuilder {
// foreground and background threads). The no_finisher_callback is called
// within the result_mutex_ lock when no finishing task is running, i.e. when
// the finisher_is_running_ flag is not set.
-bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state) {
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
- DisallowCodeDependencyChange no_dependency_change;
+bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state,
+ WasmFeatures* detected) {
+ DisallowHeapAccess no_heap_access;
std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextCompilationUnit();
@@ -539,8 +526,8 @@ bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state) {
// to Turbofan if it cannot be compiled using Liftoff. This can be removed
// later as soon as Liftoff can compile any function. Then, we can directly
// access {unit->mode()} within {ScheduleUnitForFinishing()}.
- WasmCompilationUnit::CompilationMode mode = unit->mode();
- unit->ExecuteCompilation();
+ ExecutionTier mode = unit->mode();
+ unit->ExecuteCompilation(detected);
compilation_state->ScheduleUnitForFinishing(std::move(unit), mode);
return true;
@@ -573,7 +560,7 @@ void FinishCompilationUnits(CompilationState* compilation_state,
std::unique_ptr<WasmCompilationUnit> unit =
compilation_state->GetNextExecutedUnit();
if (unit == nullptr) break;
- wasm::WasmCode* result = unit->FinishCompilation(thrower);
+ WasmCode* result = unit->FinishCompilation(thrower);
if (thrower->error()) {
compilation_state->Abort();
@@ -641,8 +628,10 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
// result is enqueued in {baseline_finish_units_}.
// The foreground task bypasses waiting on memory threshold, because
// its results will immediately be converted to code (below).
- while (FetchAndExecuteCompilationUnit(compilation_state) &&
- !compilation_state->baseline_compilation_finished()) {
+ WasmFeatures detected_features;
+ while (
+ FetchAndExecuteCompilationUnit(compilation_state, &detected_features) &&
+ !compilation_state->baseline_compilation_finished()) {
// 2.b) If {baseline_finish_units_} contains a compilation unit, the main
// thread dequeues it and finishes the compilation unit. Compilation
// units are finished concurrently to the background threads to save
@@ -663,6 +652,9 @@ void CompileInParallel(Isolate* isolate, NativeModule* native_module,
if (compilation_state->baseline_compilation_finished()) break;
}
+ // Publish features from the foreground and background tasks.
+ compilation_state->PublishDetectedFeatures(isolate, detected_features);
+
// 4) If tiering-compilation is enabled, we need to set the finisher
// to false, such that the background threads will spawn a foreground
// thread to finish the top-tier compilation units.
@@ -679,13 +671,14 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module,
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const WasmModule* module = module_env->module;
+ WasmFeatures detected = kNoWasmFeatures;
for (uint32_t i = 0; i < module->functions.size(); ++i) {
const WasmFunction& func = module->functions[i];
if (func.imported) continue; // Imports are compiled at instantiation time.
// Compile the function.
- wasm::WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
- native_module, thrower, isolate, module_env, &func);
+ WasmCode* code = WasmCompilationUnit::CompileWasmFunction(
+ isolate, native_module, &detected, thrower, module_env, &func);
if (code == nullptr) {
TruncatedUserString<> name(wire_bytes.GetName(&func, module));
thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(),
@@ -693,6 +686,7 @@ void CompileSequentially(Isolate* isolate, NativeModule* native_module,
break;
}
}
+ UpdateFeatureUseCounts(isolate, detected);
}
void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
@@ -709,9 +703,18 @@ void ValidateSequentially(Isolate* isolate, NativeModule* native_module,
const byte* base = wire_bytes.start();
FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(),
base + func.code.end_offset()};
- DecodeResult result = VerifyWasmCodeWithStats(
- isolate->allocator(), module, body, module->origin,
- isolate->async_counters().get());
+ DecodeResult result;
+ {
+ auto time_counter =
+ SELECT_WASM_COUNTER(isolate->async_counters(), module->origin,
+ wasm_decode, function_time);
+
+ TimedHistogramScope wasm_decode_function_time_scope(time_counter);
+ WasmFeatures detected;
+ result = VerifyWasmCode(isolate->allocator(),
+ native_module->enabled_features(), module,
+ &detected, body);
+ }
if (result.failed()) {
TruncatedUserString<> name(wire_bytes.GetName(&func, module));
thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
@@ -755,8 +758,6 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
CompileSequentially(isolate, native_module, env, thrower);
}
if (thrower->error()) return;
-
- RecordStats(native_module, isolate->counters());
}
}
@@ -801,7 +802,7 @@ class FinishCompileTask : public CancelableTask {
}
ErrorThrower thrower(compilation_state_->isolate(), "AsyncCompile");
- wasm::WasmCode* result = unit->FinishCompilation(&thrower);
+ WasmCode* result = unit->FinishCompilation(&thrower);
if (thrower.error()) {
DCHECK_NULL(result);
@@ -818,11 +819,7 @@ class FinishCompileTask : public CancelableTask {
DCHECK_EQ(CompileMode::kTiering, compilation_state_->compile_mode());
DCHECK(!result->is_liftoff());
- if (wasm::WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate);
-
- // Update the counters to include the top-tier code.
- RecordStats(result,
- compilation_state_->isolate()->async_counters().get());
+ if (WasmCode::ShouldBeLogged(isolate)) result->LogCode(isolate);
}
// Update the compilation state, and possibly notify
@@ -855,26 +852,34 @@ class BackgroundCompileTask : public CancelableTask {
// The number of currently running background tasks is reduced in
// {OnBackgroundTaskStopped}.
while (!compilation_state_->failed()) {
- if (!FetchAndExecuteCompilationUnit(compilation_state_)) {
+ if (!FetchAndExecuteCompilationUnit(compilation_state_,
+ &detected_features_)) {
break;
}
}
- compilation_state_->OnBackgroundTaskStopped();
+ compilation_state_->OnBackgroundTaskStopped(detected_features_);
}
private:
CompilationState* compilation_state_;
+ WasmFeatures detected_features_ = kNoWasmFeatures;
};
} // namespace
MaybeHandle<WasmModuleObject> CompileToModuleObject(
- Isolate* isolate, ErrorThrower* thrower,
+ Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
const WasmModule* wasm_module = module.get();
TimedHistogramScope wasm_compile_module_time_scope(SELECT_WASM_COUNTER(
isolate->counters(), wasm_module->origin, wasm_compile, module_time));
+
+ // Embedder usage count for declared shared memories.
+ if (wasm_module->has_shared_memory) {
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
+ }
+
// TODO(6792): No longer needed once WebAssembly code is off heap. Use
// base::Optional to be able to close the scope before notifying the debugger.
base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
@@ -902,7 +907,6 @@ MaybeHandle<WasmModuleObject> CompileToModuleObject(
// TODO(clemensh): For the same module (same bytes / same hash), we should
// only have one WasmModuleObject. Otherwise, we might only set
// breakpoints on a (potentially empty) subset of the instances.
-
ModuleEnv env = CreateDefaultModuleEnv(wasm_module);
// Create the compiled module object and populate with compiled functions
@@ -910,8 +914,8 @@ MaybeHandle<WasmModuleObject> CompileToModuleObject(
// serializable. Instantiation may occur off a deserialized version of this
// object.
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, std::move(module), env, std::move(wire_bytes_copy), script,
- asm_js_offset_table);
+ isolate, enabled, std::move(module), env, std::move(wire_bytes_copy),
+ script, asm_js_offset_table);
CompileNativeModule(isolate, thrower, module_object, wasm_module, &env);
if (thrower->error()) return {};
@@ -938,6 +942,7 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
MaybeHandle<JSReceiver> ffi,
MaybeHandle<JSArrayBuffer> memory)
: isolate_(isolate),
+ enabled_(module_object->native_module()->enabled_features()),
module_(module_object->module()),
thrower_(thrower),
module_object_(module_object),
@@ -987,13 +992,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
memory->set_is_neuterable(false);
- DCHECK_IMPLIES(use_trap_handler(),
- module_->origin == kAsmJsOrigin ||
- memory->is_wasm_memory() ||
- memory->backing_store() == nullptr ||
- // TODO(836800) Remove once is_wasm_memory transfers over
- // post-message.
- (FLAG_experimental_wasm_threads && memory->is_shared()));
+ DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin ||
+ memory->is_wasm_memory() ||
+ memory->backing_store() == nullptr);
} else if (initial_pages > 0 || use_trap_handler()) {
// We need to unconditionally create a guard region if using trap handlers,
// even when the size is zero to prevent null-dereference issues
@@ -1037,11 +1038,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Create the WebAssembly.Instance object.
//--------------------------------------------------------------------------
- wasm::NativeModule* native_module = module_object_->native_module();
+ NativeModule* native_module = module_object_->native_module();
TRACE("New module instantiation for %p\n", native_module);
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, module_object_);
- wasm::NativeModuleModificationScope native_modification_scope(native_module);
+ NativeModuleModificationScope native_modification_scope(native_module);
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
@@ -1075,7 +1076,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Set up the array of references to imported globals' array buffers.
//--------------------------------------------------------------------------
if (module_->num_imported_mutable_globals > 0) {
- DCHECK(FLAG_experimental_wasm_mut_global);
// TODO(binji): This allocates one slot for each mutable global, which is
// more than required if multiple globals are imported from the same
// module.
@@ -1127,9 +1127,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (!memory_.is_null()) {
// Double-check the {memory} array buffer matches the instance.
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
- uint32_t mem_size = 0;
- CHECK(memory->byte_length()->ToUint32(&mem_size));
- CHECK_EQ(instance->memory_size(), mem_size);
+ CHECK_EQ(instance->memory_size(), memory->byte_length()->Number());
CHECK_EQ(instance->memory_start(), memory->backing_store());
}
}
@@ -1319,7 +1317,8 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
return expr.val.i32_const;
case WasmInitExpr::kGlobalIndex: {
uint32_t offset = module_->globals[expr.val.global_index].offset;
- return *reinterpret_cast<uint32_t*>(raw_buffer_ptr(globals_, offset));
+ return ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(raw_buffer_ptr(globals_, offset)));
}
default:
UNREACHABLE();
@@ -1348,17 +1347,20 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
num, ValueTypes::TypeName(global.type));
switch (global.type) {
case kWasmI32:
- *GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
+ static_cast<int32_t>(num));
break;
case kWasmI64:
// TODO(titzer): initialization of imported i64 globals.
UNREACHABLE();
break;
case kWasmF32:
- *GetRawGlobalPtr<float>(global) = static_cast<float>(num);
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
+ static_cast<float>(num));
break;
case kWasmF64:
- *GetRawGlobalPtr<double>(global) = static_cast<double>(num);
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
+ static_cast<double>(num));
break;
default:
UNREACHABLE();
@@ -1372,25 +1374,25 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
switch (global.type) {
case kWasmI32: {
int32_t num = value->GetI32();
- *GetRawGlobalPtr<int32_t>(global) = num;
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global), num);
TRACE("%d", num);
break;
}
case kWasmI64: {
int64_t num = value->GetI64();
- *GetRawGlobalPtr<int64_t>(global) = num;
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
TRACE("%" PRId64, num);
break;
}
case kWasmF32: {
float num = value->GetF32();
- *GetRawGlobalPtr<float>(global) = num;
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global), num);
TRACE("%f", num);
break;
}
case kWasmF64: {
double num = value->GetF64();
- *GetRawGlobalPtr<double>(global) = num;
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global), num);
TRACE("%lf", num);
break;
}
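The raw pointer stores above are replaced by explicit little-endian accessors so the globals area has a fixed byte order on every host; the matching reads use {ReadLittleEndianValue}. A minimal round-trip sketch (the {address} variable is illustrative):

    WriteLittleEndianValue<int32_t>(address, 42);
    int32_t v = ReadLittleEndianValue<int32_t>(address);  // v == 42 on LE and BE hosts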
@@ -1519,7 +1521,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
RecordStats(*wrapper_code, isolate_->counters());
WasmCode* wasm_code = native_module->AddCodeCopy(
- wrapper_code, wasm::WasmCode::kWasmToJsWrapper, func_index);
+ wrapper_code, WasmCode::kWasmToJsWrapper, func_index);
ImportedFunctionEntry entry(instance, func_index);
entry.set_wasm_to_js(*js_receiver, wasm_code);
}
@@ -1662,8 +1664,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
// The mutable-global proposal allows importing i64 values, but only if
// they are passed as a WebAssembly.Global object.
- if (global.type == kWasmI64 && !(FLAG_experimental_wasm_mut_global &&
- value->IsWasmGlobalObject())) {
+ if (global.type == kWasmI64 &&
+ !(enabled_.mut_global && value->IsWasmGlobalObject())) {
ReportLinkError("global import cannot have type i64", index,
module_name, import_name);
return -1;
@@ -1684,7 +1686,7 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
}
}
}
- if (FLAG_experimental_wasm_mut_global) {
+ if (enabled_.mut_global) {
if (value->IsWasmGlobalObject()) {
auto global_object = Handle<WasmGlobalObject>::cast(value);
if (global_object->type() != global.type) {
@@ -1759,22 +1761,25 @@ T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
void InstanceBuilder::InitGlobals() {
for (auto global : module_->globals) {
if (global.mutability && global.imported) {
- DCHECK(FLAG_experimental_wasm_mut_global);
continue;
}
switch (global.init.kind) {
case WasmInitExpr::kI32Const:
- *GetRawGlobalPtr<int32_t>(global) = global.init.val.i32_const;
+ WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
+ global.init.val.i32_const);
break;
case WasmInitExpr::kI64Const:
- *GetRawGlobalPtr<int64_t>(global) = global.init.val.i64_const;
+ WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global),
+ global.init.val.i64_const);
break;
case WasmInitExpr::kF32Const:
- *GetRawGlobalPtr<float>(global) = global.init.val.f32_const;
+ WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
+ global.init.val.f32_const);
break;
case WasmInitExpr::kF64Const:
- *GetRawGlobalPtr<double>(global) = global.init.val.f64_const;
+ WriteLittleEndianValue<double>(GetRawGlobalPtr<double>(global),
+ global.init.val.f64_const);
break;
case WasmInitExpr::kGlobalIndex: {
// Initialize with another global.
@@ -1805,8 +1810,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
- const bool is_shared_memory =
- module_->has_shared_memory && i::FLAG_experimental_wasm_threads;
+ const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
i::SharedFlag shared_flag =
is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared;
Handle<JSArrayBuffer> mem_buffer;
@@ -1948,7 +1952,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
case kExternalGlobal: {
const WasmGlobal& global = module_->globals[exp.index];
- if (FLAG_experimental_wasm_mut_global) {
+ if (enabled_.mut_global) {
Handle<JSArrayBuffer> buffer;
uint32_t offset;
@@ -1985,13 +1989,16 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
double num = 0;
switch (global.type) {
case kWasmI32:
- num = *GetRawGlobalPtr<int32_t>(global);
+ num = ReadLittleEndianValue<int32_t>(
+ GetRawGlobalPtr<int32_t>(global));
break;
case kWasmF32:
- num = *GetRawGlobalPtr<float>(global);
+ num =
+ ReadLittleEndianValue<float>(GetRawGlobalPtr<float>(global));
break;
case kWasmF64:
- num = *GetRawGlobalPtr<double>(global);
+ num = ReadLittleEndianValue<double>(
+ GetRawGlobalPtr<double>(global));
break;
case kWasmI64:
thrower_->LinkError(
@@ -2121,10 +2128,11 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
}
AsyncCompileJob::AsyncCompileJob(
- Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
- Handle<Context> context,
- std::unique_ptr<CompilationResultResolver> resolver)
+ Isolate* isolate, const WasmFeatures& enabled,
+ std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
+ std::shared_ptr<CompilationResultResolver> resolver)
: isolate_(isolate),
+ enabled_features_(enabled),
async_counters_(isolate->async_counters()),
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
@@ -2144,17 +2152,9 @@ void AsyncCompileJob::Start() {
}
void AsyncCompileJob::Abort() {
- background_task_manager_.CancelAndWait();
- if (native_module_) native_module_->compilation_state()->Abort();
- if (num_pending_foreground_tasks_ == 0) {
- // No task is pending, we can just remove the AsyncCompileJob.
- isolate_->wasm_engine()->RemoveCompileJob(this);
- } else {
- // There is still a compilation task in the task queue. We enter the
- // AbortCompilation state and wait for this compilation task to abort the
- // AsyncCompileJob.
- NextStep<AbortCompilation>();
- }
+ // Removing this job will trigger the destructor, which will cancel all
+ // compilation.
+ isolate_->wasm_engine()->RemoveCompileJob(this);
}
class AsyncStreamingProcessor final : public StreamingProcessor {
@@ -2202,12 +2202,15 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
AsyncCompileJob::~AsyncCompileJob() {
background_task_manager_.CancelAndWait();
+ if (native_module_) native_module_->compilation_state()->Abort();
+ CancelPendingForegroundTask();
for (auto d : deferred_handles_) delete d;
}
+// This function assumes that it is executed in a HandleScope, and that a
+// context is set on the isolate.
void AsyncCompileJob::FinishCompile() {
- RecordStats(native_module_, counters());
-
+ DCHECK_NOT_NULL(isolate_->context());
// Finish the wasm script now and make it public to the debugger.
Handle<Script> script(module_object_->script(), isolate_);
isolate_->debug()->OnAfterCompile(script);
@@ -2215,6 +2218,11 @@ void AsyncCompileJob::FinishCompile() {
// Log the code within the generated module for profiling.
native_module_->LogWasmCodes(isolate_);
+ // We can only update the feature counts once the entire compile is done.
+ auto compilation_state = native_module_->compilation_state();
+ compilation_state->PublishDetectedFeatures(
+ isolate_, *compilation_state->detected_features());
+
// TODO(wasm): compiling wrappers should be made async as well.
DoSync<CompileWrappers>();
}
@@ -2235,16 +2243,11 @@ void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
// task) and schedule the next step(s), if any.
class AsyncCompileJob::CompileStep {
public:
- explicit CompileStep(int num_background_tasks = 0)
- : num_background_tasks_(num_background_tasks) {}
-
virtual ~CompileStep() {}
void Run(bool on_foreground) {
if (on_foreground) {
HandleScope scope(job_->isolate_);
- --job_->num_pending_foreground_tasks_;
- DCHECK_EQ(0, job_->num_pending_foreground_tasks_);
SaveContext saved_context(job_->isolate_);
job_->isolate_->set_context(*job_->native_context_);
RunInForeground();
@@ -2256,10 +2259,7 @@ class AsyncCompileJob::CompileStep {
virtual void RunInForeground() { UNREACHABLE(); }
virtual void RunInBackground() { UNREACHABLE(); }
- int NumberOfBackgroundTasks() { return num_background_tasks_; }
-
AsyncCompileJob* job_ = nullptr;
- const int num_background_tasks_;
};
class AsyncCompileJob::CompileTask : public CancelableTask {
@@ -2274,18 +2274,55 @@ class AsyncCompileJob::CompileTask : public CancelableTask {
job_(job),
on_foreground_(on_foreground) {}
- void RunInternal() override { job_->step_->Run(on_foreground_); }
+ ~CompileTask() {
+ if (job_ != nullptr && on_foreground_) ResetPendingForegroundTask();
+ }
+
+ void RunInternal() final {
+ if (!job_) return;
+ if (on_foreground_) ResetPendingForegroundTask();
+ job_->step_->Run(on_foreground_);
+ // After execution, reset {job_} such that we don't try to reset the pending
+ // foreground task when the task is deleted.
+ job_ = nullptr;
+ }
+
+ void Cancel() {
+ DCHECK_NOT_NULL(job_);
+ job_ = nullptr;
+ }
private:
+ // {job_} will be cleared to cancel a pending task.
AsyncCompileJob* job_;
bool on_foreground_;
+
+ void ResetPendingForegroundTask() const {
+ DCHECK_EQ(this, job_->pending_foreground_task_);
+ job_->pending_foreground_task_ = nullptr;
+ }
};
void AsyncCompileJob::StartForegroundTask() {
- ++num_pending_foreground_tasks_;
- DCHECK_EQ(1, num_pending_foreground_tasks_);
+ DCHECK_NULL(pending_foreground_task_);
+
+ auto new_task = base::make_unique<CompileTask>(this, true);
+ pending_foreground_task_ = new_task.get();
+ foreground_task_runner_->PostTask(std::move(new_task));
+}
+
+void AsyncCompileJob::ExecuteForegroundTaskImmediately() {
+ DCHECK_NULL(pending_foreground_task_);
- foreground_task_runner_->PostTask(base::make_unique<CompileTask>(this, true));
+ auto new_task = base::make_unique<CompileTask>(this, true);
+ pending_foreground_task_ = new_task.get();
+ new_task->Run();
+}
+
+void AsyncCompileJob::CancelPendingForegroundTask() {
+ if (!pending_foreground_task_) return;
+ pending_foreground_task_->Cancel();
+ pending_foreground_task_ = nullptr;
}
template <typename Step, typename... Args>
@@ -2309,10 +2346,7 @@ void AsyncCompileJob::StartBackgroundTask() {
template <typename Step, typename... Args>
void AsyncCompileJob::DoAsync(Args&&... args) {
NextStep<Step>(std::forward<Args>(args)...);
- int end = step_->NumberOfBackgroundTasks();
- for (int i = 0; i < end; ++i) {
- StartBackgroundTask();
- }
+ StartBackgroundTask();
}
template <typename Step, typename... Args>
@@ -2326,8 +2360,6 @@ void AsyncCompileJob::NextStep(Args&&... args) {
//==========================================================================
class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
public:
- DecodeModule() : CompileStep(1) {}
-
void RunInBackground() override {
ModuleResult result;
{
@@ -2335,17 +2367,18 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
DisallowHeapAllocation no_allocation;
// Decode the module bytes.
TRACE_COMPILE("(1) Decoding module...\n");
- result = AsyncDecodeWasmModule(job_->isolate_, job_->wire_bytes_.start(),
- job_->wire_bytes_.end(), false,
- kWasmOrigin, job_->async_counters());
+ result =
+ DecodeWasmModule(job_->enabled_features_, job_->wire_bytes_.start(),
+ job_->wire_bytes_.end(), false, kWasmOrigin,
+ job_->async_counters().get(),
+ job_->isolate()->wasm_engine()->allocator());
}
if (result.failed()) {
// Decoding failure; reject the promise and clean up.
job_->DoSync<DecodeFail>(std::move(result));
} else {
// Decode passed.
- job_->module_ = std::move(result.val);
- job_->DoSync<PrepareAndStartCompile>(true);
+ job_->DoSync<PrepareAndStartCompile>(std::move(result.val), true);
}
}
};
@@ -2373,10 +2406,12 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
//==========================================================================
class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
public:
- explicit PrepareAndStartCompile(bool start_compilation)
- : start_compilation_(start_compilation) {}
+ PrepareAndStartCompile(std::shared_ptr<const WasmModule> module,
+ bool start_compilation)
+ : module_(module), start_compilation_(start_compilation) {}
private:
+ std::shared_ptr<const WasmModule> module_;
bool start_compilation_;
void RunInForeground() override {
@@ -2386,12 +2421,18 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// is done.
job_->background_task_manager_.CancelAndWait();
+ // Embedder usage count for declared shared memories.
+ if (module_->has_shared_memory) {
+ job_->isolate_->CountUsage(
+ v8::Isolate::UseCounterFeature::kWasmSharedMemory);
+ }
+
// Create heap objects for script and module bytes to be stored in the
// module object. Asm.js is not compiled asynchronously.
Handle<Script> script = CreateWasmScript(job_->isolate_, job_->wire_bytes_);
Handle<ByteArray> asm_js_offset_table;
- const WasmModule* module = job_->module_.get();
+ const WasmModule* module = module_.get();
ModuleEnv env = CreateDefaultModuleEnv(module);
// TODO(wasm): Improve efficiency of storing module wire bytes. Only store
// relevant sections, not function bodies
@@ -2403,7 +2444,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// breakpoints on a (potentially empty) subset of the instances.
// Create the module object.
job_->module_object_ = WasmModuleObject::New(
- job_->isolate_, job_->module_, env,
+ job_->isolate_, job_->enabled_features_, module_, env,
{std::move(job_->bytes_copy_), job_->wire_bytes_.length()}, script,
asm_js_offset_table);
job_->native_module_ = job_->module_object_->native_module();
@@ -2433,7 +2474,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
// capture the {job} pointer by copy, as it otherwise is dependent
// on the current step we are in.
AsyncCompileJob* job = job_;
- compilation_state->AddCallback(
+ compilation_state->SetCallback(
[job](CompilationEvent event, ErrorThrower* thrower) {
// Callback is called from a foreground thread.
switch (event) {
@@ -2445,17 +2486,14 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
return;
case CompilationEvent::kFinishedTopTierCompilation:
- // It is only safe to remove the AsyncCompileJob if no
- // foreground task is currently pending, and no finisher is
- // outstanding (streaming compilation).
- if (job->num_pending_foreground_tasks_ == 0 &&
- job->outstanding_finishers_.load() == 0) {
- job->isolate_->wasm_engine()->RemoveCompileJob(job);
- } else {
- // If a foreground task was pending or a finsher was pending,
- // we will rely on FinishModule to remove the job.
+ // If a foreground task or a finisher is pending, we rely on
+ // FinishModule to remove the job.
+ if (job->pending_foreground_task_ ||
+ job->outstanding_finishers_.load() > 0) {
job->tiering_completed_ = true;
+ return;
}
+ job->isolate_->wasm_engine()->RemoveCompileJob(job);
return;
case CompilationEvent::kFailedCompilation: {
// Tier-up compilation should not fail if baseline compilation
@@ -2473,9 +2511,6 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
job->DoSync<CompileFailed>(error);
return;
}
- case CompilationEvent::kDestroyed:
- // Nothing to do.
- return;
}
UNREACHABLE();
});
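The top-tier branch above removes the job immediately only when nothing else still references it; otherwise it marks tiering as completed and defers removal to FinishModule. A minimal sketch of that decision, with hypothetical stand-in types:

#include <atomic>
#include <cstdio>

// Simplified stand-in for the job state consulted by the event callback.
struct Job {
  void* pending_foreground_task = nullptr;  // non-null while a task is queued
  std::atomic<int> outstanding_finishers{0};
  bool tiering_completed = false;
};

// Mirrors the kFinishedTopTierCompilation branch: only remove the job right
// away if nothing else still references it; otherwise defer to the finisher
// (FinishModule in the real code).
bool OnTopTierFinished(Job* job) {
  if (job->pending_foreground_task != nullptr ||
      job->outstanding_finishers.load() > 0) {
    job->tiering_completed = true;  // FinishModule will clean up later.
    return false;                   // job not removed here
  }
  return true;  // safe to remove the job immediately
}

int main() {
  Job job;
  job.outstanding_finishers = 1;
  std::printf("removed now: %d\n", OnTopTierFinished(&job));  // 0: deferred
  job.outstanding_finishers = 0;
  std::printf("removed now: %d\n", OnTopTierFinished(&job));  // 1: removed
}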
@@ -2535,8 +2570,8 @@ class AsyncCompileJob::FinishModule : public CompileStep {
TRACE_COMPILE("(6) Finish module...\n");
job_->AsyncCompileSucceeded(job_->module_object_);
- size_t num_functions =
- job_->module_->functions.size() - job_->module_->num_imported_functions;
+ size_t num_functions = job_->native_module_->num_functions() -
+ job_->native_module_->num_imported_functions();
if (job_->native_module_->compilation_state()->compile_mode() ==
CompileMode::kRegular ||
num_functions == 0) {
@@ -2556,15 +2591,10 @@ class AsyncCompileJob::FinishModule : public CompileStep {
}
};
-class AsyncCompileJob::AbortCompilation : public CompileStep {
- void RunInForeground() override {
- TRACE_COMPILE("Abort asynchronous compilation ...\n");
- job_->isolate_->wasm_engine()->RemoveCompileJob(job_);
- }
-};
-
AsyncStreamingProcessor::AsyncStreamingProcessor(AsyncCompileJob* job)
- : job_(job), compilation_unit_builder_(nullptr) {}
+ : decoder_(job->enabled_features_),
+ job_(job),
+ compilation_unit_builder_(nullptr) {}
void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
// Make sure all background tasks stopped executing before we change the state
@@ -2581,7 +2611,7 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
if (job_->native_module_) {
job_->native_module_->compilation_state()->Abort();
- if (job_->num_pending_foreground_tasks_ == 0) {
+ if (job_->pending_foreground_task_ == nullptr) {
job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
} else {
job_->NextStep<AsyncCompileJob::DecodeFail>(std::move(result));
@@ -2600,8 +2630,8 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
bool AsyncStreamingProcessor::ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) {
TRACE_STREAMING("Process module header...\n");
- decoder_.StartDecoding(job_->isolate());
- job_->module_ = decoder_.shared_module();
+ decoder_.StartDecoding(job_->async_counters().get(),
+ job_->isolate()->wasm_engine()->allocator());
decoder_.DecodeModuleHeader(bytes, offset);
if (!decoder_.ok()) {
FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
@@ -2652,14 +2682,11 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
FinishAsyncCompileJobWithError(decoder_.FinishDecoding(false));
return false;
}
- job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>(false);
+ job_->NextStep<AsyncCompileJob::PrepareAndStartCompile>(
+ decoder_.shared_module(), false);
// Execute the PrepareAndStartCompile step immediately and not in a separate
- // task. The step expects to be run on a separate foreground thread though, so
- // we to increment {num_pending_foreground_tasks_} to look like one.
- ++job_->num_pending_foreground_tasks_;
- DCHECK_EQ(1, job_->num_pending_foreground_tasks_);
- constexpr bool on_foreground = true;
- job_->step_->Run(on_foreground);
+ // task.
+ job_->ExecuteForegroundTaskImmediately();
job_->native_module_->compilation_state()->SetNumberOfFunctionsToCompile(
functions_count);
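ExecuteForegroundTaskImmediately replaces the old trick of bumping a pending-task counter: the queued step is simply run inline on the current foreground thread. A sketch of the pattern, assuming a job that holds at most one queued step (all names are stand-ins):

#include <functional>
#include <iostream>
#include <utility>

// A queued foreground step; in the real code this is a CompileStep run by a
// CompileTask. Here it is just a callable.
using Step = std::function<void()>;

class Job {
 public:
  // Queue the next step without scheduling a task yet (NextStep<...> analogue).
  void QueueStep(Step step) { next_step_ = std::move(step); }

  // Run the queued step right away on the current (foreground) thread,
  // instead of pretending a separate foreground task is pending.
  void ExecuteForegroundTaskImmediately() {
    Step step = std::move(next_step_);
    next_step_ = nullptr;
    if (step) step();
  }

 private:
  Step next_step_;
};

int main() {
  Job job;
  job.QueueStep([] { std::cout << "PrepareAndStartCompile runs inline\n"; });
  job.ExecuteForegroundTaskImmediately();
}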
@@ -2709,15 +2736,17 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
}
ModuleResult result = decoder_.FinishDecoding(false);
DCHECK(result.ok());
- DCHECK_EQ(job_->module_, result.val);
if (job_->DecrementAndCheckFinisherCount()) {
if (job_->native_module_ == nullptr) {
// We are processing a WebAssembly module without code section. We need to
// prepare compilation first before we can finish it.
// {PrepareAndStartCompile} will call {FinishCompile} by itself if there
// is no code section.
- job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(true);
+ job_->DoSync<AsyncCompileJob::PrepareAndStartCompile>(result.val, true);
} else {
+ HandleScope scope(job_->isolate_);
+ SaveContext saved_context(job_->isolate_);
+ job_->isolate_->set_context(*job_->native_context_);
job_->FinishCompile();
}
}
@@ -2763,17 +2792,11 @@ CompilationState::CompilationState(internal::Isolate* isolate,
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
-
- // Register task manager for clean shutdown in case of an engine shutdown.
- wasm_engine_->Register(&background_task_manager_);
- wasm_engine_->Register(&foreground_task_manager_);
}
CompilationState::~CompilationState() {
- CancelAndWait();
+ background_task_manager_.CancelAndWait();
foreground_task_manager_.CancelAndWait();
- wasm_engine_->Unregister(&foreground_task_manager_);
- NotifyOnEvent(CompilationEvent::kDestroyed, nullptr);
}
void CompilationState::SetNumberOfFunctionsToCompile(size_t num_functions) {
@@ -2786,9 +2809,10 @@ void CompilationState::SetNumberOfFunctionsToCompile(size_t num_functions) {
}
}
-void CompilationState::AddCallback(
+void CompilationState::SetCallback(
std::function<void(CompilationEvent, ErrorThrower*)> callback) {
- callbacks_.push_back(callback);
+ DCHECK_NULL(callback_);
+ callback_ = callback;
}
void CompilationState::AddCompilationUnits(
@@ -2799,8 +2823,7 @@ void CompilationState::AddCompilationUnits(
if (compile_mode_ == CompileMode::kTiering) {
DCHECK_EQ(baseline_units.size(), tiering_units.size());
- DCHECK_EQ(tiering_units.back()->mode(),
- WasmCompilationUnit::CompilationMode::kTurbofan);
+ DCHECK_EQ(tiering_units.back()->mode(), ExecutionTier::kOptimized);
tiering_compilation_units_.insert(
tiering_compilation_units_.end(),
std::make_move_iterator(tiering_units.begin()),
@@ -2860,7 +2883,7 @@ void CompilationState::OnFinishedUnit() {
--outstanding_units_;
if (outstanding_units_ == 0) {
- CancelAndWait();
+ background_task_manager_.CancelAndWait();
baseline_compilation_finished_ = true;
DCHECK(compile_mode_ == CompileMode::kRegular ||
@@ -2887,11 +2910,10 @@ void CompilationState::OnFinishedUnit() {
}
void CompilationState::ScheduleUnitForFinishing(
- std::unique_ptr<WasmCompilationUnit> unit,
- WasmCompilationUnit::CompilationMode mode) {
+ std::unique_ptr<WasmCompilationUnit> unit, ExecutionTier mode) {
base::LockGuard<base::Mutex> guard(&mutex_);
if (compile_mode_ == CompileMode::kTiering &&
- mode == WasmCompilationUnit::CompilationMode::kTurbofan) {
+ mode == ExecutionTier::kOptimized) {
tiering_finish_units_.push_back(std::move(unit));
} else {
baseline_finish_units_.push_back(std::move(unit));
@@ -2904,15 +2926,21 @@ void CompilationState::ScheduleUnitForFinishing(
}
}
-void CompilationState::CancelAndWait() {
- background_task_manager_.CancelAndWait();
- wasm_engine_->Unregister(&background_task_manager_);
-}
-
-void CompilationState::OnBackgroundTaskStopped() {
+void CompilationState::OnBackgroundTaskStopped(const WasmFeatures& detected) {
base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK_LE(1, num_background_tasks_);
--num_background_tasks_;
+ UnionFeaturesInto(&detected_features_, detected);
+}
+
+void CompilationState::PublishDetectedFeatures(Isolate* isolate,
+ const WasmFeatures& detected) {
+ // Notifying the isolate of the feature counts must take place under
+ // the mutex, because even if we have finished baseline compilation,
+ // tiering compilations may still occur in the background.
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ UnionFeaturesInto(&detected_features_, detected);
+ UpdateFeatureUseCounts(isolate, detected_features_);
}
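Both hooks merge per-task feature detections into a shared set under the compilation-state mutex, since tier-up tasks may still be running when the counts are published. A sketch of that merge-then-publish pattern in plain C++ (the feature flags and the counter output are stand-ins):

#include <cstdio>
#include <mutex>

// Stand-in for WasmFeatures: one flag per detectable feature.
struct Features {
  bool threads;
  bool simd;
};

void UnionFeaturesInto(Features* dst, const Features& src) {
  dst->threads |= src.threads;
  dst->simd |= src.simd;
}

class CompilationState {
 public:
  // Called when a background task stops; detections are merged under the
  // mutex because several tasks may stop concurrently.
  void OnBackgroundTaskStopped(const Features& detected) {
    std::lock_guard<std::mutex> guard(mutex_);
    UnionFeaturesInto(&detected_, detected);
  }

  // Merging and publishing also happen under the mutex, since tier-up tasks
  // may still be detecting features in the background.
  void PublishDetectedFeatures(const Features& detected) {
    std::lock_guard<std::mutex> guard(mutex_);
    UnionFeaturesInto(&detected_, detected);
    std::printf("use counters: threads=%d simd=%d\n", detected_.threads,
                detected_.simd);
  }

 private:
  std::mutex mutex_;
  Features detected_{false, false};
};

int main() {
  CompilationState state;
  state.OnBackgroundTaskStopped(Features{true, false});
  state.PublishDetectedFeatures(Features{false, true});
}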
void CompilationState::RestartBackgroundTasks(size_t max) {
@@ -2962,14 +2990,12 @@ void CompilationState::Abort() {
base::LockGuard<base::Mutex> guard(&mutex_);
failed_ = true;
}
- CancelAndWait();
+ background_task_manager_.CancelAndWait();
}
void CompilationState::NotifyOnEvent(CompilationEvent event,
ErrorThrower* thrower) {
- for (auto& callback_function : callbacks_) {
- callback_function(event, thrower);
- }
+ if (callback_) callback_(event, thrower);
}
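With SetCallback the compilation state supports exactly one listener instead of a list; a sketch of the simplified notification path (stand-in event enum, no ErrorThrower):

#include <cassert>
#include <functional>
#include <iostream>
#include <utility>

enum class CompilationEvent { kFinishedBaselineCompilation, kFailedCompilation };

class CompilationState {
 public:
  // Exactly one listener is supported; setting it twice is a programming error.
  void SetCallback(std::function<void(CompilationEvent)> callback) {
    assert(!callback_);
    callback_ = std::move(callback);
  }

  void NotifyOnEvent(CompilationEvent event) {
    if (callback_) callback_(event);
  }

 private:
  std::function<void(CompilationEvent)> callback_;
};

int main() {
  CompilationState state;
  state.SetCallback([](CompilationEvent) { std::cout << "event received\n"; });
  state.NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation);
}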
void CompileJsToWasmWrappers(Isolate* isolate,
@@ -2978,7 +3004,7 @@ void CompileJsToWasmWrappers(Isolate* isolate,
int wrapper_index = 0;
Handle<FixedArray> export_wrappers(module_object->export_wrappers(), isolate);
NativeModule* native_module = module_object->native_module();
- wasm::UseTrapHandler use_trap_handler =
+ UseTrapHandler use_trap_handler =
native_module->use_trap_handler() ? kUseTrapHandler : kNoTrapHandler;
const WasmModule* module = native_module->module();
for (auto exp : module->export_table) {
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index eb9f271543..57bbd883e2 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -11,6 +11,7 @@
#include "src/cancelable-task.h"
#include "src/globals.h"
+#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-module.h"
namespace v8 {
@@ -48,7 +49,7 @@ std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState(
ModuleEnv* GetModuleEnv(CompilationState* compilation_state);
MaybeHandle<WasmModuleObject> CompileToModuleObject(
- Isolate* isolate, ErrorThrower* thrower,
+ Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
@@ -77,9 +78,10 @@ Address CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
// TODO(wasm): factor out common parts of this with the synchronous pipeline.
class AsyncCompileJob {
public:
- explicit AsyncCompileJob(Isolate* isolate, std::unique_ptr<byte[]> bytes_copy,
- size_t length, Handle<Context> context,
- std::unique_ptr<CompilationResultResolver> resolver);
+ AsyncCompileJob(Isolate* isolate, const WasmFeatures& enabled_features,
+ std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Handle<Context> context,
+ std::shared_ptr<CompilationResultResolver> resolver);
~AsyncCompileJob();
void Start();
@@ -87,6 +89,7 @@ class AsyncCompileJob {
std::shared_ptr<StreamingDecoder> CreateStreamingDecoder();
void Abort();
+ void CancelPendingForegroundTask();
Isolate* isolate() const { return isolate_; }
@@ -95,14 +98,12 @@ class AsyncCompileJob {
class CompileStep;
// States of the AsyncCompileJob.
- class DecodeModule;
- class DecodeFail;
- class PrepareAndStartCompile;
- class CompileFailed;
- class CompileWrappers;
- class FinishModule;
- class AbortCompilation;
- class UpdateToTopTierCompiledCode;
+ class DecodeModule; // Step 1 (async)
+ class DecodeFail; // Step 1b (sync)
+ class PrepareAndStartCompile; // Step 2 (sync)
+ class CompileFailed; // Step 4b (sync)
+ class CompileWrappers; // Step 5 (sync)
+ class FinishModule; // Step 6 (sync)
const std::shared_ptr<Counters>& async_counters() const {
return async_counters_;
@@ -116,6 +117,7 @@ class AsyncCompileJob {
void AsyncCompileSucceeded(Handle<WasmModuleObject> result);
void StartForegroundTask();
+ void ExecuteForegroundTaskImmediately();
void StartBackgroundTask();
@@ -137,16 +139,16 @@ class AsyncCompileJob {
friend class AsyncStreamingProcessor;
Isolate* isolate_;
+ const WasmFeatures enabled_features_;
const std::shared_ptr<Counters> async_counters_;
- // Copy of the module wire bytes, moved into the {native_module_} on it's
+ // Copy of the module wire bytes, moved into the {native_module_} on its
// creation.
std::unique_ptr<byte[]> bytes_copy_;
// Reference to the wire bytes (hold in {bytes_copy_} or as part of
// {native_module_}).
ModuleWireBytes wire_bytes_;
Handle<Context> native_context_;
- std::unique_ptr<CompilationResultResolver> resolver_;
- std::shared_ptr<const WasmModule> module_;
+ std::shared_ptr<CompilationResultResolver> resolver_;
std::vector<DeferredHandles*> deferred_handles_;
Handle<WasmModuleObject> module_object_;
@@ -169,8 +171,8 @@ class AsyncCompileJob {
return outstanding_finishers_.fetch_sub(1) == 1;
}
- // Counts the number of pending foreground tasks.
- int32_t num_pending_foreground_tasks_ = 0;
+ // A reference to a pending foreground task, or {nullptr} if none is pending.
+ CompileTask* pending_foreground_task_ = nullptr;
// The AsyncCompileJob owns the StreamingDecoder because the StreamingDecoder
// contains data which is needed by the AsyncCompileJob for streaming
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index bae8e4baf8..db9cf45049 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -15,6 +15,7 @@
#include "src/v8.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
namespace v8 {
@@ -82,8 +83,7 @@ const char* SectionName(SectionCode code) {
case kNameSectionCode:
return kNameString;
case kExceptionSectionCode:
- if (FLAG_experimental_wasm_eh) return kExceptionString;
- return kUnknownString;
+ return kExceptionString;
default:
return kUnknownString;
}
@@ -246,13 +246,15 @@ class WasmSectionIterator {
// The main logic for decoding the bytes of a module.
class ModuleDecoderImpl : public Decoder {
public:
- explicit ModuleDecoderImpl(ModuleOrigin origin)
+ explicit ModuleDecoderImpl(const WasmFeatures& enabled, ModuleOrigin origin)
: Decoder(nullptr, nullptr),
+ enabled_features_(enabled),
origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {}
- ModuleDecoderImpl(const byte* module_start, const byte* module_end,
- ModuleOrigin origin)
+ ModuleDecoderImpl(const WasmFeatures& enabled, const byte* module_start,
+ const byte* module_end, ModuleOrigin origin)
: Decoder(module_start, module_end),
+ enabled_features_(enabled),
origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
if (end_ < start_) {
error(start_, "end is less than start");
@@ -288,11 +290,11 @@ class ModuleDecoderImpl : public Decoder {
}
}
- void StartDecoding(Isolate* isolate) {
+ void StartDecoding(Counters* counters, AccountingAllocator* allocator) {
CHECK_NULL(module_);
- SetCounters(isolate->counters());
- module_.reset(new WasmModule(base::make_unique<Zone>(
- isolate->wasm_engine()->allocator(), "signatures")));
+ SetCounters(counters);
+ module_.reset(
+ new WasmModule(base::make_unique<Zone>(allocator, "signatures")));
module_->initial_pages = 0;
module_->maximum_pages = 0;
module_->mem_export = false;
@@ -400,7 +402,7 @@ class ModuleDecoderImpl : public Decoder {
DecodeNameSection();
break;
case kExceptionSectionCode:
- if (FLAG_experimental_wasm_eh) {
+ if (enabled_features_.eh) {
DecodeExceptionSection();
} else {
errorf(pc(), "unexpected section: %s", SectionName(section_code));
@@ -450,8 +452,8 @@ class ModuleDecoderImpl : public Decoder {
});
WasmImport* import = &module_->import_table.back();
const byte* pos = pc_;
- import->module_name = consume_string(true, "module name");
- import->field_name = consume_string(true, "field name");
+ import->module_name = consume_string(*this, true, "module name");
+ import->field_name = consume_string(*this, true, "field name");
import->kind =
static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
@@ -478,7 +480,7 @@ class ModuleDecoderImpl : public Decoder {
WasmTable* table = &module_->tables.back();
table->imported = true;
ValueType type = consume_reference_type();
- if (!FLAG_experimental_wasm_anyref) {
+ if (!enabled_features_.anyref) {
if (type != kWasmAnyFunc) {
error(pc_ - 1, "invalid table type");
break;
@@ -511,7 +513,7 @@ class ModuleDecoderImpl : public Decoder {
global->type = consume_value_type();
global->mutability = consume_mutability();
if (global->mutability) {
- if (FLAG_experimental_wasm_mut_global) {
+ if (enabled_features_.mut_global) {
module_->num_imported_mutable_globals++;
} else {
error("mutable globals cannot be imported");
@@ -555,7 +557,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeTableSection() {
// TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
// implementation of AnyRef landed.
- uint32_t max_count = FLAG_experimental_wasm_anyref ? 10 : kV8MaxWasmTables;
+ uint32_t max_count = enabled_features_.anyref ? 10 : kV8MaxWasmTables;
uint32_t table_count = consume_count("table count", max_count);
for (uint32_t i = 0; ok() && i < table_count; i++) {
@@ -614,7 +616,7 @@ class ModuleDecoderImpl : public Decoder {
});
WasmExport* exp = &module_->export_table.back();
- exp->name = consume_string(true, "field name");
+ exp->name = consume_string(*this, true, "field name");
const byte* pos = pc();
exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind"));
@@ -646,7 +648,7 @@ class ModuleDecoderImpl : public Decoder {
WasmGlobal* global = nullptr;
exp->index = consume_global_index(module_.get(), &global);
if (global) {
- if (!FLAG_experimental_wasm_mut_global && global->mutability) {
+ if (!enabled_features_.mut_global && global->mutability) {
error("mutable globals cannot be exported");
}
global->exported = true;
@@ -709,7 +711,7 @@ class ModuleDecoderImpl : public Decoder {
for (uint32_t i = 0; ok() && i < element_count; ++i) {
const byte* pos = pc();
uint32_t table_index = consume_u32v("table index");
- if (!FLAG_experimental_wasm_anyref && table_index != 0) {
+ if (!enabled_features_.anyref && table_index != 0) {
errorf(pos, "illegal table index %u != 0", table_index);
}
if (table_index >= module_->tables.size()) {
@@ -815,7 +817,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
if (name_type == NameSectionKindCode::kModule) {
- WireBytesRef name = wasm::consume_string(inner, false, "module name");
+ WireBytesRef name = consume_string(inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
} else {
inner.consume_bytes(name_payload_len, "name subsection payload");
@@ -849,8 +851,9 @@ class ModuleDecoderImpl : public Decoder {
}
// Decodes an entire module.
- ModuleResult DecodeModule(Isolate* isolate, bool verify_functions = true) {
- StartDecoding(isolate);
+ ModuleResult DecodeModule(Counters* counters, AccountingAllocator* allocator,
+ bool verify_functions = true) {
+ StartDecoding(counters, allocator);
uint32_t offset = 0;
Vector<const byte> orig_bytes(start(), end() - start());
DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset);
@@ -928,6 +931,7 @@ class ModuleDecoderImpl : public Decoder {
}
private:
+ const WasmFeatures enabled_features_;
std::shared_ptr<WasmModule> module_;
Counters* counters_ = nullptr;
// The type section is the first section in a module.
@@ -946,7 +950,7 @@ class ModuleDecoderImpl : public Decoder {
}
bool AddTable(WasmModule* module) {
- if (FLAG_experimental_wasm_anyref) return true;
+ if (enabled_features_.anyref) return true;
if (module->tables.size() > 0) {
error("At most one table is supported");
return false;
@@ -1019,7 +1023,7 @@ class ModuleDecoderImpl : public Decoder {
for (WasmGlobal& global : module->globals) {
byte size = ValueTypes::MemSize(ValueTypes::MachineTypeFor(global.type));
if (global.mutability && global.imported) {
- DCHECK(FLAG_experimental_wasm_mut_global);
+ DCHECK(enabled_features_.mut_global);
global.index = num_imported_mutable_globals++;
} else {
offset = (offset + size - 1) & ~(size - 1); // align
@@ -1044,8 +1048,18 @@ class ModuleDecoderImpl : public Decoder {
function->sig, function->code.offset(),
start_ + GetBufferRelativeOffset(function->code.offset()),
start_ + GetBufferRelativeOffset(function->code.end_offset())};
- DecodeResult result = VerifyWasmCodeWithStats(allocator, module, body,
- origin_, GetCounters());
+
+ DecodeResult result;
+ {
+ auto time_counter = SELECT_WASM_COUNTER(GetCounters(), origin_,
+ wasm_decode, function_time);
+
+ TimedHistogramScope wasm_decode_function_time_scope(time_counter);
+ WasmFeatures unused_detected_features;
+ result = VerifyWasmCode(allocator, enabled_features_, module,
+ &unused_detected_features, body);
+ }
+
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream wrapped;
@@ -1059,10 +1073,6 @@ class ModuleDecoderImpl : public Decoder {
}
}
- WireBytesRef consume_string(bool validate_utf8, const char* name) {
- return wasm::consume_string(*this, validate_utf8, name);
- }
-
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
@@ -1125,7 +1135,7 @@ class ModuleDecoderImpl : public Decoder {
uint8_t flags = consume_u8("resizable limits flags");
const byte* pos = pc();
*has_shared_memory = false;
- if (FLAG_experimental_wasm_threads) {
+ if (enabled_features_.threads) {
if (flags & 0xFC) {
errorf(pos - 1, "invalid memory limits flags");
} else if (flags == 3) {
@@ -1243,7 +1253,7 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprRefNull: {
- if (FLAG_experimental_wasm_anyref) {
+ if (enabled_features_.anyref) {
expr.kind = WasmInitExpr::kAnyRefConst;
len = 0;
break;
@@ -1292,13 +1302,13 @@ class ModuleDecoderImpl : public Decoder {
if (origin_ == kWasmOrigin) {
switch (t) {
case kLocalS128:
- if (FLAG_experimental_wasm_simd) return kWasmS128;
+ if (enabled_features_.simd) return kWasmS128;
break;
case kLocalAnyFunc:
- if (FLAG_experimental_wasm_anyref) return kWasmAnyFunc;
+ if (enabled_features_.anyref) return kWasmAnyFunc;
break;
case kLocalAnyRef:
- if (FLAG_experimental_wasm_anyref) return kWasmAnyRef;
+ if (enabled_features_.anyref) return kWasmAnyRef;
break;
default:
break;
@@ -1317,7 +1327,7 @@ class ModuleDecoderImpl : public Decoder {
case kLocalAnyFunc:
return kWasmAnyFunc;
case kLocalAnyRef:
- if (!FLAG_experimental_wasm_anyref) {
+ if (!enabled_features_.anyref) {
error(pc_ - 1,
"Invalid type. Set --experimental-wasm-anyref to use 'AnyRef'");
}
@@ -1356,7 +1366,7 @@ class ModuleDecoderImpl : public Decoder {
uint32_t return_count = 0;
if (has_return_values) {
// parse return types
- const size_t max_return_count = FLAG_experimental_wasm_mv
+ const size_t max_return_count = enabled_features_.mv
? kV8MaxWasmFunctionMultiReturns
: kV8MaxWasmFunctionReturns;
return_count = consume_count("return count", max_return_count);
@@ -1379,9 +1389,11 @@ class ModuleDecoderImpl : public Decoder {
}
};
-ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
- const byte* module_end, bool verify_functions,
- ModuleOrigin origin, Counters* counters) {
+ModuleResult DecodeWasmModule(const WasmFeatures& enabled,
+ const byte* module_start, const byte* module_end,
+ bool verify_functions, ModuleOrigin origin,
+ Counters* counters,
+ AccountingAllocator* allocator) {
auto counter =
SELECT_WASM_COUNTER(counters, origin, wasm_decode, module_time);
TimedHistogramScope wasm_decode_module_time_scope(counter);
@@ -1395,8 +1407,9 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
size_counter->AddSample(static_cast<int>(size));
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
- ModuleDecoderImpl decoder(module_start, module_end, origin);
- ModuleResult result = decoder.DecodeModule(isolate, verify_functions);
+ ModuleDecoderImpl decoder(enabled, module_start, module_end, origin);
+ ModuleResult result =
+ decoder.DecodeModule(counters, allocator, verify_functions);
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(titzer): this isn't accurate, since it doesn't count the data
// allocated on the C++ heap.
@@ -1410,17 +1423,21 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
return result;
}
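Throughout the decoder, checks against global FLAG_experimental_* variables are replaced by a WasmFeatures value captured at construction, so each decode sees a fixed feature set. A sketch of that gating, with a made-up two-flag feature struct and section name:

#include <iostream>
#include <string>

// Stand-in for WasmFeatures: the proposals enabled for one decode.
struct Features {
  bool eh;      // exception handling
  bool anyref;  // reference types
};

class Decoder {
 public:
  // The feature set is captured once at construction, as ModuleDecoderImpl
  // does, so a running decode is unaffected by later flag changes.
  explicit Decoder(const Features& enabled) : enabled_(enabled) {}

  bool DecodeSection(const std::string& name) {
    if (name == "exception" && !enabled_.eh) {
      std::cout << "unexpected section: " << name << "\n";
      return false;
    }
    std::cout << "decoding section: " << name << "\n";
    return true;
  }

 private:
  const Features enabled_;
};

int main() {
  Decoder without_eh{Features{false, false}};
  without_eh.DecodeSection("exception");  // rejected: feature disabled

  Decoder with_eh{Features{true, false}};
  with_eh.DecodeSection("exception");  // accepted
}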
-ModuleDecoder::ModuleDecoder() = default;
+ModuleDecoder::ModuleDecoder(const WasmFeatures& enabled)
+ : enabled_features_(enabled) {}
+
ModuleDecoder::~ModuleDecoder() = default;
const std::shared_ptr<WasmModule>& ModuleDecoder::shared_module() const {
return impl_->shared_module();
}
-void ModuleDecoder::StartDecoding(Isolate* isolate, ModuleOrigin origin) {
+void ModuleDecoder::StartDecoding(Counters* counters,
+ AccountingAllocator* allocator,
+ ModuleOrigin origin) {
DCHECK_NULL(impl_);
- impl_.reset(new ModuleDecoderImpl(origin));
- impl_->StartDecoding(isolate);
+ impl_.reset(new ModuleDecoderImpl(enabled_features_, origin));
+ impl_->StartDecoding(counters, allocator);
}
void ModuleDecoder::DecodeModuleHeader(Vector<const uint8_t> bytes,
@@ -1450,7 +1467,7 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
const byte* end) {
- WireBytesRef string = wasm::consume_string(decoder, true, "section name");
+ WireBytesRef string = consume_string(decoder, true, "section name");
if (decoder.failed() || decoder.pc() > end) {
return kUnknownSectionCode;
}
@@ -1471,75 +1488,39 @@ SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
bool ModuleDecoder::ok() { return impl_->ok(); }
-ModuleResult SyncDecodeWasmModule(Isolate* isolate, const byte* module_start,
- const byte* module_end, bool verify_functions,
- ModuleOrigin origin) {
- return DecodeWasmModule(isolate, module_start, module_end, verify_functions,
- origin, isolate->counters());
-}
-
-ModuleResult AsyncDecodeWasmModule(
- Isolate* isolate, const byte* module_start, const byte* module_end,
- bool verify_functions, ModuleOrigin origin,
- const std::shared_ptr<Counters> async_counters) {
- return DecodeWasmModule(isolate, module_start, module_end, verify_functions,
- origin, async_counters.get());
-}
-
-FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
+FunctionSig* DecodeWasmSignatureForTesting(const WasmFeatures& enabled,
+ Zone* zone, const byte* start,
const byte* end) {
- ModuleDecoderImpl decoder(start, end, kWasmOrigin);
+ ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin);
return decoder.DecodeFunctionSignature(zone, start);
}
-WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
+WasmInitExpr DecodeWasmInitExprForTesting(const WasmFeatures& enabled,
+ const byte* start, const byte* end) {
AccountingAllocator allocator;
- ModuleDecoderImpl decoder(start, end, kWasmOrigin);
+ ModuleDecoderImpl decoder(enabled, start, end, kWasmOrigin);
return decoder.DecodeInitExpr(start);
}
-namespace {
-
-FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
- const ModuleWireBytes& wire_bytes,
- const WasmModule* module,
- const byte* function_start,
- const byte* function_end,
- Counters* counters) {
+FunctionResult DecodeWasmFunctionForTesting(
+ const WasmFeatures& enabled, Zone* zone, const ModuleWireBytes& wire_bytes,
+ const WasmModule* module, const byte* function_start,
+ const byte* function_end, Counters* counters) {
size_t size = function_end - function_start;
if (function_start > function_end)
return FunctionResult::Error("start > end");
- auto size_histogram =
- SELECT_WASM_COUNTER(counters, module->origin, wasm, function_size_bytes);
+ auto size_histogram = SELECT_WASM_COUNTER(counters, module->origin, wasm,
+ function_size_bytes);
// TODO(bradnelson): Improve histogram handling of ptrdiff_t.
size_histogram->AddSample(static_cast<int>(size));
if (size > kV8MaxWasmFunctionSize)
return FunctionResult::Error("size > maximum function size: %zu", size);
- ModuleDecoderImpl decoder(function_start, function_end, kWasmOrigin);
+ ModuleDecoderImpl decoder(enabled, function_start, function_end, kWasmOrigin);
decoder.SetCounters(counters);
return decoder.DecodeSingleFunction(zone, wire_bytes, module,
base::make_unique<WasmFunction>());
}
-} // namespace
-
-FunctionResult SyncDecodeWasmFunction(Isolate* isolate, Zone* zone,
- const ModuleWireBytes& wire_bytes,
- const WasmModule* module,
- const byte* function_start,
- const byte* function_end) {
- return DecodeWasmFunction(isolate, zone, wire_bytes, module, function_start,
- function_end, isolate->counters());
-}
-
-FunctionResult AsyncDecodeWasmFunction(
- Isolate* isolate, Zone* zone, const ModuleWireBytes& wire_bytes,
- const WasmModule* module, const byte* function_start,
- const byte* function_end, std::shared_ptr<Counters> async_counters) {
- return DecodeWasmFunction(isolate, zone, wire_bytes, module, function_start,
- function_end, async_counters.get());
-}
-
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
const byte* tables_end) {
AsmJsOffsets table;
@@ -1670,7 +1651,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
for (; decoder.ok() && functions_count > 0; --functions_count) {
uint32_t function_index = decoder.consume_u32v("function index");
- WireBytesRef name = wasm::consume_string(decoder, false, "function name");
+ WireBytesRef name = consume_string(decoder, false, "function name");
// Be lenient with errors in the name section: Ignore non-UTF8 names. You
// can even assign to the same function multiple times (last valid one
@@ -1713,7 +1694,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
uint32_t num_names = decoder.consume_u32v("namings count");
for (uint32_t k = 0; k < num_names; ++k) {
uint32_t local_index = decoder.consume_u32v("local index");
- WireBytesRef name = wasm::consume_string(decoder, true, "local name");
+ WireBytesRef name = consume_string(decoder, true, "local name");
if (!decoder.ok()) break;
if (local_index > kMaxInt) continue;
func_names.max_local_index =
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index dc6d4c4ae0..f190a12844 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -8,11 +8,15 @@
#include "src/globals.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
namespace internal {
+
+class Counters;
+
namespace wasm {
struct ModuleEnv;
@@ -55,36 +59,26 @@ struct LocalNames {
};
// Decodes the bytes of a wasm module between {module_start} and {module_end}.
-V8_EXPORT_PRIVATE ModuleResult SyncDecodeWasmModule(Isolate* isolate,
- const byte* module_start,
- const byte* module_end,
- bool verify_functions,
- ModuleOrigin origin);
-
-V8_EXPORT_PRIVATE ModuleResult AsyncDecodeWasmModule(
- Isolate* isolate, const byte* module_start, const byte* module_end,
- bool verify_functions, ModuleOrigin origin,
- const std::shared_ptr<Counters> async_counters);
+V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(
+ const WasmFeatures& enabled, const byte* module_start,
+ const byte* module_end, bool verify_functions, ModuleOrigin origin,
+ Counters* counters, AccountingAllocator* allocator);
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
-V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(Zone* zone,
- const byte* start,
- const byte* end);
+V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(
+ const WasmFeatures& enabled, Zone* zone, const byte* start,
+ const byte* end);
// Decodes the bytes of a wasm function between
// {function_start} and {function_end}.
-V8_EXPORT_PRIVATE FunctionResult SyncDecodeWasmFunction(
- Isolate* isolate, Zone* zone, const ModuleWireBytes& wire_bytes,
+V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunctionForTesting(
+ const WasmFeatures& enabled, Zone* zone, const ModuleWireBytes& wire_bytes,
const WasmModule* module, const byte* function_start,
- const byte* function_end);
-
-V8_EXPORT_PRIVATE FunctionResult AsyncDecodeWasmFunction(
- Isolate* isolate, Zone* zone, ModuleEnv* env, const byte* function_start,
- const byte* function_end, const std::shared_ptr<Counters> async_counters);
+ const byte* function_end, Counters* counters);
-V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
- const byte* end);
+V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(
+ const WasmFeatures& enabled, const byte* start, const byte* end);
struct CustomSectionOffset {
WireBytesRef section;
@@ -120,10 +114,10 @@ class ModuleDecoderImpl;
class ModuleDecoder {
public:
- ModuleDecoder();
+ explicit ModuleDecoder(const WasmFeatures& enabled);
~ModuleDecoder();
- void StartDecoding(Isolate* isolate,
+ void StartDecoding(Counters* counters, AccountingAllocator* allocator,
ModuleOrigin origin = ModuleOrigin::kWasmOrigin);
void DecodeModuleHeader(Vector<const uint8_t> bytes, uint32_t offset);
@@ -154,6 +148,7 @@ class ModuleDecoder {
static SectionCode IdentifyUnknownSection(Decoder& decoder, const byte* end);
private:
+ const WasmFeatures enabled_features_;
std::unique_ptr<ModuleDecoderImpl> impl_;
};
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 07b425aad0..15ced2316b 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -83,7 +83,10 @@ void StreamingDecoder::Finish() {
void StreamingDecoder::Abort() {
TRACE_STREAMING("Abort\n");
- if (ok()) processor_->OnAbort();
+ if (ok()) {
+ ok_ = false;
+ processor_->OnAbort();
+ }
}
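Clearing ok_ before notifying makes Abort idempotent: a second Abort, or an Abort after a decoding failure, no longer reaches the processor. Sketched below with a stand-in decoder:

#include <iostream>

class StreamingDecoder {
 public:
  // Abort only notifies the processor once: clearing ok_ first makes repeated
  // Abort() calls (or an Abort after a failure) a no-op.
  void Abort() {
    if (!ok_) return;
    ok_ = false;
    std::cout << "processor->OnAbort()\n";
  }

  bool ok() const { return ok_; }

 private:
  bool ok_ = true;
};

int main() {
  StreamingDecoder decoder;
  decoder.Abort();  // notifies once
  decoder.Abort();  // no-op
}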
// An abstract class to share code among the states which decode VarInts. This
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 676d14e1f7..c2c425a44e 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -117,7 +117,7 @@ void WasmCode::set_trap_handler_index(size_t value) {
void WasmCode::RegisterTrapHandlerData() {
DCHECK(!HasTrapHandlerIndex());
- if (kind() != wasm::WasmCode::kFunction) return;
+ if (kind() != WasmCode::kFunction) return;
Address base = instruction_start();
@@ -199,7 +199,6 @@ void WasmCode::Validate() const {
}
case RelocInfo::JS_TO_WASM_CALL:
case RelocInfo::EXTERNAL_REFERENCE:
- case RelocInfo::OFF_HEAP_TARGET:
case RelocInfo::COMMENT:
case RelocInfo::CONST_POOL:
case RelocInfo::VENEER_POOL:
@@ -236,6 +235,9 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
instruction_size = safepoint_table_offset_;
}
+ if (handler_table_offset_ && handler_table_offset_ < instruction_size) {
+ instruction_size = handler_table_offset_;
+ }
DCHECK_LT(0, instruction_size);
os << "Instructions (size = " << instruction_size << ")\n";
Disassembler::Decode(nullptr, &os, instructions().start(),
@@ -243,6 +245,23 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
CodeReference(this), current_pc);
os << "\n";
+ if (handler_table_offset_ > 0) {
+ HandlerTable table(instruction_start(), handler_table_offset_);
+ os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
+ << "):\n";
+ table.HandlerTableReturnPrint(os);
+ os << "\n";
+ }
+
+ if (!protected_instructions_.is_empty()) {
+ os << "Protected instructions:\n pc offset land pad\n";
+ for (auto& data : protected_instructions()) {
+ os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
+ << std::hex << data.landing_offset << "\n";
+ }
+ os << "\n";
+ }
+
if (!source_positions().is_empty()) {
os << "Source positions:\n pc offset position\n";
for (SourcePositionTableIterator it(source_positions()); !it.done();
@@ -289,12 +308,13 @@ WasmCode::~WasmCode() {
}
}
-NativeModule::NativeModule(Isolate* isolate, bool can_request_more,
- VirtualMemory* code_space,
+NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
+ bool can_request_more, VirtualMemory* code_space,
WasmCodeManager* code_manager,
std::shared_ptr<const WasmModule> module,
const ModuleEnv& env)
- : module_(std::move(module)),
+ : enabled_features_(enabled),
+ module_(std::move(module)),
compilation_state_(NewCompilationState(isolate, env)),
free_code_space_({code_space->address(), code_space->end()}),
wasm_code_manager_(code_manager),
@@ -329,11 +349,11 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
}
void NativeModule::LogWasmCodes(Isolate* isolate) {
- if (!wasm::WasmCode::ShouldBeLogged(isolate)) return;
+ if (!WasmCode::ShouldBeLogged(isolate)) return;
// TODO(titzer): we skip the logging of the import wrappers
// here, but they should be included somehow.
- for (wasm::WasmCode* code : code_table()) {
+ for (WasmCode* code : code_table()) {
if (code != nullptr) code->LogCode(isolate);
}
}
@@ -398,7 +418,9 @@ WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
ret->index_ = Just(index);
+ base::LockGuard<base::Mutex> lock(&allocation_mutex_);
PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
+ set_code(index, ret);
return ret;
}
@@ -408,16 +430,11 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) {
WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
// Fill the jump table with jumps to the lazy compile stub.
Address lazy_compile_target = lazy_builtin->instruction_start();
- JumpTableAssembler jtasm(
- jump_table_->instruction_start(),
- static_cast<int>(jump_table_->instructions().size()) + 256);
for (uint32_t i = 0; i < num_wasm_functions; ++i) {
- // Check that the offset in the jump table increases as expected.
- DCHECK_EQ(i * JumpTableAssembler::kJumpTableSlotSize, jtasm.pc_offset());
- jtasm.EmitLazyCompileJumpSlot(i + module_->num_imported_functions,
- lazy_compile_target);
- jtasm.NopBytes((i + 1) * JumpTableAssembler::kJumpTableSlotSize -
- jtasm.pc_offset());
+ JumpTableAssembler::EmitLazyCompileJumpSlot(
+ jump_table_->instruction_start(), i,
+ i + module_->num_imported_functions, lazy_compile_target,
+ WasmCode::kNoFlushICache);
}
Assembler::FlushICache(jump_table_->instructions().start(),
jump_table_->instructions().size());
@@ -437,9 +454,13 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode::Kind kind) {
- OwnedVector<byte> reloc_info =
- OwnedVector<byte>::New(code->relocation_size());
- memcpy(reloc_info.start(), code->relocation_start(), code->relocation_size());
+ // For off-heap builtins, we create a copy of the off-heap instruction stream
+ // instead of the on-heap code object containing the trampoline. Ensure that
+ // we do not apply the on-heap reloc info to the off-heap instructions.
+ const size_t relocation_size =
+ code->is_off_heap_trampoline() ? 0 : code->relocation_size();
+ OwnedVector<byte> reloc_info = OwnedVector<byte>::New(relocation_size);
+ memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
code->GetIsolate());
OwnedVector<byte> source_pos =
@@ -567,9 +588,13 @@ WasmCode* NativeModule::AddDeserializedCode(
}
void NativeModule::PublishCode(WasmCode* code) {
- // TODO(clemensh): Remove the need for locking here. Probably requires
- // word-aligning the jump table slots.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ // Skip publishing code if there is an active redirection to the interpreter
+ // for the given function index, in order to preserve the redirection.
+ if (has_code(code->index()) &&
+ this->code(code->index())->kind() == WasmCode::kInterpreterEntry) {
+ return;
+ }
if (!code->protected_instructions_.is_empty()) {
code->RegisterTrapHandlerData();
}
@@ -579,11 +604,19 @@ void NativeModule::PublishCode(WasmCode* code) {
WasmCode::kFlushICache);
}
+std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
+ base::LockGuard<base::Mutex> lock(&allocation_mutex_);
+ std::vector<WasmCode*> result;
+ result.reserve(code_table().size());
+ for (WasmCode* code : code_table()) result.push_back(code);
+ return result;
+}
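SnapshotCodeTable copies the whole table while holding the allocation mutex so that a caller such as the serializer gets one consistent view even while other threads keep publishing code. The same idea in miniature (stand-in element type):

#include <iostream>
#include <mutex>
#include <vector>

class CodeTable {
 public:
  void Set(size_t index, int* code) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (index >= table_.size()) table_.resize(index + 1, nullptr);
    table_[index] = code;
  }

  // Copy the whole table under the lock so callers see a consistent view.
  std::vector<int*> Snapshot() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return table_;
  }

 private:
  mutable std::mutex mutex_;
  std::vector<int*> table_;
};

int main() {
  CodeTable table;
  int code = 42;
  table.Set(0, &code);
  std::cout << "snapshot size: " << table.Snapshot().size() << "\n";
}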
+
WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
// Only call this if we really need a jump table.
DCHECK_LT(0, num_wasm_functions);
OwnedVector<byte> instructions = OwnedVector<byte>::New(
- num_wasm_functions * JumpTableAssembler::kJumpTableSlotSize);
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
memset(instructions.start(), 0, instructions.size());
return AddOwnedCode(Nothing<uint32_t>(), // index
instructions.as_vector(), // instructions
@@ -602,9 +635,8 @@ void NativeModule::PatchJumpTable(uint32_t func_index, Address target,
WasmCode::FlushICache flush_icache) {
DCHECK_LE(module_->num_imported_functions, func_index);
uint32_t slot_idx = func_index - module_->num_imported_functions;
- Address jump_table_slot = jump_table_->instruction_start() +
- slot_idx * JumpTableAssembler::kJumpTableSlotSize;
- JumpTableAssembler::PatchJumpTableSlot(jump_table_slot, target, flush_icache);
+ JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
+ slot_idx, target, flush_icache);
}
Address NativeModule::AllocateForCode(size_t size) {
@@ -622,6 +654,8 @@ Address NativeModule::AllocateForCode(size_t size) {
wasm_code_manager_->TryAllocate(size, &new_mem,
reinterpret_cast<void*>(hint));
if (!new_mem.IsReserved()) return kNullAddress;
+ base::LockGuard<base::Mutex> lock(
+ &wasm_code_manager_->native_modules_mutex_);
wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
free_code_space_.Merge({new_mem.address(), new_mem.end()});
@@ -655,7 +689,7 @@ Address NativeModule::AllocateForCode(size_t size) {
if (!wasm_code_manager_->Commit(start, commit_size)) {
return kNullAddress;
}
- committed_code_space_ += commit_size;
+ committed_code_space_.fetch_add(commit_size);
commit_end = start;
}
#else
@@ -664,7 +698,7 @@ Address NativeModule::AllocateForCode(size_t size) {
if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
return kNullAddress;
}
- committed_code_space_ += commit_size;
+ committed_code_space_.fetch_add(commit_size);
#endif
}
DCHECK(IsAligned(mem.start, kCodeAlignment));
@@ -693,18 +727,17 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
// Return the jump table slot for that function index.
DCHECK_NOT_NULL(jump_table_);
uint32_t slot_idx = func_index - module_->num_imported_functions;
- DCHECK_LT(slot_idx, jump_table_->instructions().size() /
- JumpTableAssembler::kJumpTableSlotSize);
- return jump_table_->instruction_start() +
- slot_idx * JumpTableAssembler::kJumpTableSlotSize;
+ uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
+ DCHECK_LT(slot_offset, jump_table_->instructions().size());
+ return jump_table_->instruction_start() + slot_offset;
}
uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
Address slot_address) const {
DCHECK(is_jump_table_slot(slot_address));
- uint32_t offset =
+ uint32_t slot_offset =
static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
- uint32_t slot_idx = offset / JumpTableAssembler::kJumpTableSlotSize;
+ uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
DCHECK_LT(slot_idx, module_->num_declared_functions);
return module_->num_imported_functions + slot_idx;
}
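Slot arithmetic now goes through SizeForNumberOfSlots / SlotIndexToOffset / SlotOffsetToIndex instead of multiplying by a raw slot-size constant. A sketch of the mapping with an assumed 16-byte slot size (the real size is per-architecture and chosen by JumpTableAssembler):

#include <cassert>
#include <cstdint>
#include <iostream>

// Hypothetical slot size for illustration only.
constexpr uint32_t kSlotSize = 16;

constexpr uint32_t SizeForNumberOfSlots(uint32_t num_slots) {
  return num_slots * kSlotSize;
}
constexpr uint32_t SlotIndexToOffset(uint32_t slot_index) {
  return slot_index * kSlotSize;
}
inline uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
  assert(slot_offset % kSlotSize == 0);
  return slot_offset / kSlotSize;
}

int main() {
  // A module with 3 imported and 5 declared functions: function index 6 maps
  // to jump table slot 3, i.e. byte offset 48 with the slot size assumed here.
  const uint32_t num_imported = 3;
  const uint32_t func_index = 6;
  const uint32_t slot_index = func_index - num_imported;
  const uint32_t offset = SlotIndexToOffset(slot_index);
  std::cout << "table size: " << SizeForNumberOfSlots(5)
            << ", offset: " << offset
            << ", back to function index: "
            << num_imported + SlotOffsetToIndex(offset) << "\n";
}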
@@ -729,9 +762,11 @@ NativeModule::~NativeModule() {
wasm_code_manager_->FreeNativeModule(this);
}
-WasmCodeManager::WasmCodeManager(size_t max_committed) {
+WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
+ size_t max_committed)
+ : memory_tracker_(memory_tracker),
+ remaining_uncommitted_code_space_(max_committed) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
- remaining_uncommitted_code_space_.store(max_committed);
}
bool WasmCodeManager::Commit(Address start, size_t size) {
@@ -773,17 +808,45 @@ void WasmCodeManager::AssignRanges(Address start, Address end,
void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
DCHECK_GT(size, 0);
size = RoundUp(size, AllocatePageSize());
+ DCHECK(!ret->IsReserved());
+ if (!memory_tracker_->ReserveAddressSpace(size)) return;
if (hint == nullptr) hint = GetRandomMmapAddr();
if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
hint, ret)) {
DCHECK(!ret->IsReserved());
+ memory_tracker_->ReleaseReservation(size);
}
TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
reinterpret_cast<void*>(ret->address()),
reinterpret_cast<void*>(ret->end()), ret->size());
}
+void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
+ base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ for (NativeModule* native_module : native_modules_) {
+ int code_size =
+ static_cast<int>(native_module->committed_code_space_.load() / MB);
+ isolate->counters()->wasm_module_code_size_mb()->AddSample(code_size);
+ }
+}
+
+namespace {
+
+void ModuleSamplingCallback(v8::Isolate* v8_isolate, v8::GCType type,
+ v8::GCCallbackFlags flags, void* data) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ isolate->wasm_engine()->code_manager()->SampleModuleSizes(isolate);
+}
+
+} // namespace
+
+// static
+void WasmCodeManager::InstallSamplingGCCallback(Isolate* isolate) {
+ isolate->heap()->AddGCEpilogueCallback(ModuleSamplingCallback,
+ v8::kGCTypeMarkSweepCompact, nullptr);
+}
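SampleModuleSizes walks the registered native modules under the manager's mutex and emits one committed-code-size sample per module; the GC epilogue callback above just triggers that walk after each mark-compact GC. A stand-alone sketch of the registry-plus-sampling part (types and sizes are stand-ins):

#include <atomic>
#include <cstdio>
#include <mutex>
#include <unordered_set>

struct NativeModule {
  std::atomic<size_t> committed_code_space{0};
};

class CodeManager {
 public:
  void Register(NativeModule* module) {
    std::lock_guard<std::mutex> lock(mutex_);
    modules_.insert(module);
  }
  void Unregister(NativeModule* module) {
    std::lock_guard<std::mutex> lock(mutex_);
    modules_.erase(module);
  }

  // Walk all live modules under the lock and emit one sample per module;
  // in the real code this runs from the mark-compact GC epilogue callback.
  void SampleModuleSizes() const {
    std::lock_guard<std::mutex> lock(mutex_);
    for (const NativeModule* module : modules_) {
      std::printf("sample: %zu MB\n",
                  module->committed_code_space.load() / (1024 * 1024));
    }
  }

 private:
  mutable std::mutex mutex_;
  std::unordered_set<NativeModule*> modules_;
};

int main() {
  CodeManager manager;
  NativeModule module;
  module.committed_code_space = 8 * 1024 * 1024;
  manager.Register(&module);
  manager.SampleModuleSizes();
  manager.Unregister(&module);
}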
+
// static
size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
constexpr size_t kCodeSizeMultiplier = 4;
@@ -797,8 +860,7 @@ size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
(sizeof(WasmCode*) * num_wasm_functions /* code table size */) +
(sizeof(WasmCode) * num_wasm_functions /* code object size */) +
(kImportSize * module->num_imported_functions /* import size */) +
- (JumpTableAssembler::kJumpTableSlotSize *
- num_wasm_functions /* jump table size */);
+ (JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
for (auto& function : module->functions) {
estimate += kCodeSizeMultiplier * function.code.length();
@@ -807,51 +869,65 @@ size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
return estimate;
}
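The estimate sums a per-function code-table entry, a per-function code object, a per-import overhead, one jump-table slot per declared function, and roughly four bytes of machine code per wire byte of function body. A worked sketch with illustrative constants (the real values come from sizeof(WasmCode), the import wrapper size and JumpTableAssembler):

#include <cstddef>
#include <cstdio>
#include <vector>

// All constants below are illustrative stand-ins.
constexpr size_t kCodeSizeMultiplier = 4;  // estimated machine code per wire byte
constexpr size_t kCodeObjectSize = 96;     // stand-in for sizeof(WasmCode)
constexpr size_t kImportSize = 256;        // per imported function
constexpr size_t kJumpTableSlotSize = 16;  // per declared function

size_t EstimateModuleSize(size_t num_declared, size_t num_imported,
                          const std::vector<size_t>& body_sizes) {
  size_t estimate = sizeof(void*) * num_declared          // code table
                    + kCodeObjectSize * num_declared      // code objects
                    + kImportSize * num_imported          // import wrappers
                    + kJumpTableSlotSize * num_declared;  // jump table
  for (size_t body : body_sizes) estimate += kCodeSizeMultiplier * body;
  return estimate;
}

int main() {
  // Two declared functions with 100 and 300 bytes of wire bytes and one import:
  // 2*8 + 2*96 + 256 + 2*16 + 4*(100+300) = 2096 bytes (on an 8-byte pointer
  // platform) with the assumed constants.
  std::printf("estimate: %zu bytes\n", EstimateModuleSize(2, 1, {100, 300}));
}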
-std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
- Isolate* isolate, size_t memory_estimate, bool can_request_more,
- std::shared_ptr<const WasmModule> module, const ModuleEnv& env) {
+bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() {
+ base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
// TODO(titzer): we force a critical memory pressure notification
// when the code space is almost exhausted, but only upon the next module
// creation. This is only for one isolate, and it should really do this for
// all isolates, at the point of commit.
constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;
- bool force_critical_notification =
- (active_ > 1) &&
- (remaining_uncommitted_code_space_.load() < kCriticalThreshold);
+ return native_modules_.size() > 1 &&
+ remaining_uncommitted_code_space_.load() < kCriticalThreshold;
+}
- if (force_critical_notification) {
+std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
+ Isolate* isolate, const WasmFeatures& enabled, size_t memory_estimate,
+ bool can_request_more, std::shared_ptr<const WasmModule> module,
+ const ModuleEnv& env) {
+ if (ShouldForceCriticalMemoryPressureNotification()) {
(reinterpret_cast<v8::Isolate*>(isolate))
->MemoryPressureNotification(MemoryPressureLevel::kCritical);
}
- VirtualMemory mem;
// If the code must be contiguous, reserve enough address space up front.
size_t vmem_size = kRequiresCodeRange ? kMaxWasmCodeMemory : memory_estimate;
- TryAllocate(vmem_size, &mem);
- if (mem.IsReserved()) {
- Address start = mem.address();
- size_t size = mem.size();
- Address end = mem.end();
- std::unique_ptr<NativeModule> ret(new NativeModule(
- isolate, can_request_more, &mem, this, std::move(module), env));
- TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", this, start,
- size);
- AssignRanges(start, end, ret.get());
- ++active_;
- return ret;
+ // Try up to three times; getting rid of dead JSArrayBuffer allocations might
+ // require two GCs because the first GC may be incremental and may have
+ // floating garbage.
+ static constexpr int kAllocationRetries = 2;
+ VirtualMemory mem;
+ for (int retries = 0;; ++retries) {
+ TryAllocate(vmem_size, &mem);
+ if (mem.IsReserved()) break;
+ if (retries == kAllocationRetries) {
+ V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
+ UNREACHABLE();
+ }
+ // Run one GC, then try the allocation again.
+ isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
+ true);
}
- V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
- return nullptr;
+ Address start = mem.address();
+ size_t size = mem.size();
+ Address end = mem.end();
+ std::unique_ptr<NativeModule> ret(new NativeModule(
+ isolate, enabled, can_request_more, &mem, this, std::move(module), env));
+ TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", this, start,
+ size);
+ base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ AssignRanges(start, end, ret.get());
+ native_modules_.emplace(ret.get());
+ return ret;
}
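The allocation now retries a bounded number of times, issuing a critical memory-pressure notification (which triggers a GC) between attempts, and only reports OOM once the retries are exhausted. The retry shape, reduced to standard C++ (the allocation and pressure hooks are fakes):

#include <cstdio>
#include <cstdlib>

// Stand-ins: TryAllocate fails until "memory pressure" has been signalled,
// mimicking an allocation that only succeeds after a GC has freed address
// space.
static bool g_pressure_notified = false;
bool TryAllocate(size_t size, void** out) {
  if (!g_pressure_notified) return false;
  *out = std::malloc(size);
  return *out != nullptr;
}
void MemoryPressureNotification() { g_pressure_notified = true; }

void* AllocateWithRetries(size_t size) {
  // Retry a bounded number of times; each failure triggers a GC-style
  // critical memory pressure notification before trying again.
  static constexpr int kAllocationRetries = 2;
  void* mem = nullptr;
  for (int retries = 0;; ++retries) {
    if (TryAllocate(size, &mem)) break;
    if (retries == kAllocationRetries) {
      std::fprintf(stderr, "out of memory\n");
      std::abort();
    }
    MemoryPressureNotification();
  }
  return mem;
}

int main() {
  void* mem = AllocateWithRetries(4096);
  std::printf("allocated %p\n", mem);
  std::free(mem);
}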
bool NativeModule::SetExecutable(bool executable) {
if (is_executable_ == executable) return true;
TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
- PageAllocator::Permission permission =
- executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
if (FLAG_wasm_write_protect_code_memory) {
+ PageAllocator::Permission permission =
+ executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
#if V8_OS_WIN
// On windows, we need to switch permissions per separate virtual memory
// reservation. This is really just a problem when the NativeModule is
@@ -890,8 +966,9 @@ bool NativeModule::SetExecutable(bool executable) {
}
void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
- DCHECK_GE(active_, 1);
- --active_;
+ base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ native_modules_.erase(native_module);
TRACE_HEAP("Freeing NativeModule %p\n", this);
for (auto& vmem : native_module->owned_code_space_) {
lookup_map_.erase(vmem.address());
@@ -900,13 +977,8 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
}
native_module->owned_code_space_.clear();
- size_t code_size = native_module->committed_code_space_;
+ size_t code_size = native_module->committed_code_space_.load();
DCHECK(IsAligned(code_size, AllocatePageSize()));
-
- if (module_code_size_mb_) {
- module_code_size_mb_->AddSample(static_cast<int>(code_size / MB));
- }
-
remaining_uncommitted_code_space_.fetch_add(code_size);
}
@@ -924,6 +996,7 @@ WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
}
NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
+ base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
if (lookup_map_.empty()) return nullptr;
auto iter = lookup_map_.upper_bound(pc);
@@ -948,6 +1021,7 @@ void WasmCodeManager::Free(VirtualMemory* mem) {
void* end = reinterpret_cast<void*>(mem->end());
size_t size = mem->size();
mem->Free();
+ memory_tracker_->ReleaseReservation(size);
TRACE_HEAP("VMem Release: %p:%p (%zu)\n", start, end, size);
}
@@ -955,17 +1029,21 @@ size_t WasmCodeManager::remaining_uncommitted_code_space() const {
return remaining_uncommitted_code_space_.load();
}
+// TODO(v8:7424): Code protection scopes are not yet supported with shared code
+// enabled and need to be revisited to work with --wasm-shared-code as well.
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
- if (native_module_ && (native_module_->modification_scope_depth_++) == 0) {
+ if (FLAG_wasm_write_protect_code_memory && native_module_ &&
+ (native_module_->modification_scope_depth_++) == 0) {
bool success = native_module_->SetExecutable(false);
CHECK(success);
}
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
- if (native_module_ && (native_module_->modification_scope_depth_--) == 1) {
+ if (FLAG_wasm_write_protect_code_memory && native_module_ &&
+ (native_module_->modification_scope_depth_--) == 1) {
bool success = native_module_->SetExecutable(true);
CHECK(success);
}
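Both the constructor and destructor are now additionally gated on --wasm-write-protect-code-memory, so the permission flips only happen when write protection is enabled, and only for the outermost nested scope. A small RAII sketch of that behaviour (flag and module type are stand-ins):

#include <cassert>
#include <iostream>

// Illustrative stand-in for the --wasm-write-protect-code-memory flag.
static bool g_write_protect_code_memory = true;

struct Module {
  int modification_scope_depth = 0;
  bool executable = true;
  void SetExecutable(bool value) {
    executable = value;
    std::cout << (value ? "code is read+execute\n" : "code is read+write\n");
  }
};

// Only the outermost scope flips the permissions, and only when write
// protection is enabled at all; nested scopes just adjust the depth counter.
class ModificationScope {
 public:
  explicit ModificationScope(Module* module) : module_(module) {
    if (g_write_protect_code_memory && module_ &&
        module_->modification_scope_depth++ == 0) {
      module_->SetExecutable(false);
    }
  }
  ~ModificationScope() {
    if (g_write_protect_code_memory && module_ &&
        --module_->modification_scope_depth == 0) {
      module_->SetExecutable(true);
    }
  }

 private:
  Module* module_;
};

int main() {
  Module module;
  {
    ModificationScope outer(&module);
    ModificationScope inner(&module);  // nested: no extra permission switch
  }
  assert(module.executable);
}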
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 7d01aa513d..ffcc05fbcd 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -9,38 +9,29 @@
#include <list>
#include <map>
#include <unordered_map>
+#include <unordered_set>
#include "src/base/macros.h"
+#include "src/builtins/builtins-definitions.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-features.h"
namespace v8 {
namespace internal {
struct CodeDesc;
class Code;
-class Histogram;
namespace wasm {
class NativeModule;
class WasmCodeManager;
+class WasmMemoryTracker;
struct WasmModule;
-// Convenience macro listing all wasm runtime stubs. Note that the first few
-// elements of the list coincide with {compiler::TrapId}, order matters.
-#define WASM_RUNTIME_STUB_LIST(V, VTRAP) \
- FOREACH_WASM_TRAPREASON(VTRAP) \
- V(WasmAllocateHeapNumber) \
- V(WasmArgumentsAdaptor) \
- V(WasmCallJavaScript) \
- V(WasmGrowMemory) \
- V(WasmStackGuard) \
- V(WasmToNumber) \
- V(DoubleToI)
-
struct AddressRange {
Address start;
Address end;
@@ -269,6 +260,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// threads executing the old code.
void PublishCode(WasmCode* code);
+ // Creates a snapshot of the current state of the code table. This is useful
+ // to get a consistent view of the table (e.g. used by the serializer).
+ std::vector<WasmCode*> SnapshotCodeTable() const;
+
WasmCode* code(uint32_t index) const {
DCHECK_LT(index, num_functions());
DCHECK_LE(module_->num_imported_functions, index);
@@ -288,12 +283,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
}
+ ptrdiff_t jump_table_offset(uint32_t func_index) const {
+ DCHECK_GE(func_index, num_imported_functions());
+ return GetCallTargetForFunction(func_index) - jump_table_start();
+ }
+
bool is_jump_table_slot(Address address) const {
return jump_table_->contains(address);
}
- uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
-
// Transition this module from code relying on trap handlers (i.e. without
// explicit memory bounds checks) to code that does not require trap handlers
// (i.e. code with explicit bounds checks).
@@ -306,6 +304,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// slot within {jump_table_}).
Address GetCallTargetForFunction(uint32_t func_index) const;
+ // Reverse lookup from a given call target (i.e. a jump table slot as the
+ // above {GetCallTargetForFunction} returns) to a function index.
+ uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
+
bool SetExecutable(bool executable);
// For cctests, where we build both WasmModule and the runtime objects
@@ -322,9 +324,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
uint32_t num_imported_functions() const {
return module_->num_imported_functions;
}
- Vector<WasmCode*> code_table() const {
- return {code_table_.get(), module_->num_declared_functions};
- }
bool use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
@@ -333,18 +332,22 @@ class V8_EXPORT_PRIVATE NativeModule final {
wire_bytes_ = std::move(wire_bytes);
}
const WasmModule* module() const { return module_.get(); }
+ WasmCodeManager* code_manager() const { return wasm_code_manager_; }
WasmCode* Lookup(Address) const;
~NativeModule();
+ const WasmFeatures& enabled_features() const { return enabled_features_; }
+
private:
friend class WasmCode;
friend class WasmCodeManager;
friend class NativeModuleModificationScope;
- NativeModule(Isolate* isolate, bool can_request_more,
- VirtualMemory* code_space, WasmCodeManager* code_manager,
+ NativeModule(Isolate* isolate, const WasmFeatures& enabled_features,
+ bool can_request_more, VirtualMemory* code_space,
+ WasmCodeManager* code_manager,
std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
@@ -368,6 +371,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
void PatchJumpTable(uint32_t func_index, Address target,
WasmCode::FlushICache);
+ Vector<WasmCode*> code_table() const {
+ return {code_table_.get(), module_->num_declared_functions};
+ }
void set_code(uint32_t index, WasmCode* code) {
DCHECK_LT(index, num_functions());
DCHECK_LE(module_->num_imported_functions, index);
@@ -375,6 +381,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
code_table_[index - module_->num_imported_functions] = code;
}
+ // Features enabled for this module. We keep a copy of the features that
+ // were enabled at the time of the creation of this native module,
+ // to be consistent across asynchronous compilations later.
+ const WasmFeatures enabled_features_;
+
// TODO(clemensh): Make this a unique_ptr (requires refactoring
// AsyncCompileJob).
std::shared_ptr<const WasmModule> module_;
@@ -405,7 +416,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::list<VirtualMemory> owned_code_space_;
WasmCodeManager* wasm_code_manager_;
- size_t committed_code_space_ = 0;
+ std::atomic<size_t> committed_code_space_{0};
int modification_scope_depth_ = 0;
bool can_request_more_memory_;
bool use_trap_handler_ = false;
@@ -417,7 +428,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
- explicit WasmCodeManager(size_t max_committed);
+ explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
+ size_t max_committed);
// Create a new NativeModule. The caller is responsible for its
// lifetime. The native module will be given some memory for code,
// which will be page size aligned. The size of the initial memory
@@ -425,7 +437,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// code. The native module may later request more memory.
// TODO(titzer): isolate is only required here for CompilationState.
std::unique_ptr<NativeModule> NewNativeModule(
- Isolate* isolate, size_t memory_estimate, bool can_request_more,
+ Isolate* isolate, const WasmFeatures& enabled_features,
+ size_t memory_estimate, bool can_request_more,
std::shared_ptr<const WasmModule> module, const ModuleEnv& env);
NativeModule* LookupNativeModule(Address pc) const;
@@ -433,9 +446,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCode* GetCodeFromStartAddress(Address pc) const;
size_t remaining_uncommitted_code_space() const;
- void SetModuleCodeSizeHistogram(Histogram* histogram) {
- module_code_size_mb_ = histogram;
- }
+ // Add a sample of all module sizes.
+ void SampleModuleSizes(Isolate* isolate) const;
+
+ // TODO(v8:7424): For now we sample module sizes in a GC callback. This will
+ // bias samples towards apps with high memory pressure. We should switch to
+ // using sampling based on regular intervals independent of the GC.
+ static void InstallSamplingGCCallback(Isolate* isolate);
+
static size_t EstimateNativeModuleSize(const WasmModule* module);
private:
@@ -450,16 +468,14 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void FreeNativeModule(NativeModule*);
void Free(VirtualMemory* mem);
void AssignRanges(Address start, Address end, NativeModule*);
+ bool ShouldForceCriticalMemoryPressureNotification();
+ WasmMemoryTracker* const memory_tracker_;
+ mutable base::Mutex native_modules_mutex_;
std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
- // Count of NativeModules not yet collected. Helps determine if it's
- // worth requesting a GC on memory pressure.
- size_t active_ = 0;
+ std::unordered_set<NativeModule*> native_modules_;
std::atomic<size_t> remaining_uncommitted_code_space_;
- // Histogram to update with the maximum used code space for each NativeModule.
- Histogram* module_code_size_mb_ = nullptr;
-
DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
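
The jump_table_offset() helper added above is defined purely in terms of the forward lookup, and the reverse lookup undoes it. A minimal standalone sketch of that relationship, assuming a hypothetical per-slot size and import count (neither is specified in this header):

#include <cstddef>
#include <cstdint>

using Address = uintptr_t;

constexpr uint32_t kNumImportedFunctions = 2;  // assumed import count
constexpr size_t kJumpTableSlotSize = 16;      // hypothetical slot size
constexpr Address kJumpTableStart = 0x10000;

// Forward lookup: a declared function's call target is its slot in the table.
constexpr Address GetCallTargetForFunction(uint32_t func_index) {
  return kJumpTableStart +
         (func_index - kNumImportedFunctions) * kJumpTableSlotSize;
}

// The new helper: offset of that slot relative to the table start.
constexpr std::ptrdiff_t jump_table_offset(uint32_t func_index) {
  return GetCallTargetForFunction(func_index) - kJumpTableStart;
}

// Reverse lookup: map a slot address back to the declaring function's index.
constexpr uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) {
  return static_cast<uint32_t>((slot_address - kJumpTableStart) /
                               kJumpTableSlotSize) +
         kNumImportedFunctions;
}

static_assert(
    GetFunctionIndexFromJumpTableSlot(GetCallTargetForFunction(5)) == 5,
    "the two lookups are inverses of each other");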
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 0233ced6ac..70794fc7ab 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -5,6 +5,9 @@
#ifndef V8_WASM_WASM_CONSTANTS_H_
#define V8_WASM_WASM_CONSTANTS_H_
+#include <cstddef>
+#include <cstdint>
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -70,7 +73,8 @@ enum SectionCode : int8_t {
// Binary encoding of name section kinds.
enum NameSectionKindCode : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
-constexpr uint32_t kWasmPageSize = 0x10000;
+constexpr size_t kWasmPageSize = 0x10000;
+constexpr uint32_t kWasmPageSizeLog2 = 16;
constexpr int kInvalidExceptionTag = -1;
// TODO(wasm): Wrap WasmCodePosition in a struct.
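
The two page-size constants above encode the same quantity; a trivial sanity check restating the values from this hunk:

#include <cstddef>
#include <cstdint>

constexpr size_t kWasmPageSize = 0x10000;  // 64 KiB
constexpr uint32_t kWasmPageSizeLog2 = 16;

static_assert(kWasmPageSize == size_t{1} << kWasmPageSizeLog2,
              "kWasmPageSize and kWasmPageSizeLog2 must stay in sync");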
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index b1f57fa8f8..0d8b1f18aa 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -72,8 +72,7 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
if (!debug_info->has_locals_names()) {
Handle<WasmModuleObject> module_object(
debug_info->wasm_instance()->module_object(), isolate);
- Handle<FixedArray> locals_names =
- wasm::DecodeLocalNames(isolate, module_object);
+ Handle<FixedArray> locals_names = DecodeLocalNames(isolate, module_object);
debug_info->set_locals_names(*locals_names);
}
@@ -290,7 +289,7 @@ class InterpreterHandle {
Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_);
// Check that this is indeed the instance which is connected to this
// interpreter.
- DCHECK_EQ(this, Managed<wasm::InterpreterHandle>::cast(
+ DCHECK_EQ(this, Managed<InterpreterHandle>::cast(
instance_obj->debug_info()->interpreter_handle())
->raw());
return instance_obj;
@@ -299,8 +298,6 @@ class InterpreterHandle {
void NotifyDebugEventListeners(WasmInterpreter::Thread* thread) {
// Enter the debugger.
DebugScope debug_scope(isolate_->debug());
- // Postpone interrupt during breakpoint processing.
- PostponeInterruptsScope postpone(isolate_);
// Check whether we hit a breakpoint.
if (isolate_->debug()->break_points_active()) {
@@ -406,7 +403,7 @@ class InterpreterHandle {
return interpreter()->GetThread(0)->NumInterpretedCalls();
}
- Handle<JSObject> GetGlobalScopeObject(wasm::InterpretedFrame* frame,
+ Handle<JSObject> GetGlobalScopeObject(InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = isolate_;
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
@@ -430,7 +427,7 @@ class InterpreterHandle {
return global_scope_object;
}
- Handle<JSObject> GetLocalScopeObject(wasm::InterpretedFrame* frame,
+ Handle<JSObject> GetLocalScopeObject(InterpretedFrame* frame,
Handle<WasmDebugInfo> debug_info) {
Isolate* isolate = isolate_;
@@ -531,7 +528,12 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
Handle<Object> handle(debug_info->interpreter_handle(), isolate);
if (handle->IsUndefined(isolate)) {
- size_t interpreter_size = 0; // TODO(titzer): estimate size properly.
+ // Use the maximum stack size to estimate the maximum size of the
+ // interpreter. The interpreter keeps its own stack internally, and the size
+ // of the stack should dominate the overall size of the interpreter. We
+ // multiply by '2' to account for the growing strategy for the backing store
+ // of the stack.
+ size_t interpreter_size = FLAG_stack_size * KB * 2;
handle = Managed<wasm::InterpreterHandle>::Allocate(
isolate, interpreter_size, isolate, *debug_info);
debug_info->set_interpreter_handle(*handle);
@@ -582,7 +584,11 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
Handle<WasmInstanceObject> instance_obj) {
Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj);
Isolate* isolate = instance_obj->GetIsolate();
- size_t interpreter_size = 0; // TODO(titzer): estimate size properly.
+ // Use the maximum stack size to estimate the maximum size of the interpreter.
+ // The interpreter keeps its own stack internally, and the size of the stack
+ // should dominate the overall size of the interpreter. We multiply by '2' to
+ // account for the growing strategy for the backing store of the stack.
+ size_t interpreter_size = FLAG_stack_size * KB * 2;
auto interp_handle = Managed<wasm::InterpreterHandle>::Allocate(
isolate, interpreter_size, isolate, *debug_info);
debug_info->set_interpreter_handle(*interp_handle);
@@ -637,13 +643,16 @@ void WasmDebugInfo::PrepareStep(StepAction step_action) {
GetInterpreterHandle(this)->PrepareStep(step_action);
}
-bool WasmDebugInfo::RunInterpreter(Address frame_pointer, int func_index,
+// static
+bool WasmDebugInfo::RunInterpreter(Isolate* isolate,
+ Handle<WasmDebugInfo> debug_info,
+ Address frame_pointer, int func_index,
Address arg_buffer) {
DCHECK_LE(0, func_index);
- Handle<WasmInstanceObject> instance(wasm_instance(),
- wasm_instance()->GetIsolate());
- return GetInterpreterHandle(this)->Execute(
- instance, frame_pointer, static_cast<uint32_t>(func_index), arg_buffer);
+ auto* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
+ Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
+ return handle->Execute(instance, frame_pointer,
+ static_cast<uint32_t>(func_index), arg_buffer);
}
std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
@@ -719,6 +728,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED));
function_data->set_wrapper_code(*new_entry_code);
function_data->set_instance(debug_info->wasm_instance());
+ function_data->set_jump_table_offset(-1);
function_data->set_function_index(-1);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("c-wasm-entry"));
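
A back-of-the-envelope version of the new interpreter size estimate above. The actual FLAG_stack_size default is not part of this patch, so the value below is only an illustrative assumption:

#include <cstddef>

constexpr size_t KB = 1024;
constexpr size_t kAssumedStackSizeFlag = 1000;  // hypothetical FLAG_stack_size (in KB)

// Mirrors "FLAG_stack_size * KB * 2" from the patch: the interpreter's own
// value stack dominates its footprint, and the factor of two accounts for the
// growth strategy of the stack's backing store.
constexpr size_t kInterpreterSizeEstimate = kAssumedStackSizeFlag * KB * 2;
static_assert(kInterpreterSizeEstimate == 2048000,
              "roughly 2 MB for a 1000 KB stack budget");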
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 8367c07cd7..4f772d9bdd 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -8,25 +8,31 @@
#include "src/compilation-statistics.h"
#include "src/objects-inl.h"
#include "src/objects/js-promise.h"
+#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
namespace wasm {
-WasmEngine::WasmEngine(std::unique_ptr<WasmCodeManager> code_manager)
- : code_manager_(std::move(code_manager)) {}
+WasmEngine::WasmEngine()
+ : code_manager_(&memory_tracker_, kMaxWasmCodeMemory) {}
-WasmEngine::~WasmEngine() = default;
+WasmEngine::~WasmEngine() {
+ // All AsyncCompileJobs have been canceled.
+ DCHECK(jobs_.empty());
+}
-bool WasmEngine::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
+bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
+ const ModuleWireBytes& bytes) {
// TODO(titzer): remove dependency on the isolate.
if (bytes.start() == nullptr || bytes.length() == 0) return false;
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), true, kWasmOrigin);
+ ModuleResult result =
+ DecodeWasmModule(enabled, bytes.start(), bytes.end(), true, kWasmOrigin,
+ isolate->counters(), allocator());
return result.ok();
}
@@ -34,20 +40,24 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kAsmJsOrigin);
+ ModuleResult result =
+ DecodeWasmModule(kAsmjsWasmFeatures, bytes.start(), bytes.end(), false,
+ kAsmJsOrigin, isolate->counters(), allocator());
CHECK(!result.failed());
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToModuleObject}.
- return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
- asm_js_script, asm_js_offset_table_bytes);
+ return CompileToModuleObject(isolate, kAsmjsWasmFeatures, thrower,
+ std::move(result.val), bytes, asm_js_script,
+ asm_js_offset_table_bytes);
}
MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
- ModuleResult result = SyncDecodeWasmModule(isolate, bytes.start(),
- bytes.end(), false, kWasmOrigin);
+ Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
+ const ModuleWireBytes& bytes) {
+ ModuleResult result =
+ DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
+ isolate->counters(), allocator());
if (result.failed()) {
thrower->CompileFailed("Wasm decoding failed", result);
return {};
@@ -55,8 +65,8 @@ MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
// Transfer ownership of the WasmModule to the {Managed<WasmModule>} generated
// in {CompileToModuleObject}.
- return CompileToModuleObject(isolate, thrower, std::move(result.val), bytes,
- Handle<Script>(), Vector<const byte>());
+ return CompileToModuleObject(isolate, enabled, thrower, std::move(result.val),
+ bytes, Handle<Script>(), Vector<const byte>());
}
MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
@@ -70,7 +80,7 @@ MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
void WasmEngine::AsyncInstantiate(
Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, nullptr);
+ ErrorThrower thrower(isolate, "WebAssembly Instantiation");
// Instantiate a TryCatch so that caught exceptions won't propagate out.
// They will still be set as pending exceptions on the isolate.
// TODO(clemensh): Avoid TryCatch, use Execution::TryCall internally to invoke
@@ -87,24 +97,24 @@ void WasmEngine::AsyncInstantiate(
return;
}
- // We either have a pending exception (if the start function threw), or an
- // exception in the ErrorThrower.
- DCHECK_EQ(1, isolate->has_pending_exception() + thrower.error());
- if (thrower.error()) {
- resolver->OnInstantiationFailed(thrower.Reify());
- } else {
- // The start function has thrown an exception. We have to move the
- // exception to the promise chain.
+ if (isolate->has_pending_exception()) {
+ // The JS code executed during instantiation has thrown an exception.
+ // We have to move the exception to the promise chain.
Handle<Object> exception(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
DCHECK(*isolate->external_caught_exception_address());
*isolate->external_caught_exception_address() = false;
resolver->OnInstantiationFailed(exception);
+ thrower.Reset();
+ } else {
+ DCHECK(thrower.error());
+ resolver->OnInstantiationFailed(thrower.Reify());
}
}
void WasmEngine::AsyncCompile(
- Isolate* isolate, std::unique_ptr<CompilationResultResolver> resolver,
+ Isolate* isolate, const WasmFeatures& enabled,
+ std::shared_ptr<CompilationResultResolver> resolver,
const ModuleWireBytes& bytes, bool is_shared) {
if (!FLAG_wasm_async_compilation) {
// Asynchronous compilation disabled; fall back on synchronous compilation.
@@ -114,12 +124,11 @@ void WasmEngine::AsyncCompile(
// Make a copy of the wire bytes to avoid concurrent modification.
std::unique_ptr<uint8_t[]> copy(new uint8_t[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
- i::wasm::ModuleWireBytes bytes_copy(copy.get(),
- copy.get() + bytes.length());
- module_object = SyncCompile(isolate, &thrower, bytes_copy);
+ ModuleWireBytes bytes_copy(copy.get(), copy.get() + bytes.length());
+ module_object = SyncCompile(isolate, enabled, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_object = SyncCompile(isolate, &thrower, bytes);
+ module_object = SyncCompile(isolate, enabled, &thrower, bytes);
}
if (thrower.error()) {
resolver->OnCompilationFailed(thrower.Reify());
@@ -132,8 +141,9 @@ void WasmEngine::AsyncCompile(
if (FLAG_wasm_test_streaming) {
std::shared_ptr<StreamingDecoder> streaming_decoder =
- isolate->wasm_engine()->StartStreamingCompilation(
- isolate, handle(isolate->context(), isolate), std::move(resolver));
+ StartStreamingCompilation(isolate, enabled,
+ handle(isolate->context(), isolate),
+ std::move(resolver));
streaming_decoder->OnBytesReceived(bytes.module_bytes());
streaming_decoder->Finish();
return;
@@ -144,20 +154,53 @@ void WasmEngine::AsyncCompile(
memcpy(copy.get(), bytes.start(), bytes.length());
AsyncCompileJob* job = CreateAsyncCompileJob(
- isolate, std::move(copy), bytes.length(),
+ isolate, enabled, std::move(copy), bytes.length(),
handle(isolate->context(), isolate), std::move(resolver));
job->Start();
}
std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
- Isolate* isolate, Handle<Context> context,
- std::unique_ptr<CompilationResultResolver> resolver) {
+ Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ std::shared_ptr<CompilationResultResolver> resolver) {
AsyncCompileJob* job =
- CreateAsyncCompileJob(isolate, std::unique_ptr<byte[]>(nullptr), 0,
- context, std::move(resolver));
+ CreateAsyncCompileJob(isolate, enabled, std::unique_ptr<byte[]>(nullptr),
+ 0, context, std::move(resolver));
return job->CreateStreamingDecoder();
}
+bool WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
+ uint32_t function_index, ExecutionTier tier) {
+ ErrorThrower thrower(isolate, "Manually requested tier up");
+ // Note we assume that "one-off" compilations can discard detected features.
+ WasmFeatures detected = kNoWasmFeatures;
+ WasmCode* ret = WasmCompilationUnit::CompileWasmFunction(
+ isolate, native_module, &detected, &thrower,
+ GetModuleEnv(native_module->compilation_state()),
+ &native_module->module()->functions[function_index], tier);
+ return ret != nullptr;
+}
+
+std::shared_ptr<NativeModule> WasmEngine::ExportNativeModule(
+ Handle<WasmModuleObject> module_object) {
+ return module_object->managed_native_module()->get();
+}
+
+Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
+ Isolate* isolate, std::shared_ptr<NativeModule> shared_module) {
+ CHECK_EQ(code_manager(), shared_module->code_manager());
+ Vector<const byte> wire_bytes = shared_module->wire_bytes();
+ Handle<Script> script = CreateWasmScript(isolate, wire_bytes);
+ Handle<WasmModuleObject> module_object =
+ WasmModuleObject::New(isolate, shared_module, script);
+
+ // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}.
+ // This requires unlocking the code space here. This should eventually be
+ // moved into the allocator.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ CompileJsToWasmWrappers(isolate, module_object);
+ return module_object;
+}
+
CompilationStatistics* WasmEngine::GetOrCreateTurboStatistics() {
base::LockGuard<base::Mutex> guard(&mutex_);
if (compilation_stats_ == nullptr) {
@@ -181,27 +224,22 @@ CodeTracer* WasmEngine::GetCodeTracer() {
return code_tracer_.get();
}
-void WasmEngine::Register(CancelableTaskManager* task_manager) {
- task_managers_.emplace_back(task_manager);
-}
-
-void WasmEngine::Unregister(CancelableTaskManager* task_manager) {
- task_managers_.remove(task_manager);
-}
-
AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
- Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
- Handle<Context> context,
- std::unique_ptr<CompilationResultResolver> resolver) {
- AsyncCompileJob* job = new AsyncCompileJob(
- isolate, std::move(bytes_copy), length, context, std::move(resolver));
+ Isolate* isolate, const WasmFeatures& enabled,
+ std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
+ std::shared_ptr<CompilationResultResolver> resolver) {
+ AsyncCompileJob* job =
+ new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
+ context, std::move(resolver));
// Pass ownership to the unique_ptr in {jobs_}.
+ base::LockGuard<base::Mutex> guard(&mutex_);
jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
return job;
}
std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
AsyncCompileJob* job) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
auto item = jobs_.find(job);
DCHECK(item != jobs_.end());
std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
@@ -209,26 +247,56 @@ std::unique_ptr<AsyncCompileJob> WasmEngine::RemoveCompileJob(
return result;
}
-void WasmEngine::AbortCompileJobsOnIsolate(Isolate* isolate) {
- // Iterate over a copy of {jobs_}, because {job->Abort} modifies {jobs_}.
- std::vector<AsyncCompileJob*> isolate_jobs;
-
+bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
for (auto& entry : jobs_) {
- if (entry.first->isolate() != isolate) continue;
- isolate_jobs.push_back(entry.first);
+ if (entry.first->isolate() == isolate) return true;
}
+ return false;
+}
- for (auto* job : isolate_jobs) job->Abort();
+void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ for (auto it = jobs_.begin(); it != jobs_.end();) {
+ if (it->first->isolate() == isolate) {
+ it = jobs_.erase(it);
+ } else {
+ ++it;
+ }
+ }
}
-void WasmEngine::TearDown() {
- // Cancel all registered task managers.
- for (auto task_manager : task_managers_) {
- task_manager->CancelAndWait();
+namespace {
+
+struct WasmEnginePointerConstructTrait final {
+ static void Construct(void* raw_ptr) {
+ auto engine_ptr = reinterpret_cast<std::shared_ptr<WasmEngine>*>(raw_ptr);
+ *engine_ptr = std::shared_ptr<WasmEngine>();
}
+};
+
+// Holds the global shared pointer to the single {WasmEngine} that is intended
+// to be shared among Isolates within the same process. The {LazyStaticInstance}
+// here is required because {std::shared_ptr} has a non-trivial initializer.
+base::LazyStaticInstance<std::shared_ptr<WasmEngine>,
+ WasmEnginePointerConstructTrait>::type
+ global_wasm_engine;
+
+} // namespace
+
+void WasmEngine::InitializeOncePerProcess() {
+ if (!FLAG_wasm_shared_engine) return;
+ global_wasm_engine.Pointer()->reset(new WasmEngine());
+}
+
+void WasmEngine::GlobalTearDown() {
+ if (!FLAG_wasm_shared_engine) return;
+ global_wasm_engine.Pointer()->reset();
+}
- // Cancel all AsyncCompileJobs.
- jobs_.clear();
+std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
+ if (FLAG_wasm_shared_engine) return global_wasm_engine.Get();
+ return std::shared_ptr<WasmEngine>(new WasmEngine());
}
} // namespace wasm
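
The process-wide sharing added above amounts to one lazily initialized shared_ptr slot that every isolate either reuses or bypasses. A standalone sketch of the same shape, using a plain function-local static in place of V8's LazyStaticInstance (names below are illustrative only):

#include <memory>

struct Engine {};                // stands in for wasm::WasmEngine
bool flag_shared_engine = true;  // stands in for FLAG_wasm_shared_engine

std::shared_ptr<Engine>& GlobalEngineSlot() {
  // One slot per process; empty until InitializeOncePerProcess() fills it.
  static std::shared_ptr<Engine> slot;
  return slot;
}

void InitializeOncePerProcess() {
  if (!flag_shared_engine) return;
  GlobalEngineSlot() = std::make_shared<Engine>();
}

void GlobalTearDown() {
  if (!flag_shared_engine) return;
  GlobalEngineSlot().reset();
}

std::shared_ptr<Engine> GetEngine() {
  // Shared mode: every isolate gets the same instance; otherwise a private one.
  if (flag_shared_engine) return GlobalEngineSlot();
  return std::make_shared<Engine>();
}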
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 4d34b4d3de..66c12404b7 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -9,6 +9,7 @@
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-memory.h"
+#include "src/wasm/wasm-tier.h"
#include "src/zone/accounting-allocator.h"
namespace v8 {
@@ -22,6 +23,7 @@ class WasmInstanceObject;
namespace wasm {
class ErrorThrower;
+struct WasmFeatures;
struct ModuleWireBytes;
class V8_EXPORT_PRIVATE CompilationResultResolver {
@@ -42,12 +44,13 @@ class V8_EXPORT_PRIVATE InstantiationResultResolver {
// loading, instantiating, and executing WASM code.
class V8_EXPORT_PRIVATE WasmEngine {
public:
- explicit WasmEngine(std::unique_ptr<WasmCodeManager> code_manager);
+ WasmEngine();
~WasmEngine();
// Synchronously validates the given bytes that represent an encoded WASM
// module.
- bool SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes);
+ bool SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
+ const ModuleWireBytes& bytes);
// Synchronously compiles the given bytes that represent a translated
// asm.js module.
@@ -59,6 +62,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Synchronously compiles the given bytes that represent an encoded WASM
// module.
MaybeHandle<WasmModuleObject> SyncCompile(Isolate* isolate,
+ const WasmFeatures& enabled,
ErrorThrower* thrower,
const ModuleWireBytes& bytes);
@@ -74,8 +78,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
// encoded WASM module.
// The {is_shared} flag indicates if the bytes backing the module could
// be shared across threads, i.e. could be concurrently modified.
- void AsyncCompile(Isolate* isolate,
- std::unique_ptr<CompilationResultResolver> resolver,
+ void AsyncCompile(Isolate* isolate, const WasmFeatures& enabled,
+ std::shared_ptr<CompilationResultResolver> resolver,
const ModuleWireBytes& bytes, bool is_shared);
// Begin an asynchronous instantiation of the given WASM module.
@@ -85,10 +89,26 @@ class V8_EXPORT_PRIVATE WasmEngine {
MaybeHandle<JSReceiver> imports);
std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
- Isolate* isolate, Handle<Context> context,
- std::unique_ptr<CompilationResultResolver> resolver);
+ Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ std::shared_ptr<CompilationResultResolver> resolver);
- WasmCodeManager* code_manager() const { return code_manager_.get(); }
+ // Compiles the function with the given index at a specific compilation tier
+ // and returns true on success, false (and pending exception) otherwise. This
+ // is mostly used for testing to force a function into a specific tier.
+ bool CompileFunction(Isolate* isolate, NativeModule* native_module,
+ uint32_t function_index, ExecutionTier tier);
+
+ // Exports the sharable parts of the given module object so that they can be
+ // transferred to a different Context/Isolate using the same engine.
+ std::shared_ptr<NativeModule> ExportNativeModule(
+ Handle<WasmModuleObject> module_object);
+
+ // Imports the shared part of a module from a different Context/Isolate using
+ // the same engine, recreating a full module object in the given Isolate.
+ Handle<WasmModuleObject> ImportNativeModule(
+ Isolate* isolate, std::shared_ptr<NativeModule> shared_module);
+
+ WasmCodeManager* code_manager() { return &code_manager_; }
WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
@@ -103,41 +123,37 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Used to redirect tracing output from {stdout} to a file.
CodeTracer* GetCodeTracer();
- // We register and unregister CancelableTaskManagers that run engine-dependent
- // tasks. These tasks need to be shutdown if the engine is shut down.
- void Register(CancelableTaskManager* task_manager);
- void Unregister(CancelableTaskManager* task_manager);
-
// Remove {job} from the list of active compile jobs.
std::unique_ptr<AsyncCompileJob> RemoveCompileJob(AsyncCompileJob* job);
- // Returns true if at lease one AsyncCompileJob is currently running.
- bool HasRunningCompileJob() const { return !jobs_.empty(); }
+ // Returns true if at least one AsyncCompileJob that belongs to the given
+ // Isolate is currently running.
+ bool HasRunningCompileJob(Isolate* isolate);
- // Cancel all AsyncCompileJobs that belong to the given Isolate. Their
- // deletion is delayed until all tasks accessing the AsyncCompileJob finish
- // their execution. This is used to clean-up the isolate to be reused.
- void AbortCompileJobsOnIsolate(Isolate*);
+ // Deletes all AsyncCompileJobs that belong to the given Isolate. All
+ // compilation is aborted; no more callbacks will be triggered. This is used
+ // for tearing down an isolate, or to clean it up to be reused.
+ void DeleteCompileJobsOnIsolate(Isolate* isolate);
- void TearDown();
+ // Call on process start and exit.
+ static void InitializeOncePerProcess();
+ static void GlobalTearDown();
+
+ // Constructs a WasmEngine instance. Depending on whether we are sharing
+ // engines, this might be a pointer to a new instance or to a shared one.
+ static std::shared_ptr<WasmEngine> GetWasmEngine();
private:
AsyncCompileJob* CreateAsyncCompileJob(
- Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
+ Isolate* isolate, const WasmFeatures& enabled,
+ std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context,
- std::unique_ptr<CompilationResultResolver> resolver);
+ std::shared_ptr<CompilationResultResolver> resolver);
- // We use an AsyncCompileJob as the key for itself so that we can delete the
- // job from the map when it is finished.
- std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>> jobs_;
- std::unique_ptr<WasmCodeManager> code_manager_;
WasmMemoryTracker memory_tracker_;
+ WasmCodeManager code_manager_;
AccountingAllocator allocator_;
- // Contains all CancelableTaskManagers that run tasks that are dependent
- // on the isolate.
- std::list<CancelableTaskManager*> task_managers_;
-
// This mutex protects all information which is mutated concurrently or
// fields that are initialized lazily on the first access.
base::Mutex mutex_;
@@ -145,6 +161,10 @@ class V8_EXPORT_PRIVATE WasmEngine {
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
+ // We use an AsyncCompileJob as the key for itself so that we can delete the
+ // job from the map when it is finished.
+ std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>> jobs_;
+
std::unique_ptr<CompilationStatistics> compilation_stats_;
std::unique_ptr<CodeTracer> code_tracer_;
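
ExportNativeModule/ImportNativeModule declared above split a module into a sharable, engine-owned part and a per-isolate wrapper that is rebuilt on import. A simplified sketch of that split with stand-in types (these are not the real V8 classes):

#include <memory>
#include <utility>
#include <vector>

// Stand-ins for NativeModule and WasmModuleObject; illustrative only.
struct NativeModuleStub {
  std::vector<unsigned char> wire_bytes;  // sharable, engine-owned state
};

struct ModuleObjectStub {
  std::shared_ptr<NativeModuleStub> native;  // shared across isolates
  bool wrappers_compiled = false;            // per-isolate state
};

// Export: hand out only the ref-counted sharable part.
std::shared_ptr<NativeModuleStub> ExportModule(const ModuleObjectStub& obj) {
  return obj.native;
}

// Import: rebuild the per-isolate wrapper around the shared part, analogous to
// re-running CompileJsToWasmWrappers in the real implementation.
ModuleObjectStub ImportModule(std::shared_ptr<NativeModuleStub> shared) {
  ModuleObjectStub obj;
  obj.native = std::move(shared);
  obj.wrappers_compiled = true;
  return obj;
}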
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 0f63c35bec..0317bb7bf5 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -10,8 +10,8 @@
#include "include/v8config.h"
#include "src/base/bits.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
+#include "src/v8memory.h"
#include "src/wasm/wasm-external-refs.h"
namespace v8 {
@@ -247,10 +247,6 @@ void float64_pow_wrapper(Address data) {
WriteUnalignedValue<double>(data, Pow(x, y));
}
-void set_thread_in_wasm_flag() { trap_handler::SetThreadInWasm(); }
-
-void clear_thread_in_wasm_flag() { trap_handler::ClearThreadInWasm(); }
-
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 438235179b..fc116b7fd8 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -67,9 +67,6 @@ uint32_t word32_ror_wrapper(Address data);
void float64_pow_wrapper(Address data);
-void set_thread_in_wasm_flag();
-void clear_thread_in_wasm_flag();
-
typedef void (*WasmTrapCallbackForTesting)();
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
new file mode 100644
index 0000000000..ec8aa8ba0c
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_FEATURE_FLAGS_H_
+#define V8_WASM_WASM_FEATURE_FLAGS_H_
+
+// The SEPARATOR argument allows generating proper comma-separated lists.
+#define FOREACH_WASM_FEATURE_FLAG(V, SEPARATOR) \
+ V(mv, "multi-value support", false) \
+ SEPARATOR \
+ V(eh, "exception handling opcodes", false) \
+ SEPARATOR \
+ V(se, "sign extension opcodes", true) \
+ SEPARATOR \
+ V(sat_f2i_conversions, "saturating float conversion opcodes", false) \
+ SEPARATOR \
+ V(threads, "thread opcodes", false) \
+ SEPARATOR \
+ V(simd, "SIMD opcodes", false) \
+ SEPARATOR \
+ V(anyref, "anyref opcodes", false) \
+ SEPARATOR \
+ V(mut_global, "import/export mutable global support", true)
+
+#endif // V8_WASM_WASM_FEATURE_FLAGS_H_
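
The SEPARATOR parameter is what lets one feature list expand both to statements and to comma-separated initializers. A reduced sketch of the trick with a two-entry list (the entries are illustrative, not the real flag set):

#define FOREACH_DEMO_FEATURE(V, SEPARATOR) \
  V(mv, "multi-value support", false)      \
  SEPARATOR                                \
  V(se, "sign extension opcodes", true)

#define COMMA ,
#define SPACE
#define DECL_FIELD(feat, desc, val) bool feat = false;
#define DEFAULT_VALUE(feat, desc, val) val

// Statement-style expansion: SPACE expands to nothing between entries.
struct DemoFeatures {
  FOREACH_DEMO_FEATURE(DECL_FIELD, SPACE)
};

// Expression-style expansion: COMMA turns the list into an initializer,
// yielding {false, true} here.
constexpr bool kDemoDefaults[] = {FOREACH_DEMO_FEATURE(DEFAULT_VALUE, COMMA)};

#undef DEFAULT_VALUE
#undef DECL_FIELD
#undef SPACE
#undef COMMA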
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
new file mode 100644
index 0000000000..6271fd0506
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -0,0 +1,40 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-features.h"
+#include "src/flags.h"
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#define COMMA ,
+#define SPACE
+#define DO_UNION(feat, desc, val) dst->feat |= src.feat;
+#define FLAG_REF(feat, desc, val) FLAG_experimental_wasm_##feat
+
+void UnionFeaturesInto(WasmFeatures* dst, const WasmFeatures& src) {
+ FOREACH_WASM_FEATURE(DO_UNION, SPACE);
+}
+
+WasmFeatures WasmFeaturesFromFlags() {
+ return WasmFeatures{FOREACH_WASM_FEATURE(FLAG_REF, COMMA)};
+}
+
+WasmFeatures WasmFeaturesFromIsolate(Isolate* isolate) {
+ WasmFeatures features = WasmFeaturesFromFlags();
+ features.threads |=
+ isolate->AreWasmThreadsEnabled(handle(isolate->context(), isolate));
+ return features;
+}
+
+#undef DO_UNION
+#undef FLAG_REF
+#undef SPACE
+#undef COMMA
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-features.h b/deps/v8/src/wasm/wasm-features.h
new file mode 100644
index 0000000000..2c6ab0f85a
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-features.h
@@ -0,0 +1,67 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_FEATURES_H_
+#define V8_WASM_WASM_FEATURES_H_
+
+// The feature flags are declared in their own header.
+#include "src/base/macros.h"
+#include "src/wasm/wasm-feature-flags.h"
+
+// All features, including features that do not have flags.
+#define FOREACH_WASM_FEATURE FOREACH_WASM_FEATURE_FLAG
+
+namespace v8 {
+namespace internal {
+class Isolate;
+namespace wasm {
+
+#define COMMA ,
+#define SPACE
+#define DECL_FIELD(feat, desc, val) bool feat = false;
+#define JUST_TRUE(feat, desc, val) true
+#define JUST_FALSE(feat, desc, val) false
+#define DECL_PARAM(feat, desc, val) bool p##feat
+#define DO_INIT(feat, desc, val) feat(p##feat)
+
+// Enabled or detected features.
+struct WasmFeatures {
+ FOREACH_WASM_FEATURE(DECL_FIELD, SPACE)
+
+ constexpr WasmFeatures() = default;
+
+ explicit constexpr WasmFeatures(FOREACH_WASM_FEATURE(DECL_PARAM, COMMA))
+ : FOREACH_WASM_FEATURE(DO_INIT, COMMA) {}
+};
+
+static constexpr WasmFeatures kAllWasmFeatures{
+ FOREACH_WASM_FEATURE(JUST_TRUE, COMMA)};
+
+static constexpr WasmFeatures kNoWasmFeatures{
+ FOREACH_WASM_FEATURE(JUST_FALSE, COMMA)};
+
+#undef JUST_TRUE
+#undef JUST_FALSE
+#undef DECL_FIELD
+#undef DECL_PARAM
+#undef DO_INIT
+#undef COMMA
+#undef SPACE
+
+static constexpr WasmFeatures kAsmjsWasmFeatures = kNoWasmFeatures;
+
+V8_EXPORT_PRIVATE WasmFeatures WasmFeaturesFromFlags();
+
+// Enables features based on both commandline flags and the isolate.
+// Precondition: A valid context must be set in {isolate->context()}.
+V8_EXPORT_PRIVATE WasmFeatures WasmFeaturesFromIsolate(Isolate* isolate);
+
+V8_EXPORT_PRIVATE void UnionFeaturesInto(WasmFeatures* dst,
+ const WasmFeatures& src);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_FEATURES_H_
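
A hedged usage sketch of the feature plumbing declared above: callers compute the enabled set from flags, decoders record what they detect, and the two sets can be merged. Everything beyond the declared names is simplified for illustration:

#include "src/wasm/wasm-features.h"

namespace v8 {
namespace internal {
namespace wasm {

void FeatureFlowSketch() {
  // Features the embedder allows, derived from the command-line flags.
  WasmFeatures enabled = WasmFeaturesFromFlags();

  // Features actually encountered while decoding; starts empty and is merged
  // into after each unit, mirroring the "detected" set threaded through above.
  WasmFeatures detected = kNoWasmFeatures;

  WasmFeatures unit = kNoWasmFeatures;
  unit.se = true;  // e.g. a sign-extension opcode was seen in one function
  UnionFeaturesInto(&detected, unit);

  if (detected.se && !enabled.se) {
    // In the real decoder this combination would be a validation error.
  }
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8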
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 581277cbab..0c7fb25b67 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -37,6 +37,12 @@ namespace wasm {
if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
} while (false)
+#if V8_TARGET_BIG_ENDIAN
+#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
+#else
+#define LANE(i, type) (i)
+#endif
+
#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
#define WASM_CTYPES(V) \
@@ -786,7 +792,8 @@ class SideTable : public ZoneObject {
case kExprBlock:
case kExprLoop: {
bool is_loop = opcode == kExprLoop;
- BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+ i.pc());
if (imm.type == kWasmVar) {
imm.sig = module->signatures[imm.sig_index];
}
@@ -801,7 +808,8 @@ class SideTable : public ZoneObject {
break;
}
case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+ i.pc());
if (imm.type == kWasmVar) {
imm.sig = module->signatures[imm.sig_index];
}
@@ -931,8 +939,7 @@ class CodeMap {
bool call_indirect_through_module_ = false;
public:
- CodeMap(Isolate* isolate, const WasmModule* module,
- const uint8_t* module_start, Zone* zone)
+ CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
: zone_(zone), module_(module), interpreter_code_(zone) {
if (module == nullptr) return;
interpreter_code_.reserve(module->functions.size());
@@ -1256,6 +1263,7 @@ class ThreadImpl {
const WasmModule* module() const { return codemap_->module(); }
void DoTrap(TrapReason trap, pc_t pc) {
+ TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
state_ = WasmInterpreter::TRAPPED;
trap_reason_ = trap;
CommitPc(pc);
@@ -1419,8 +1427,8 @@ class ThreadImpl {
len = 1 + imm.length;
if (FLAG_wasm_trace_memory) {
- wasm::MemoryTracingInfo info(imm.offset + index, false, rep);
- TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
+ MemoryTracingInfo info(imm.offset + index, false, rep);
+ TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
instance_object_->memory_start());
}
@@ -1445,8 +1453,8 @@ class ThreadImpl {
len = 1 + imm.length;
if (FLAG_wasm_trace_memory) {
- wasm::MemoryTracingInfo info(imm.offset + index, true, rep);
- TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
+ MemoryTracingInfo info(imm.offset + index, true, rep);
+ TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
instance_object_->memory_start());
}
@@ -1664,7 +1672,6 @@ class ThreadImpl {
byte* GetGlobalPtr(const WasmGlobal* global) {
if (global->mutability && global->imported) {
- DCHECK(FLAG_experimental_wasm_mut_global);
return reinterpret_cast<byte*>(
instance_object_->imported_mutable_globals()[global->index]);
} else {
@@ -1695,7 +1702,8 @@ class ThreadImpl {
++len; \
WasmValue val = Pop(); \
Simd128 s = val.to_s128(); \
- Push(WasmValue(s.to_##name().val[imm.lane])); \
+ auto ss = s.to_##name(); \
+ Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
return true; \
}
EXTRACT_LANE_CASE(I32x4, i32x4)
@@ -1711,9 +1719,9 @@ class ThreadImpl {
stype s2 = v2.to_s128().to_##name(); \
stype res; \
for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[i]; \
- auto b = s2.val[i]; \
- res.val[i] = expr; \
+ auto a = s1.val[LANE(i, s1)]; \
+ auto b = s2.val[LANE(i, s1)]; \
+ res.val[LANE(i, s1)] = expr; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
@@ -1856,7 +1864,7 @@ class ThreadImpl {
WasmValue new_val = Pop(); \
WasmValue simd_val = Pop(); \
stype s = simd_val.to_s128().to_##name(); \
- s.val[imm.lane] = new_val.to<ctype>(); \
+ s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
Push(WasmValue(Simd128(s))); \
return true; \
}
@@ -1905,8 +1913,8 @@ class ThreadImpl {
src_type s = v.to_s128().to_##name(); \
dst_type res; \
for (size_t i = 0; i < count; ++i) { \
- ctype a = s.val[start_index + i]; \
- res.val[i] = expr; \
+ ctype a = s.val[LANE(start_index + i, s)]; \
+ res.val[LANE(i, res)] = expr; \
} \
Push(WasmValue(Simd128(res))); \
return true; \
@@ -1940,23 +1948,25 @@ class ThreadImpl {
CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
a)
#undef CONVERT_CASE
-#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \
- is_unsigned) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- src_type s1 = v1.to_s128().to_##name(); \
- src_type s2 = v2.to_s128().to_##name(); \
- dst_type res; \
- int64_t min = std::numeric_limits<ctype>::min(); \
- int64_t max = std::numeric_limits<ctype>::max(); \
- for (size_t i = 0; i < count; ++i) { \
- int32_t v = i < count / 2 ? s1.val[i] : s2.val[i - count / 2]; \
- int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \
- res.val[i] = static_cast<dst_ctype>(std::max(min, std::min(max, a))); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \
+ is_unsigned) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ src_type s1 = v1.to_s128().to_##name(); \
+ src_type s2 = v2.to_s128().to_##name(); \
+ dst_type res; \
+ int64_t min = std::numeric_limits<ctype>::min(); \
+ int64_t max = std::numeric_limits<ctype>::max(); \
+ for (size_t i = 0; i < count; ++i) { \
+ int32_t v = i < count / 2 ? s1.val[LANE(i, s1)] \
+ : s2.val[LANE(i - count / 2, s2)]; \
+ int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \
+ res.val[LANE(i, res)] = \
+ static_cast<dst_ctype>(std::max(min, std::min(max, a))); \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t,
false)
@@ -1978,19 +1988,21 @@ class ThreadImpl {
Push(WasmValue(Simd128(res)));
return true;
}
-#define ADD_HORIZ_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count / 2; ++i) { \
- res.val[i] = s1.val[i * 2] + s1.val[i * 2 + 1]; \
- res.val[i + count / 2] = s2.val[i * 2] + s2.val[i * 2 + 1]; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
+#define ADD_HORIZ_CASE(op, name, stype, count) \
+ case kExpr##op: { \
+ WasmValue v2 = Pop(); \
+ WasmValue v1 = Pop(); \
+ stype s1 = v1.to_s128().to_##name(); \
+ stype s2 = v2.to_s128().to_##name(); \
+ stype res; \
+ for (size_t i = 0; i < count / 2; ++i) { \
+ res.val[LANE(i, s1)] = \
+ s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
+ res.val[LANE(i + count / 2, s1)] = \
+ s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
+ } \
+ Push(WasmValue(Simd128(res))); \
+ return true; \
}
ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
@@ -2005,8 +2017,9 @@ class ThreadImpl {
int16 res;
for (size_t i = 0; i < kSimd128Size; ++i) {
int lane = imm.shuffle[i];
- res.val[i] =
- lane < kSimd128Size ? v1.val[lane] : v2.val[lane - kSimd128Size];
+ res.val[LANE(i, v1)] = lane < kSimd128Size
+ ? v1.val[LANE(lane, v1)]
+ : v2.val[LANE(lane - kSimd128Size, v1)];
}
Push(WasmValue(Simd128(res)));
return true;
@@ -2122,9 +2135,9 @@ class ThreadImpl {
#ifdef DEBUG
// Compute the stack effect of this opcode, and verify later that the
// stack was modified accordingly.
- std::pair<uint32_t, uint32_t> stack_effect = wasm::StackEffect(
- codemap_->module(), frames_.back().code->function->sig,
- code->orig_start + pc, code->orig_end);
+ std::pair<uint32_t, uint32_t> stack_effect =
+ StackEffect(codemap_->module(), frames_.back().code->function->sig,
+ code->orig_start + pc, code->orig_end);
sp_t expected_new_stack_height =
StackHeight() - stack_effect.first + stack_effect.second;
#endif
@@ -2133,17 +2146,20 @@ class ThreadImpl {
case kExprNop:
break;
case kExprBlock: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
+ &decoder, code->at(pc));
len = 1 + imm.length;
break;
}
case kExprLoop: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
+ &decoder, code->at(pc));
len = 1 + imm.length;
break;
}
case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
+ &decoder, code->at(pc));
WasmValue cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
if (is_true) {
@@ -2302,6 +2318,7 @@ class ThreadImpl {
uint32_t entry_index = Pop().to<uint32_t>();
// Assume only one table for now.
DCHECK_LE(module()->tables.size(), 1u);
+ CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
ExternalCallResult result =
CallIndirectFunction(0, entry_index, imm.sig_index);
switch (result.type) {
@@ -2331,9 +2348,10 @@ class ThreadImpl {
byte* ptr = GetGlobalPtr(global);
WasmValue val;
switch (global->type) {
-#define CASE_TYPE(wasm, ctype) \
- case kWasm##wasm: \
- val = WasmValue(*reinterpret_cast<ctype*>(ptr)); \
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: \
+ val = WasmValue( \
+ ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr))); \
break;
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
@@ -2351,9 +2369,10 @@ class ThreadImpl {
byte* ptr = GetGlobalPtr(global);
WasmValue val = Pop();
switch (global->type) {
-#define CASE_TYPE(wasm, ctype) \
- case kWasm##wasm: \
- *reinterpret_cast<ctype*>(ptr) = val.to<ctype>(); \
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: \
+ WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
+ val.to<ctype>()); \
break;
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
@@ -2687,8 +2706,8 @@ class ThreadImpl {
ExternalCallResult CallExternalWasmFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance,
- const wasm::WasmCode* code, FunctionSig* sig) {
- if (code->kind() == wasm::WasmCode::kWasmToJsWrapper &&
+ const WasmCode* code, FunctionSig* sig) {
+ if (code->kind() == WasmCode::kWasmToJsWrapper &&
!IsJSCompatibleSignature(sig)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kWasmTrapTypeError));
@@ -2881,7 +2900,7 @@ class ThreadImpl {
HandleScope scope(isolate);
FunctionSig* signature = module()->signatures[sig_index];
- if (code->kind() == wasm::WasmCode::kFunction) {
+ if (code->kind() == WasmCode::kFunction) {
if (!instance_object_.is_identical_to(instance)) {
// Cross instance call.
return CallExternalWasmFunction(isolate, instance, code, signature);
@@ -2890,8 +2909,8 @@ class ThreadImpl {
}
// Call to external function.
- if (code->kind() == wasm::WasmCode::kInterpreterEntry ||
- code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
+ if (code->kind() == WasmCode::kInterpreterEntry ||
+ code->kind() == WasmCode::kWasmToJsWrapper) {
return CallExternalWasmFunction(isolate, instance, code, signature);
}
return {ExternalCallResult::INVALID_FUNC};
@@ -3060,12 +3079,11 @@ class WasmInterpreterInternals : public ZoneObject {
CodeMap codemap_;
ZoneVector<ThreadImpl> threads_;
- WasmInterpreterInternals(Isolate* isolate, Zone* zone,
- const WasmModule* module,
+ WasmInterpreterInternals(Zone* zone, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
Handle<WasmInstanceObject> instance_object)
: module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
- codemap_(isolate, module, module_bytes_.data(), zone),
+ codemap_(module, module_bytes_.data(), zone),
threads_(zone) {
threads_.emplace_back(zone, &codemap_, instance_object);
}
@@ -3097,8 +3115,7 @@ WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
Handle<WasmInstanceObject> instance_object)
: zone_(isolate->allocator(), ZONE_NAME),
internals_(new (&zone_) WasmInterpreterInternals(
- isolate, &zone_, module, wire_bytes,
- MakeWeak(isolate, instance_object))) {}
+ &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
@@ -3205,6 +3222,7 @@ void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) {
}
#undef TRACE
+#undef LANE
#undef FOREACH_INTERNAL_OPCODE
#undef WASM_CTYPES
#undef FOREACH_SIMPLE_BINOP
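
The LANE macro introduced at the top of this file makes SIMD lane accesses endianness-agnostic by mirroring the index on big-endian targets. A reduced sketch with a hypothetical 4-lane type:

#include <cstddef>
#include <cstdint>

struct int4 { int32_t val[4]; };  // stands in for the interpreter's 4-lane type

#if defined(V8_TARGET_BIG_ENDIAN)
#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
#else
#define LANE(i, type) (i)
#endif

int32_t ExtractLane(const int4& s, size_t i) {
  // Little-endian reads val[i]; big-endian reads the mirrored slot val[3 - i],
  // so lane 0 always refers to the same wasm-level lane.
  return s.val[LANE(i, s)];
}

#undef LANE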
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 7ff27dc769..1a20b88f10 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -4,8 +4,8 @@
#include "src/wasm/wasm-js.h"
+#include "src/api-inl.h"
#include "src/api-natives.h"
-#include "src/api.h"
#include "src/assert-scope.h"
#include "src/ast/ast.h"
#include "src/execution.h"
@@ -29,11 +29,39 @@ namespace v8 {
class WasmStreaming::WasmStreamingImpl {
public:
- void OnBytesReceived(const uint8_t* bytes, size_t size) {}
+ WasmStreamingImpl(
+ Isolate* isolate,
+ std::shared_ptr<internal::wasm::CompilationResultResolver> resolver)
+ : isolate_(isolate), resolver_(std::move(resolver)) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ streaming_decoder_ = i_isolate->wasm_engine()->StartStreamingCompilation(
+ i_isolate, enabled_features, handle(i_isolate->context(), i_isolate),
+ resolver_);
+ }
+
+ void OnBytesReceived(const uint8_t* bytes, size_t size) {
+ streaming_decoder_->OnBytesReceived(i::Vector<const uint8_t>(bytes, size));
+ }
+ void Finish() { streaming_decoder_->Finish(); }
+
+ void Abort(MaybeLocal<Value> exception) {
+ i::HandleScope scope(reinterpret_cast<i::Isolate*>(isolate_));
+ streaming_decoder_->Abort();
- void Finish() {}
+ // If no exception value is provided, we do not reject the promise. This can
+ // happen when streaming compilation gets aborted when no script execution
+ // is allowed anymore, e.g. when a browser tab gets refreshed.
+ if (exception.IsEmpty()) return;
- void Abort(MaybeLocal<Value> exception) {}
+ resolver_->OnCompilationFailed(
+ Utils::OpenHandle(*exception.ToLocalChecked()));
+ }
+
+ private:
+ Isolate* isolate_ = nullptr;
+ std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
+ std::shared_ptr<internal::wasm::CompilationResultResolver> resolver_;
};
WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl)
@@ -176,30 +204,6 @@ i::MaybeHandle<i::JSReceiver> GetValueAsImports(Local<Value> arg,
return i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
}
-void WebAssemblyCompileStreaming(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Isolate* isolate = args.GetIsolate();
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
- if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
- // Manually create a promise and reject it.
- Local<Context> context = isolate->GetCurrentContext();
- ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
- v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(resolver->GetPromise());
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compileStreaming()");
- thrower.CompileError("Wasm code generation disallowed by embedder");
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
- return;
- }
-
- MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- DCHECK_NOT_NULL(i_isolate->wasm_compile_streaming_callback());
- i_isolate->wasm_compile_streaming_callback()(args);
-}
-
namespace {
// This class resolves the result of WebAssembly.compile. It just places the
// compilation result in the supplied {promise}.
@@ -213,6 +217,8 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
}
void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
+ if (finished_) return;
+ finished_ = true;
i::MaybeHandle<i::Object> promise_result =
i::JSPromise::Resolve(promise_, result);
CHECK_EQ(promise_result.is_null(),
@@ -220,6 +226,8 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
}
void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ if (finished_) return;
+ finished_ = true;
i::MaybeHandle<i::Object> promise_result =
i::JSPromise::Reject(promise_, error_reason);
CHECK_EQ(promise_result.is_null(),
@@ -227,6 +235,7 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
}
private:
+ bool finished_ = false;
i::Handle<i::JSPromise> promise_;
};
@@ -350,6 +359,8 @@ class AsyncInstantiateCompileResultResolver
}
void OnCompilationSucceeded(i::Handle<i::WasmModuleObject> result) override {
+ if (finished_) return;
+ finished_ = true;
isolate_->wasm_engine()->AsyncInstantiate(
isolate_,
base::make_unique<InstantiateBytesResultResolver>(isolate_, promise_,
@@ -358,12 +369,15 @@ class AsyncInstantiateCompileResultResolver
}
void OnCompilationFailed(i::Handle<i::Object> error_reason) override {
+ if (finished_) return;
+ finished_ = true;
i::MaybeHandle<i::Object> promise_result =
i::JSPromise::Reject(promise_, error_reason);
CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
private:
+ bool finished_ = false;
i::Isolate* isolate_;
i::Handle<i::JSPromise> promise_;
i::MaybeHandle<i::JSReceiver> maybe_imports_;
@@ -390,7 +404,7 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(promise);
- std::unique_ptr<i::wasm::CompilationResultResolver> resolver(
+ std::shared_ptr<i::wasm::CompilationResultResolver> resolver(
new AsyncCompilationResolver(i_isolate, Utils::OpenHandle(*promise)));
bool is_shared = false;
@@ -400,8 +414,64 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
// Asynchronous compilation handles copying wire bytes if necessary.
- i_isolate->wasm_engine()->AsyncCompile(i_isolate, std::move(resolver), bytes,
- is_shared);
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features,
+ std::move(resolver), bytes, is_shared);
+}
+
+// WebAssembly.compileStreaming(Promise<Response>) -> Promise
+void WebAssemblyCompileStreaming(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
+ HandleScope scope(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ Local<Context> context = isolate->GetCurrentContext();
+
+ // Create and assign the return value of this function.
+ ASSIGN(Promise::Resolver, result_resolver, Promise::Resolver::New(context));
+ Local<Promise> promise = result_resolver->GetPromise();
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(promise);
+
+ // Prepare the CompilationResultResolver for the compilation.
+ auto resolver = std::make_shared<AsyncCompilationResolver>(
+ i_isolate, Utils::OpenHandle(*promise));
+
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ resolver->OnCompilationFailed(thrower.Reify());
+ return;
+ }
+
+ // Allocate the streaming decoder in a Managed so we can pass it to the
+ // embedder.
+ i::Handle<i::Managed<WasmStreaming>> data =
+ i::Managed<WasmStreaming>::Allocate(
+ i_isolate, 0,
+ base::make_unique<WasmStreaming::WasmStreamingImpl>(isolate,
+ resolver));
+
+ DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
+ ASSIGN(
+ v8::Function, compile_callback,
+ v8::Function::New(context, i_isolate->wasm_streaming_callback(),
+ Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1));
+
+ // The parameter may be of type {Response} or of type {Promise<Response>}.
+ // Treat either case of parameter as Promise.resolve(parameter)
+ // as per https://www.w3.org/2001/tag/doc/promises-guide#resolve-arguments
+
+ // Ending with:
+ // return Promise.resolve(parameter).then(compile_callback);
+ ASSIGN(Promise::Resolver, input_resolver, Promise::Resolver::New(context));
+ if (!input_resolver->Resolve(context, args[0]).IsJust()) return;
+
+ // We do not have any use of the result here. The {compile_callback} will
+ // start streaming compilation, which will eventually resolve the promise we
+ // set as result value.
+ USE(input_resolver->GetPromise()->Then(context, compile_callback));
}
// WebAssembly.validate(bytes) -> bool
@@ -422,6 +492,7 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
bool validated = false;
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
@@ -429,10 +500,12 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes_copy);
+ validated = i_isolate->wasm_engine()->SyncValidate(
+ i_isolate, enabled_features, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- validated = i_isolate->wasm_engine()->SyncValidate(i_isolate, bytes);
+ validated = i_isolate->wasm_engine()->SyncValidate(i_isolate,
+ enabled_features, bytes);
}
return_value.Set(Boolean::New(isolate, validated));
@@ -462,6 +535,7 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (thrower.error()) {
return;
}
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
i::MaybeHandle<i::Object> module_obj;
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
@@ -469,12 +543,12 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
memcpy(copy.get(), bytes.start(), bytes.length());
i::wasm::ModuleWireBytes bytes_copy(copy.get(),
copy.get() + bytes.length());
- module_obj =
- i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes_copy);
+ module_obj = i_isolate->wasm_engine()->SyncCompile(
+ i_isolate, enabled_features, &thrower, bytes_copy);
} else {
// The wire bytes are not shared, OK to use them directly.
- module_obj =
- i_isolate->wasm_engine()->SyncCompile(i_isolate, &thrower, bytes);
+ module_obj = i_isolate->wasm_engine()->SyncCompile(
+ i_isolate, enabled_features, &thrower, bytes);
}
if (module_obj.is_null()) return;
@@ -563,40 +637,6 @@ MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
return Utils::ToLocal(instance_object.ToHandleChecked());
}
-void WebAssemblyInstantiateCallback(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- DCHECK_GE(args.Length(), 1);
- Isolate* isolate = args.GetIsolate();
- MicrotasksScope does_not_run_microtasks(isolate,
- MicrotasksScope::kDoNotRunMicrotasks);
-
- HandleScope scope(args.GetIsolate());
-
- Local<Context> context = isolate->GetCurrentContext();
- Local<Value> module = args[0];
-
- const uint8_t* instance_str = reinterpret_cast<const uint8_t*>("instance");
- const uint8_t* module_str = reinterpret_cast<const uint8_t*>("module");
- Local<Value> instance;
- if (!WebAssemblyInstantiateImpl(isolate, module, args.Data())
- .ToLocal(&instance)) {
- return;
- }
-
- Local<Object> ret = Object::New(isolate);
- Local<String> instance_name =
- String::NewFromOneByte(isolate, instance_str,
- NewStringType::kInternalized)
- .ToLocalChecked();
- Local<String> module_name =
- String::NewFromOneByte(isolate, module_str, NewStringType::kInternalized)
- .ToLocalChecked();
-
- CHECK(ret->CreateDataProperty(context, instance_name, instance).IsJust());
- CHECK(ret->CreateDataProperty(context, module_name, module).IsJust());
- args.GetReturnValue().Set(ret);
-}
-
// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -634,27 +674,76 @@ void WebAssemblyInstantiateStreaming(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
- // we use i_isolate in DCHECKS in the ASSIGN statements.
- USE(i_isolate);
+
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
HandleScope scope(isolate);
-
Local<Context> context = isolate->GetCurrentContext();
- ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
- Local<Value> first_arg_value = args[0];
+ ScheduledErrorThrower thrower(i_isolate,
+ "WebAssembly.instantiateStreaming()");
- ASSIGN(Function, compileStreaming,
- Function::New(context, WebAssemblyCompileStreaming));
- ASSIGN(Value, compile_retval,
- compileStreaming->Call(context, args.Holder(), 1, &first_arg_value));
- Local<Promise> module_promise = Local<Promise>::Cast(compile_retval);
+ // Create and assign the return value of this function.
+ ASSIGN(Promise::Resolver, result_resolver, Promise::Resolver::New(context));
+ Local<Promise> promise = result_resolver->GetPromise();
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(promise);
- DCHECK(!module_promise.IsEmpty());
- Local<Value> data = args[1];
- ASSIGN(Function, instantiate_impl,
- Function::New(context, WebAssemblyInstantiateCallback, data));
- ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl));
- args.GetReturnValue().Set(result);
+ // Create an InstantiateResultResolver in case there is an issue with the
+ // passed parameters.
+ std::unique_ptr<i::wasm::InstantiationResultResolver> resolver(
+ new InstantiateModuleResultResolver(i_isolate,
+ Utils::OpenHandle(*promise)));
+
+ if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
+ thrower.CompileError("Wasm code generation disallowed by embedder");
+ resolver->OnInstantiationFailed(thrower.Reify());
+ return;
+ }
+
+ // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
+ Local<Value> ffi = args[1];
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(ffi, &thrower);
+
+ if (thrower.error()) {
+ resolver->OnInstantiationFailed(thrower.Reify());
+ return;
+ }
+
+ // We start compilation now, so we have no further use for the
+ // {InstantiationResultResolver}.
+ resolver.reset();
+
+ std::shared_ptr<i::wasm::CompilationResultResolver> compilation_resolver(
+ new AsyncInstantiateCompileResultResolver(
+ i_isolate, Utils::OpenHandle(*promise), maybe_imports));
+
+ // Allocate the streaming decoder in a Managed so we can pass it to the
+ // embedder.
+ i::Handle<i::Managed<WasmStreaming>> data =
+ i::Managed<WasmStreaming>::Allocate(
+ i_isolate, 0,
+ base::make_unique<WasmStreaming::WasmStreamingImpl>(
+ isolate, compilation_resolver));
+
+ DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
+ ASSIGN(
+ v8::Function, compile_callback,
+ v8::Function::New(context, i_isolate->wasm_streaming_callback(),
+ Utils::ToLocal(i::Handle<i::Object>::cast(data)), 1));
+
+ // The parameter may be of type {Response} or of type {Promise<Response>}.
+ // Treat either case of the parameter as Promise.resolve(parameter),
+ // as per https://www.w3.org/2001/tag/doc/promises-guide#resolve-arguments
+
+ // Ending with:
+ // return Promise.resolve(parameter).then(compile_callback);
+ ASSIGN(Promise::Resolver, input_resolver, Promise::Resolver::New(context));
+ if (!input_resolver->Resolve(context, args[0]).IsJust()) return;
+
+ // We do not have any use of the result here. The {compile_callback} will
+ // start streaming compilation, which will eventually resolve the promise we
+ // set as result value.
+ USE(input_resolver->GetPromise()->Then(context, compile_callback));
}
// WebAssembly.instantiate(module, imports) -> WebAssembly.Instance
@@ -667,7 +756,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
HandleScope scope(isolate);
@@ -720,7 +809,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
// {InstantiationResultResolver}.
resolver.reset();
- std::unique_ptr<i::wasm::CompilationResultResolver> compilation_resolver(
+ std::shared_ptr<i::wasm::CompilationResultResolver> compilation_resolver(
new AsyncInstantiateCompileResultResolver(
i_isolate, Utils::OpenHandle(*promise), maybe_imports));
@@ -733,8 +822,10 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
// Asynchronous compilation handles copying wire bytes if necessary.
- i_isolate->wasm_engine()->AsyncCompile(
- i_isolate, std::move(compilation_resolver), bytes, is_shared);
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features,
+ std::move(compilation_resolver), bytes,
+ is_shared);
}
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
@@ -788,9 +879,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!maybe.ToLocal(&value)) return;
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
- bool equal;
- if (!string->Equals(context, v8_str(isolate, "anyfunc")).To(&equal)) return;
- if (!equal) {
+ if (!string->StringEquals(v8_str(isolate, "anyfunc"))) {
thrower.TypeError("Descriptor property 'element' must be 'anyfunc'");
return;
}
@@ -858,7 +947,8 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
bool is_shared_memory = false;
- if (i::FLAG_experimental_wasm_threads) {
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ if (enabled_features.threads) {
// Shared property of descriptor
Local<String> shared_key = v8_str(isolate, "shared");
Maybe<bool> has_shared = descriptor->Has(context, shared_key);
@@ -938,14 +1028,11 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
- bool equal;
- if (string->Equals(context, v8_str(isolate, "i32")).To(&equal) && equal) {
+ if (string->StringEquals(v8_str(isolate, "i32"))) {
type = i::wasm::kWasmI32;
- } else if (string->Equals(context, v8_str(isolate, "f32")).To(&equal) &&
- equal) {
+ } else if (string->StringEquals(v8_str(isolate, "f32"))) {
type = i::wasm::kWasmF32;
- } else if (string->Equals(context, v8_str(isolate, "f64")).To(&equal) &&
- equal) {
+ } else if (string->StringEquals(v8_str(isolate, "f64"))) {
type = i::wasm::kWasmF64;
} else {
thrower.TypeError(
@@ -1294,19 +1381,26 @@ void WebAssemblyGlobalSetValue(
// TODO(titzer): we use the API to create the function template because the
// internal guts are too ugly to replicate here.
-static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
- FunctionCallback func) {
+static i::Handle<i::FunctionTemplateInfo> NewFunctionTemplate(
+ i::Isolate* i_isolate, FunctionCallback func) {
Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
Local<FunctionTemplate> templ = FunctionTemplate::New(isolate, func);
templ->ReadOnlyPrototype();
return v8::Utils::OpenHandle(*templ);
}
+static i::Handle<i::ObjectTemplateInfo> NewObjectTemplate(
+ i::Isolate* i_isolate) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ return v8::Utils::OpenHandle(*templ);
+}
+
namespace internal {
Handle<JSFunction> CreateFunc(Isolate* isolate, Handle<String> name,
FunctionCallback func) {
- Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+ Handle<FunctionTemplateInfo> temp = NewFunctionTemplate(isolate, func);
Handle<JSFunction> function =
ApiNatives::InstantiateFunction(temp, name).ToHandleChecked();
DCHECK(function->shared()->HasSharedName());
@@ -1365,6 +1459,15 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
Utils::ToLocal(setter_func), attributes);
}
+// Assigns a dummy instance template to the given constructor function. Used to
+// make sure the implicit receivers for the constructors in this file have an
+// instance type different from the internal one; the constructors allocate the
+// resulting object explicitly and ignore the implicit receiver.
+void SetDummyInstanceTemplate(Isolate* isolate, Handle<JSFunction> fun) {
+ Handle<ObjectTemplateInfo> instance_template = NewObjectTemplate(isolate);
+ fun->shared()->get_api_func_data()->set_instance_template(*instance_template);
+}
+
void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> context(global->native_context(), isolate);
@@ -1394,7 +1497,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
- if (isolate->wasm_compile_streaming_callback() != nullptr) {
+ if (isolate->wasm_streaming_callback() != nullptr) {
InstallFunc(isolate, webassembly, "compileStreaming",
WebAssemblyCompileStreaming, 1);
InstallFunc(isolate, webassembly, "instantiateStreaming",
@@ -1410,6 +1513,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSFunction> module_constructor =
InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1);
context->set_wasm_module_constructor(*module_constructor);
+ SetDummyInstanceTemplate(isolate, module_constructor);
JSFunction::EnsureHasInitialMap(module_constructor);
Handle<JSObject> module_proto(
JSObject::cast(module_constructor->instance_prototype()), isolate);
@@ -1429,6 +1533,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSFunction> instance_constructor =
InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance, 1);
context->set_wasm_instance_constructor(*instance_constructor);
+ SetDummyInstanceTemplate(isolate, instance_constructor);
JSFunction::EnsureHasInitialMap(instance_constructor);
Handle<JSObject> instance_proto(
JSObject::cast(instance_constructor->instance_prototype()), isolate);
@@ -1445,6 +1550,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSFunction> table_constructor =
InstallFunc(isolate, webassembly, "Table", WebAssemblyTable, 1);
context->set_wasm_table_constructor(*table_constructor);
+ SetDummyInstanceTemplate(isolate, table_constructor);
JSFunction::EnsureHasInitialMap(table_constructor);
Handle<JSObject> table_proto(
JSObject::cast(table_constructor->instance_prototype()), isolate);
@@ -1462,6 +1568,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSFunction> memory_constructor =
InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory, 1);
context->set_wasm_memory_constructor(*memory_constructor);
+ SetDummyInstanceTemplate(isolate, memory_constructor);
JSFunction::EnsureHasInitialMap(memory_constructor);
Handle<JSObject> memory_proto(
JSObject::cast(memory_constructor->instance_prototype()), isolate);
@@ -1474,10 +1581,15 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
// Setup Global
- if (i::FLAG_experimental_wasm_mut_global) {
+
+ // The context is not set up completely yet. That's why we cannot use
+ // {WasmFeaturesFromIsolate} and have to use {WasmFeaturesFromFlags} instead.
+ auto enabled_features = i::wasm::WasmFeaturesFromFlags();
+ if (enabled_features.mut_global) {
Handle<JSFunction> global_constructor =
InstallFunc(isolate, webassembly, "Global", WebAssemblyGlobal, 1);
context->set_wasm_global_constructor(*global_constructor);
+ SetDummyInstanceTemplate(isolate, global_constructor);
JSFunction::EnsureHasInitialMap(global_constructor);
Handle<JSObject> global_proto(
JSObject::cast(global_constructor->instance_prototype()), isolate);
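
WebAssembly.compileStreaming and WebAssembly.instantiateStreaming above now share one shape: wrap the embedder's wasm_streaming_callback plus its Managed<WasmStreaming> state into a v8::Function, then chain it onto Promise.resolve(argument) so that a Response and a Promise<Response> argument are handled uniformly. The standalone sketch below shows only that chaining pattern against the public V8 API; it assumes an already-entered isolate and context, and MyStreamingCallback / ChainStreamingCallback are illustrative names rather than code from this patch.

// Sketch only: the Promise.resolve(arg).then(callback) chaining used above,
// written against the public V8 API.
#include <v8.h>

void MyStreamingCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  // A real embedder callback would start streaming compilation here,
  // using info.Data() as its state (the Managed<WasmStreaming> above).
}

v8::MaybeLocal<v8::Promise> ChainStreamingCallback(
    v8::Local<v8::Context> context, v8::Local<v8::Value> argument,
    v8::Local<v8::Value> callback_data) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::EscapableHandleScope scope(isolate);
  // Wrap the callback and its state into a JS function.
  v8::Local<v8::Function> callback;
  if (!v8::Function::New(context, MyStreamingCallback, callback_data, 1)
           .ToLocal(&callback)) {
    return {};
  }
  // Promise.resolve(argument): accepts both Response and Promise<Response>.
  v8::Local<v8::Promise::Resolver> resolver;
  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return {};
  if (resolver->Resolve(context, argument).IsNothing()) return {};
  // .then(callback): the callback runs once the argument settles.
  v8::Local<v8::Promise> chained;
  if (!resolver->GetPromise()->Then(context, callback).ToLocal(&chained)) {
    return {};
  }
  return scope.Escape(chained);
}

The promise handed back to script is created separately (result_resolver above); the chained promise only exists to trigger the callback.
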
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index c1011c3f89..db99313e07 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -15,6 +15,8 @@ namespace v8 {
namespace internal {
namespace wasm {
+constexpr size_t kSpecMaxWasmMemoryPages = 65536;
+
// The following limits are imposed by V8 on WebAssembly modules.
// The limits are agreed upon with other engines for consistency.
constexpr size_t kV8MaxWasmTypes = 1000000;
@@ -26,9 +28,7 @@ constexpr size_t kV8MaxWasmExceptions = 1000000;
constexpr size_t kV8MaxWasmExceptionTypes = 1000000;
constexpr size_t kV8MaxWasmDataSegments = 100000;
// Don't use this limit directly, but use the value of FLAG_wasm_max_mem_pages.
-// Current limit mimics the maximum allowed allocation on an ArrayBuffer
-// (2GiB - 1 page).
-constexpr size_t kV8MaxWasmMemoryPages = 32767; // ~ 2 GiB
+constexpr size_t kV8MaxWasmMemoryPages = 32767; // = ~ 2 GiB
constexpr size_t kV8MaxWasmStringSize = 100000;
constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
constexpr size_t kV8MaxWasmFunctionSize = 7654321;
@@ -42,14 +42,15 @@ constexpr size_t kV8MaxWasmTableEntries = 10000000;
constexpr size_t kV8MaxWasmTables = 1;
constexpr size_t kV8MaxWasmMemories = 1;
-constexpr size_t kSpecMaxWasmMemoryPages = 65536;
static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
"v8 should not be more permissive than the spec");
constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
-constexpr size_t kV8MaxWasmMemoryBytes = kV8MaxWasmMemoryPages * kWasmPageSize;
-static_assert(kV8MaxWasmMemoryBytes <= std::numeric_limits<int32_t>::max(),
- "max memory bytes should fit in int32_t");
+constexpr uint64_t kV8MaxWasmMemoryBytes =
+ kV8MaxWasmMemoryPages * uint64_t{kWasmPageSize};
+
+constexpr uint64_t kSpecMaxWasmMemoryBytes =
+ kSpecMaxWasmMemoryPages * uint64_t{kWasmPageSize};
constexpr uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
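
The two byte-count constants above are deliberately computed in uint64_t: kSpecMaxWasmMemoryPages * kWasmPageSize is exactly 2^32 and would wrap to zero in a 32-bit size_t, while the V8 limit of 32767 pages stays just under 2 GiB. A minimal standalone check of that arithmetic (constants restated here; the static_asserts are only illustrative):

// Standalone sketch of the arithmetic behind the byte limits above.
#include <cstdint>

constexpr uint64_t kWasmPageSize = 64 * 1024;        // 64 KiB per wasm page
constexpr uint64_t kV8MaxWasmMemoryPages = 32767;    // engine limit (~2 GiB)
constexpr uint64_t kSpecMaxWasmMemoryPages = 65536;  // spec limit (4 GiB)

// Forcing 64-bit arithmetic keeps the spec maximum (exactly 2^32 bytes) from
// overflowing on targets where size_t is only 32 bits wide.
constexpr uint64_t kV8MaxWasmMemoryBytes =
    kV8MaxWasmMemoryPages * kWasmPageSize;
constexpr uint64_t kSpecMaxWasmMemoryBytes =
    kSpecMaxWasmMemoryPages * kWasmPageSize;

static_assert(kV8MaxWasmMemoryBytes == 0x7FFF0000ull, "32767 * 64 KiB");
static_assert(kSpecMaxWasmMemoryBytes == 0x100000000ull, "65536 * 64 KiB = 4 GiB");
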
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 65fc0a2600..92390cc556 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -124,17 +124,85 @@ class LinkageAllocator {
const DoubleRegister* fp, int fpc)
: gp_count_(gpc), gp_regs_(gp), fp_count_(fpc), fp_regs_(fp) {}
- bool has_more_gp_regs() const { return gp_offset_ < gp_count_; }
- bool has_more_fp_regs() const { return fp_offset_ < fp_count_; }
+ bool CanAllocateGP() const { return gp_offset_ < gp_count_; }
+ bool CanAllocateFP(MachineRepresentation rep) const {
+#if V8_TARGET_ARCH_ARM
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return extra_float_reg >= 0 || fp_offset_ < fp_count_;
+ case MachineRepresentation::kFloat64:
+ return extra_double_reg >= 0 || fp_offset_ < fp_count_;
+ case MachineRepresentation::kSimd128:
+ return ((fp_offset_ + 1) & ~1) + 1 < fp_count_;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+#endif
+ return fp_offset_ < fp_count_;
+ }
- Register NextGpReg() {
+ int NextGpReg() {
DCHECK_LT(gp_offset_, gp_count_);
- return gp_regs_[gp_offset_++];
+ return gp_regs_[gp_offset_++].code();
}
- DoubleRegister NextFpReg() {
+ int NextFpReg(MachineRepresentation rep) {
+#if V8_TARGET_ARCH_ARM
+ switch (rep) {
+ case MachineRepresentation::kFloat32: {
+ // Use the extra S-register if we can.
+ if (extra_float_reg >= 0) {
+ int reg_code = extra_float_reg;
+ extra_float_reg = -1;
+ return reg_code;
+ }
+ // Allocate a D-register and split into 2 float registers.
+ int d_reg_code = NextFpReg(MachineRepresentation::kFloat64);
+ DCHECK_GT(16, d_reg_code); // D-registers 16 - 31 can't split.
+ int reg_code = d_reg_code * 2;
+ // Save the extra S-register.
+ DCHECK_EQ(-1, extra_float_reg);
+ extra_float_reg = reg_code + 1;
+ return reg_code;
+ }
+ case MachineRepresentation::kFloat64: {
+ // Use an extra D-register if we can.
+ if (extra_double_reg >= 0) {
+ int reg_code = extra_double_reg;
+ extra_double_reg = -1;
+ return reg_code;
+ }
+ DCHECK_LT(fp_offset_, fp_count_);
+ return fp_regs_[fp_offset_++].code();
+ }
+ case MachineRepresentation::kSimd128: {
+ // Q-register must be an even-odd pair, so we must try to allocate at
+ // the end, not using extra_double_reg. If we are at an odd D-register,
+ // skip past it (saving it to extra_double_reg).
+ DCHECK_LT(((fp_offset_ + 1) & ~1) + 1, fp_count_);
+ int d_reg1_code = fp_regs_[fp_offset_++].code();
+ if (d_reg1_code % 2 != 0) {
+ // If we're misaligned then extra_double_reg must have been consumed.
+ DCHECK_EQ(-1, extra_double_reg);
+ int odd_double_reg = d_reg1_code;
+ d_reg1_code = fp_regs_[fp_offset_++].code();
+ extra_double_reg = odd_double_reg;
+ }
+ // Combine the current D-register with the next to form a Q-register.
+ int d_reg2_code = fp_regs_[fp_offset_++].code();
+ DCHECK_EQ(0, d_reg1_code % 2);
+ DCHECK_EQ(d_reg1_code + 1, d_reg2_code);
+ USE(d_reg2_code);
+ return d_reg1_code / 2;
+ }
+ default:
+ UNREACHABLE();
+ }
+#else
DCHECK_LT(fp_offset_, fp_count_);
- return fp_regs_[fp_offset_++];
+ return fp_regs_[fp_offset_++].code();
+#endif
}
// Stackslots are counted upwards starting from 0 (or the offset set by
@@ -172,6 +240,14 @@ class LinkageAllocator {
int fp_offset_ = 0;
const DoubleRegister* const fp_regs_;
+#if V8_TARGET_ARCH_ARM
+ // ARM FP register aliasing may require splitting or merging double registers.
+ // Track fragments of registers below fp_offset_ here. There can be at most
+ // one extra float and one extra double register.
+ int extra_float_reg = -1;
+ int extra_double_reg = -1;
+#endif
+
int stack_offset_ = 0;
};
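
The ARM-specific paths added to LinkageAllocator above implement the usual VFP aliasing rules: an S (float) register is carved out of a D (double) register with the unused S half parked in extra_float_reg, while a Q (SIMD) register needs an aligned even/odd D pair, and an odd D register skipped during alignment is parked in extra_double_reg for a later double. A self-contained sketch of that allocation scheme, using plain ints for register codes and a simplified representation enum in place of V8's MachineRepresentation:

#include <cassert>
#include <utility>
#include <vector>

enum class Rep { kFloat32, kFloat64, kSimd128 };

// Simplified stand-in for LinkageAllocator's ARM path. The pool holds
// D-register codes (d0, d1, ...) in allocation order.
class ArmFpAllocator {
 public:
  explicit ArmFpAllocator(std::vector<int> d_regs)
      : fp_regs_(std::move(d_regs)) {}

  bool CanAllocate(Rep rep) const {
    switch (rep) {
      case Rep::kFloat32:
        return extra_float_reg_ >= 0 || fp_offset_ < Count();
      case Rep::kFloat64:
        return extra_double_reg_ >= 0 || fp_offset_ < Count();
      case Rep::kSimd128:
        // Need an aligned even/odd D pair at or beyond the current offset.
        return ((fp_offset_ + 1) & ~1) + 1 < Count();
    }
    return false;
  }

  // Returns an S code for kFloat32, a D code for kFloat64, a Q code for kSimd128.
  int Next(Rep rep) {
    switch (rep) {
      case Rep::kFloat32: {
        if (extra_float_reg_ >= 0) {  // Use the parked S half first.
          int code = extra_float_reg_;
          extra_float_reg_ = -1;
          return code;
        }
        // Split a D register: d<n> aliases {s<2n>, s<2n+1>}.
        int d_code = Next(Rep::kFloat64);
        extra_float_reg_ = 2 * d_code + 1;  // Park the upper S half.
        return 2 * d_code;
      }
      case Rep::kFloat64: {
        if (extra_double_reg_ >= 0) {  // Use a parked odd D register first.
          int code = extra_double_reg_;
          extra_double_reg_ = -1;
          return code;
        }
        assert(fp_offset_ < Count());
        return fp_regs_[fp_offset_++];
      }
      case Rep::kSimd128: {
        int d1 = fp_regs_[fp_offset_++];
        if (d1 % 2 != 0) {  // Misaligned: park the odd D register and advance.
          assert(extra_double_reg_ == -1);
          extra_double_reg_ = d1;
          d1 = fp_regs_[fp_offset_++];
        }
        int d2 = fp_regs_[fp_offset_++];
        assert(d1 % 2 == 0 && d2 == d1 + 1);
        (void)d2;
        return d1 / 2;  // q<n> aliases {d<2n>, d<2n+1>}.
      }
    }
    return -1;
  }

 private:
  int Count() const { return static_cast<int>(fp_regs_.size()); }

  std::vector<int> fp_regs_;
  int fp_offset_ = 0;
  int extra_float_reg_ = -1;   // Parked S register from a split D register.
  int extra_double_reg_ = -1;  // Parked odd D register from SIMD alignment.
};

The parked fragments are always consumed before touching the main pool, which is what keeps the allocator from leaking register halves.
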
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index bda06e42cd..f7cc70a9e7 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -4,10 +4,12 @@
#include <limits>
-#include "src/wasm/wasm-memory.h"
+#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
namespace v8 {
@@ -18,6 +20,12 @@ namespace {
constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB
+void AddAllocationStatusSample(Isolate* isolate,
+ WasmMemoryTracker::AllocationStatus status) {
+ isolate->counters()->wasm_memory_allocation_result()->AddSample(
+ static_cast<int>(status));
+}
+
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
size_t size, bool require_full_guard_regions,
void** allocation_base,
@@ -31,6 +39,7 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
//
// To protect against 32-bit integer overflow issues, we also protect the 2GiB
// before the valid part of the memory buffer.
+ // TODO(7881): do not use static_cast<uint32_t>() here
*allocation_length =
require_full_guard_regions
? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
@@ -43,37 +52,45 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
// Let the WasmMemoryTracker know we are going to reserve a bunch of
// address space.
// Try up to three times; getting rid of dead JSArrayBuffer allocations might
- // require two GCs.
- // TODO(gc): Fix this to only require one GC (crbug.com/v8/7621).
+ // require two GCs because the first GC may be incremental and may have
+ // floating garbage.
+ static constexpr int kAllocationRetries = 2;
bool did_retry = false;
for (int trial = 0;; ++trial) {
if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
- // Collect garbage and retry.
- heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
did_retry = true;
// After first and second GC: retry.
- if (trial < 2) continue;
- // We are over the address space limit. Fail.
- //
- // When running under the correctness fuzzer (i.e.
- // --abort-on-stack-or-string-length-overflow is preset), we crash instead
- // so it is not incorrectly reported as a correctness violation. See
- // https://crbug.com/828293#c4
- if (FLAG_abort_on_stack_or_string_length_overflow) {
- FATAL("could not allocate wasm memory");
+ if (trial == kAllocationRetries) {
+ // We are over the address space limit. Fail.
+ //
+ // When running under the correctness fuzzer (i.e.
+ // --abort-on-stack-or-string-length-overflow is preset), we crash instead
+ // so it is not incorrectly reported as a correctness violation. See
+ // https://crbug.com/828293#c4
+ if (FLAG_abort_on_stack_or_string_length_overflow) {
+ FATAL("could not allocate wasm memory");
+ }
+ AddAllocationStatusSample(
+ heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
+ return nullptr;
}
- memory_tracker->AddAllocationStatusSample(
- AllocationStatus::kAddressSpaceLimitReachedFailure);
- return nullptr;
+ // Collect garbage and retry.
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
// The Reserve makes the whole region inaccessible by default.
- *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
- PageAllocator::kNoAccess);
- if (*allocation_base == nullptr) {
- memory_tracker->ReleaseReservation(*allocation_length);
- memory_tracker->AddAllocationStatusSample(AllocationStatus::kOtherFailure);
- return nullptr;
+ DCHECK_NULL(*allocation_base);
+ for (int trial = 0;; ++trial) {
+ *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+ PageAllocator::kNoAccess);
+ if (*allocation_base != nullptr) break;
+ if (trial == kAllocationRetries) {
+ memory_tracker->ReleaseReservation(*allocation_length);
+ AddAllocationStatusSample(heap->isolate(),
+ AllocationStatus::kOtherFailure);
+ return nullptr;
+ }
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
byte* memory = reinterpret_cast<byte*>(*allocation_base);
if (require_full_guard_regions) {
@@ -91,11 +108,11 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
}
}
- memory_tracker->RegisterAllocation(*allocation_base, *allocation_length,
- memory, size);
- memory_tracker->AddAllocationStatusSample(
- did_retry ? AllocationStatus::kSuccessAfterRetry
- : AllocationStatus::kSuccess);
+ memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
+ *allocation_length, memory, size);
+ AddAllocationStatusSample(heap->isolate(),
+ did_retry ? AllocationStatus::kSuccessAfterRetry
+ : AllocationStatus::kSuccess);
return memory;
}
} // namespace
@@ -118,17 +135,21 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
#elif V8_TARGET_ARCH_64_BIT
// We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
// once we fill everything up with full-sized guard regions.
- constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4GiB
+ constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
#else
- constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
+ constexpr size_t kAddressSpaceLimit = 0x90000000; // 2 GiB + 256 MiB
#endif
- size_t const old_count = reserved_address_space_.fetch_add(num_bytes);
- DCHECK_GE(old_count + num_bytes, old_count);
- if (old_count + num_bytes <= kAddressSpaceLimit) {
- return true;
- }
- reserved_address_space_ -= num_bytes;
+ int retries = 5; // cmpxchg can fail spuriously, so retry a bounded number of times.
+ do {
+ size_t old_count = reserved_address_space_;
+ if ((kAddressSpaceLimit - old_count) < num_bytes) return false;
+ if (reserved_address_space_.compare_exchange_weak(old_count,
+ old_count + num_bytes)) {
+ return true;
+ }
+ } while (retries-- > 0);
+
return false;
}
@@ -138,14 +159,15 @@ void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
DCHECK_LE(num_bytes, old_reserved);
}
-void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
+void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
+ void* allocation_base,
size_t allocation_length,
void* buffer_start,
size_t buffer_length) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
allocated_address_space_ += allocation_length;
- AddAddressSpaceSample();
+ AddAddressSpaceSample(isolate);
allocations_.emplace(buffer_start,
AllocationData{allocation_base, allocation_length,
@@ -153,12 +175,7 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
}
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
- const void* buffer_start) {
- return InternalReleaseAllocation(buffer_start);
-}
-
-WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
- const void* buffer_start) {
+ Isolate* isolate, const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
auto find_result = allocations_.find(buffer_start);
@@ -170,7 +187,10 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
DCHECK_LE(num_bytes, allocated_address_space_);
reserved_address_space_ -= num_bytes;
allocated_address_space_ -= num_bytes;
- AddAddressSpaceSample();
+ // ReleaseAllocation might be called with a nullptr as isolate if the
+ // embedder is releasing the allocation and not a specific isolate. This
+ // happens if the allocation was shared between multiple isolates (threads).
+ if (isolate) AddAddressSpaceSample(isolate);
AllocationData allocation_data = find_result->second;
allocations_.erase(find_result);
@@ -209,28 +229,21 @@ bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
return start + kWasmMaxHeapOffset < limit;
}
-bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
+bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
+ const void* buffer_start) {
if (IsWasmMemory(buffer_start)) {
- const AllocationData allocation = ReleaseAllocation(buffer_start);
+ const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
return true;
}
return false;
}
-void WasmMemoryTracker::AddAllocationStatusSample(AllocationStatus status) {
- if (allocation_result_) {
- allocation_result_->AddSample(static_cast<int>(status));
- }
-}
-
-void WasmMemoryTracker::AddAddressSpaceSample() {
- if (address_space_usage_mb_) {
- // Report address space usage in MiB so the full range fits in an int on all
- // platforms.
- address_space_usage_mb_->AddSample(
- static_cast<int>(allocated_address_space_ >> 20));
- }
+void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) {
+ // Report address space usage in MiB so the full range fits in an int on all
+ // platforms.
+ isolate->counters()->wasm_address_space_usage_mb()->AddSample(
+ static_cast<int>(allocated_address_space_ >> 20));
}
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
@@ -238,11 +251,9 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
SharedFlag shared) {
Handle<JSArrayBuffer> buffer =
isolate->factory()->NewJSArrayBuffer(shared, TENURED);
- DCHECK_GE(kMaxInt, size);
- if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
constexpr bool is_wasm_memory = true;
- JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
- static_cast<int>(size), shared, is_wasm_memory);
+ JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
+ shared, is_wasm_memory);
buffer->set_is_neuterable(false);
buffer->set_is_growable(true);
return buffer;
@@ -250,13 +261,10 @@ Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
SharedFlag shared) {
- // Check against kMaxInt, since the byte length is stored as int in the
- // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
- // line, and we don't want to fail a CHECK then.
- if (size > FLAG_wasm_max_mem_pages * kWasmPageSize || size > kMaxInt) {
- // TODO(titzer): lift restriction on maximum memory allocated here.
- return {};
- }
+ // Enforce engine-limited maximum allocation size.
+ if (size > kV8MaxWasmMemoryBytes) return {};
+ // Enforce flag-limited maximum allocation size.
+ if (size > (FLAG_wasm_max_mem_pages * uint64_t{kWasmPageSize})) return {};
WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
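
ReserveAddressSpace above now uses a bounded compare-and-swap loop rather than fetch_add followed by a corrective subtraction, so a racing reservation can no longer push the shared counter past the limit even transiently, and checking the remaining headroom avoids integer overflow. A standalone sketch of that pattern with std::atomic (the class name and the limit are illustrative, not V8 types):

#include <atomic>
#include <cstddef>

// Sketch of the bounded CAS reservation loop used by ReserveAddressSpace.
class AddressSpaceBudget {
 public:
  explicit AddressSpaceBudget(size_t limit) : limit_(limit) {}

  bool Reserve(size_t num_bytes) {
    int retries = 5;  // compare_exchange_weak may fail spuriously.
    do {
      size_t old_count = reserved_.load();
      // Checking headroom instead of old_count + num_bytes avoids overflow.
      if (limit_ - old_count < num_bytes) return false;
      if (reserved_.compare_exchange_weak(old_count, old_count + num_bytes)) {
        return true;
      }
    } while (retries-- > 0);
    return false;  // Persistent contention: treat as a reservation failure.
  }

  void Release(size_t num_bytes) { reserved_.fetch_sub(num_bytes); }

 private:
  const size_t limit_;
  std::atomic<size_t> reserved_{0};
};

Release mirrors the original fetch_sub path and relies on callers never releasing more than they reserved.
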
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 2ab24739a8..d95f7a88c8 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -11,28 +11,30 @@
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/handles.h"
-#include "src/objects/js-array.h"
+#include "src/objects/js-array-buffer.h"
namespace v8 {
namespace internal {
-
-class Histogram; // defined in counters.h
-
namespace wasm {
+// The {WasmMemoryTracker} tracks reservations and allocations for wasm memory
+// and wasm code. There is an upper limit on the total reserved memory which is
+// checked by this class. Allocations are stored so we can look them up when an
+// array buffer dies and figure out the reservation and allocation bounds for
+// that buffer.
class WasmMemoryTracker {
public:
WasmMemoryTracker() {}
- ~WasmMemoryTracker();
+ V8_EXPORT_PRIVATE ~WasmMemoryTracker();
// ReserveAddressSpace attempts to increase the reserved address space counter
- // to determine whether there is enough headroom to allocate another guarded
- // Wasm memory. Returns true if successful (meaning it is okay to go ahead and
- // allocate the buffer), false otherwise.
+ // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
+ // and reserve {num_bytes} bytes), false otherwise.
bool ReserveAddressSpace(size_t num_bytes);
- void RegisterAllocation(void* allocation_base, size_t allocation_length,
- void* buffer_start, size_t buffer_length);
+ void RegisterAllocation(Isolate* isolate, void* allocation_base,
+ size_t allocation_length, void* buffer_start,
+ size_t buffer_length);
struct AllocationData {
void* allocation_base = nullptr;
@@ -61,11 +63,11 @@ class WasmMemoryTracker {
friend WasmMemoryTracker;
};
- // Decreases the amount of reserved address space
+ // Decreases the amount of reserved address space.
void ReleaseReservation(size_t num_bytes);
- // Removes an allocation from the tracker
- AllocationData ReleaseAllocation(const void* buffer_start);
+ // Removes an allocation from the tracker.
+ AllocationData ReleaseAllocation(Isolate* isolate, const void* buffer_start);
bool IsWasmMemory(const void* buffer_start);
@@ -80,14 +82,7 @@ class WasmMemoryTracker {
// Checks if a buffer points to a Wasm memory and if so does any necessary
// work to reclaim the buffer. If this function returns false, the caller must
// free the buffer manually.
- bool FreeMemoryIfIsWasmMemory(const void* buffer_start);
-
- void SetAllocationResultHistogram(Histogram* allocation_result) {
- allocation_result_ = allocation_result;
- }
- void SetAddressSpaceUsageHistogram(Histogram* address_space_usage) {
- address_space_usage_mb_ = address_space_usage;
- }
+ bool FreeMemoryIfIsWasmMemory(Isolate* isolate, const void* buffer_start);
// Allocation results are reported to UMA
//
@@ -103,11 +98,8 @@ class WasmMemoryTracker {
kOtherFailure // Failed for an unknown reason
};
- void AddAllocationStatusSample(AllocationStatus status);
-
private:
- AllocationData InternalReleaseAllocation(const void* buffer_start);
- void AddAddressSpaceSample();
+ void AddAddressSpaceSample(Isolate* isolate);
// Clients use a two-part process. First they "reserve" the address space,
// which signifies an intent to actually allocate it. This determines whether
@@ -116,23 +108,19 @@ class WasmMemoryTracker {
//
// We should always have:
// allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
- std::atomic_size_t reserved_address_space_{0};
+ std::atomic<size_t> reserved_address_space_{0};
// Used to protect access to the allocated address space counter and
// allocation map. This is needed because Wasm memories can be freed on
// another thread by the ArrayBufferTracker.
base::Mutex mutex_;
- size_t allocated_address_space_{0};
+ size_t allocated_address_space_ = 0;
// Track Wasm memory allocation information. This is keyed by the start of the
// buffer, rather than by the start of the allocation.
std::unordered_map<const void*, AllocationData> allocations_;
- // Keep pointers to
- Histogram* allocation_result_ = nullptr;
- Histogram* address_space_usage_mb_ = nullptr; // in MiB
-
DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
};
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index db70502886..131cda747c 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -8,6 +8,7 @@
#include "src/signature.h"
#include "src/zone/zone-containers.h"
+#include "src/v8memory.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
#include "src/wasm/wasm-opcodes.h"
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index bd23345870..ab603bfb3a 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -5,12 +5,13 @@
#include <functional>
#include <memory>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
#include "src/frames-inl.h"
#include "src/objects.h"
+#include "src/objects/js-array-inl.h"
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
@@ -39,8 +40,8 @@ WireBytesRef WasmModule::LookupFunctionName(const ModuleWireBytes& wire_bytes,
uint32_t function_index) const {
if (!function_names) {
function_names.reset(new std::unordered_map<uint32_t, WireBytesRef>());
- wasm::DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
- function_names.get());
+ DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
+ function_names.get());
}
auto it = function_names->find(function_index);
if (it == function_names->end()) return WireBytesRef();
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 3020548927..b1020661ab 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -114,9 +114,9 @@ struct WasmExport {
enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
-#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
- ((origin) == wasm::kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
- : (counters)->prefix##_asm_##suffix())
+#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
+ ((origin) == kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
+ : (counters)->prefix##_asm_##suffix())
struct ModuleWireBytes;
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 96bb622afc..481d2274bf 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -9,9 +9,10 @@
#include "src/contexts-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/objects/js-array-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/managed.h"
#include "src/v8memory.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
// Has to be the last include (doesn't have include guards)
@@ -85,8 +86,7 @@ ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
// WasmMemoryObject
ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
-OPTIONAL_ACCESSORS(WasmMemoryObject, instances, FixedArrayOfWeakCells,
- kInstancesOffset)
+OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakArrayList, kInstancesOffset)
// WasmGlobalObject
ACCESSORS(WasmGlobalObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
@@ -108,36 +108,42 @@ Address WasmGlobalObject::address() const {
return Address(array_buffer()->backing_store()) + offset();
}
-int32_t WasmGlobalObject::GetI32() { return Memory::int32_at(address()); }
+int32_t WasmGlobalObject::GetI32() {
+ return ReadLittleEndianValue<int32_t>(address());
+}
-int64_t WasmGlobalObject::GetI64() { return Memory::int64_at(address()); }
+int64_t WasmGlobalObject::GetI64() {
+ return ReadLittleEndianValue<int64_t>(address());
+}
-float WasmGlobalObject::GetF32() { return Memory::float_at(address()); }
+float WasmGlobalObject::GetF32() {
+ return ReadLittleEndianValue<float>(address());
+}
-double WasmGlobalObject::GetF64() { return Memory::double_at(address()); }
+double WasmGlobalObject::GetF64() {
+ return ReadLittleEndianValue<double>(address());
+}
void WasmGlobalObject::SetI32(int32_t value) {
- Memory::int32_at(address()) = value;
+ WriteLittleEndianValue<int32_t>(address(), value);
}
void WasmGlobalObject::SetI64(int64_t value) {
- Memory::int64_at(address()) = value;
+ WriteLittleEndianValue<int64_t>(address(), value);
}
void WasmGlobalObject::SetF32(float value) {
- Memory::float_at(address()) = value;
+ WriteLittleEndianValue<float>(address(), value);
}
void WasmGlobalObject::SetF64(double value) {
- Memory::double_at(address()) = value;
+ WriteLittleEndianValue<double>(address(), value);
}
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, uint32_t,
- kMemorySizeOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, uint32_t,
- kMemoryMaskOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, roots_array_address, Address,
kRootsArrayAddressOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
@@ -156,8 +162,8 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids,
uint32_t*, kIndirectFunctionTableSigIdsOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
Address*, kIndirectFunctionTableTargetsOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_adjusted_start, Address,
- kJumpTableAdjustedStartOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_start, Address,
+ kJumpTableStartOffset)
ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject,
kModuleObjectOffset)
@@ -207,6 +213,8 @@ ImportedFunctionEntry::ImportedFunctionEntry(
ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
kInstanceOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
+ kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
// WasmDebugInfo
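
The WasmGlobalObject accessors above replace direct Memory::*_at loads and stores with ReadLittleEndianValue/WriteLittleEndianValue, so that globals keep the little-endian layout the wasm spec requires even on big-endian hosts. A freestanding sketch of what such helpers amount to, assuming the GCC/Clang byte-order macros (V8's real helpers differ in detail):

#include <algorithm>
#include <cstring>

// Sketch of little-endian accessors in the spirit of Read/WriteLittleEndianValue.
template <typename T>
T ReadLittleEndian(const void* address) {
  T value;
  std::memcpy(&value, address, sizeof(T));  // memcpy tolerates unaligned addresses
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  unsigned char* bytes = reinterpret_cast<unsigned char*>(&value);
  std::reverse(bytes, bytes + sizeof(T));  // swap to host order on big-endian
#endif
  return value;
}

template <typename T>
void WriteLittleEndian(void* address, T value) {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  unsigned char* bytes = reinterpret_cast<unsigned char*>(&value);
  std::reverse(bytes, bytes + sizeof(T));  // store in little-endian order
#endif
  std::memcpy(address, &value, sizeof(T));
}
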
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 7cd2fecb7f..4cd66a81c5 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -12,6 +12,7 @@
#include "src/debug/debug-interface.h"
#include "src/objects-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/shared-function-info.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
@@ -177,9 +178,10 @@ enum DispatchTableElements : int {
// static
Handle<WasmModuleObject> WasmModuleObject::New(
- Isolate* isolate, std::shared_ptr<const wasm::WasmModule> shared_module,
- wasm::ModuleEnv& env, OwnedVector<const uint8_t> wire_bytes,
- Handle<Script> script, Handle<ByteArray> asm_js_offset_table) {
+ Isolate* isolate, const wasm::WasmFeatures& enabled,
+ std::shared_ptr<const wasm::WasmModule> shared_module, wasm::ModuleEnv& env,
+ OwnedVector<const uint8_t> wire_bytes, Handle<Script> script,
+ Handle<ByteArray> asm_js_offset_table) {
DCHECK_EQ(shared_module.get(), env.module);
// Create a new {NativeModule} first.
@@ -187,7 +189,7 @@ Handle<WasmModuleObject> WasmModuleObject::New(
isolate->wasm_engine()->code_manager()->EstimateNativeModuleSize(
env.module);
auto native_module = isolate->wasm_engine()->code_manager()->NewNativeModule(
- isolate, native_memory_estimate,
+ isolate, enabled, native_memory_estimate,
wasm::NativeModule::kCanAllocateMoreMemory, std::move(shared_module),
env);
native_module->set_wire_bytes(std::move(wire_bytes));
@@ -210,15 +212,6 @@ Handle<WasmModuleObject> WasmModuleObject::New(
static_cast<int>(native_module->module()->num_exported_functions);
Handle<FixedArray> export_wrappers =
isolate->factory()->NewFixedArray(export_wrapper_size, TENURED);
- Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
- isolate->factory()->NewJSObject(isolate->wasm_module_constructor()));
- module_object->set_export_wrappers(*export_wrappers);
- if (script->type() == Script::TYPE_WASM) {
- script->set_wasm_module_object(*module_object);
- }
- module_object->set_script(*script);
- module_object->set_weak_instance_list(
- ReadOnlyRoots(isolate).empty_weak_array_list());
// Use the given shared {NativeModule}, but increase its reference count by
// allocating a new {Managed<T>} that the {WasmModuleObject} references.
@@ -230,6 +223,16 @@ Handle<WasmModuleObject> WasmModuleObject::New(
Handle<Managed<wasm::NativeModule>> managed_native_module =
Managed<wasm::NativeModule>::FromSharedPtr(isolate, memory_estimate,
std::move(native_module));
+
+ Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
+ isolate->factory()->NewJSObject(isolate->wasm_module_constructor()));
+ module_object->set_export_wrappers(*export_wrappers);
+ if (script->type() == Script::TYPE_WASM) {
+ script->set_wasm_module_object(*module_object);
+ }
+ module_object->set_script(*script);
+ module_object->set_weak_instance_list(
+ ReadOnlyRoots(isolate).empty_weak_array_list());
module_object->set_managed_native_module(*managed_native_module);
return module_object;
}
@@ -674,7 +677,11 @@ Handle<String> WasmModuleObject::GetFunctionName(
MaybeHandle<String> name =
GetFunctionNameOrNull(isolate, module_object, func_index);
if (!name.is_null()) return name.ToHandleChecked();
- return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
+ EmbeddedVector<char, 32> buffer;
+ int length = SNPrintF(buffer, "wasm-function[%u]", func_index);
+ return isolate->factory()
+ ->NewStringFromOneByte(Vector<uint8_t>::cast(buffer.SubVector(0, length)))
+ .ToHandleChecked();
}
Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
@@ -874,19 +881,19 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
uint32_t maximum_pages) {
if (!old_buffer->is_growable()) return {};
void* old_mem_start = old_buffer->backing_store();
- uint32_t old_size = 0;
- CHECK(old_buffer->byte_length()->ToUint32(&old_size));
- DCHECK_EQ(0, old_size % wasm::kWasmPageSize);
- uint32_t old_pages = old_size / wasm::kWasmPageSize;
- DCHECK_GE(std::numeric_limits<uint32_t>::max(),
- old_size + pages * wasm::kWasmPageSize);
- if (old_pages > maximum_pages || pages > maximum_pages - old_pages) return {};
- size_t new_size =
- static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
- if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
- new_size > kMaxInt) {
+ size_t old_size = old_buffer->byte_length()->Number();
+ CHECK_GE(wasm::kV8MaxWasmMemoryBytes, old_size);
+ CHECK_EQ(0, old_size % wasm::kWasmPageSize);
+ size_t old_pages = old_size / wasm::kWasmPageSize;
+ if (old_pages > maximum_pages || // already reached maximum
+ (pages > maximum_pages - old_pages) || // exceeds remaining
+ (pages > FLAG_wasm_max_mem_pages - old_pages)) { // exceeds limit
return {};
}
+ size_t new_size =
+ static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
+ CHECK_GE(wasm::kV8MaxWasmMemoryBytes, new_size);
+
// Reusing the backing store from externalized buffers causes problems with
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
@@ -939,19 +946,22 @@ MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
}
// May GC, because SetSpecializationMemInfoFrom may GC
-void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
+void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
buffer->byte_length()->Number());
#if DEBUG
- // To flush out bugs earlier, in DEBUG mode, check that all pages of the
- // memory are accessible by reading and writing one byte on each page.
- byte* mem_start = instance->memory_start();
- uintptr_t mem_size = instance->memory_size();
- for (uint32_t offset = 0; offset < mem_size; offset += wasm::kWasmPageSize) {
- byte val = mem_start[offset];
- USE(val);
- mem_start[offset] = val;
+ if (!FLAG_mock_arraybuffer_allocator) {
+ // To flush out bugs earlier, in DEBUG mode, check that all pages of the
+ // memory are accessible by reading and writing one byte on each page.
+ // Don't do this if the mock ArrayBuffer allocator is enabled.
+ byte* mem_start = instance->memory_start();
+ size_t mem_size = instance->memory_size();
+ for (size_t offset = 0; offset < mem_size; offset += wasm::kWasmPageSize) {
+ byte val = mem_start[offset];
+ USE(val);
+ mem_start[offset] = val;
+ }
}
#endif
}
@@ -971,14 +981,9 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
isolate->factory()->NewJSObject(memory_ctor, TENURED));
Handle<JSArrayBuffer> buffer;
- if (maybe_buffer.is_null()) {
+ if (!maybe_buffer.ToHandle(&buffer)) {
// If no buffer was provided, create a 0-length one.
buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
- } else {
- buffer = maybe_buffer.ToHandleChecked();
- // Paranoid check that the buffer size makes sense.
- uint32_t mem_size = 0;
- CHECK(buffer->byte_length()->ToUint32(&mem_size));
}
memory_obj->set_array_buffer(*buffer);
memory_obj->set_maximum_pages(maximum);
@@ -1019,22 +1024,22 @@ bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
void WasmMemoryObject::AddInstance(Isolate* isolate,
Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> instance) {
- Handle<FixedArrayOfWeakCells> old_instances =
+ Handle<WeakArrayList> old_instances =
memory->has_instances()
- ? Handle<FixedArrayOfWeakCells>(memory->instances(), isolate)
- : Handle<FixedArrayOfWeakCells>::null();
- Handle<FixedArrayOfWeakCells> new_instances =
- FixedArrayOfWeakCells::Add(isolate, old_instances, instance);
+ ? Handle<WeakArrayList>(memory->instances(), isolate)
+ : handle(ReadOnlyRoots(isolate->heap()).empty_weak_array_list(),
+ isolate);
+ Handle<WeakArrayList> new_instances = WeakArrayList::AddToEnd(
+ isolate, old_instances, MaybeObjectHandle::Weak(instance));
memory->set_instances(*new_instances);
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate);
- SetInstanceMemory(isolate, instance, buffer);
+ SetInstanceMemory(instance, buffer);
}
-void WasmMemoryObject::RemoveInstance(Isolate* isolate,
- Handle<WasmMemoryObject> memory,
+void WasmMemoryObject::RemoveInstance(Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> instance) {
if (memory->has_instances()) {
- memory->instances()->Remove(instance);
+ memory->instances()->RemoveOne(MaybeObjectHandle::Weak(instance));
}
}
@@ -1060,14 +1065,17 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
}
if (memory_object->has_instances()) {
- Handle<FixedArrayOfWeakCells> instances(memory_object->instances(),
- isolate);
- for (int i = 0; i < instances->Length(); i++) {
- Object* elem = instances->Get(i);
- if (!elem->IsWasmInstanceObject()) continue;
- Handle<WasmInstanceObject> instance(WasmInstanceObject::cast(elem),
- isolate);
- SetInstanceMemory(isolate, instance, new_buffer);
+ Handle<WeakArrayList> instances(memory_object->instances(), isolate);
+ for (int i = 0; i < instances->length(); i++) {
+ MaybeObject* elem = instances->Get(i);
+ HeapObject* heap_object;
+ if (elem->ToWeakHeapObject(&heap_object)) {
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(heap_object), isolate);
+ SetInstanceMemory(instance, new_buffer);
+ } else {
+ DCHECK(elem->IsClearedWeakHeapObject());
+ }
}
}
memory_object->set_array_buffer(*new_buffer);
@@ -1194,14 +1202,25 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
return true;
}
-void WasmInstanceObject::SetRawMemory(byte* mem_start, uint32_t mem_size) {
- DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
- uint32_t mem_size64 = mem_size;
- uint32_t mem_mask64 = base::bits::RoundUpToPowerOfTwo32(mem_size) - 1;
- DCHECK_LE(mem_size, mem_mask64 + 1);
+void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) {
+ CHECK_LE(mem_size, wasm::kV8MaxWasmMemoryBytes);
+#if V8_HOST_ARCH_64_BIT
+ uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1;
set_memory_start(mem_start);
- set_memory_size(mem_size64);
+ set_memory_size(mem_size);
set_memory_mask(mem_mask64);
+#else
+ // Must handle memory > 2GiB specially.
+ CHECK_LE(mem_size, size_t{kMaxUInt32});
+ uint32_t mem_mask32 =
+ (mem_size > 2 * size_t{GB})
+ ? 0xFFFFFFFFu
+ : base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(mem_size)) -
+ 1;
+ set_memory_start(mem_start);
+ set_memory_size(mem_size);
+ set_memory_mask(mem_mask32);
+#endif
}
const WasmModule* WasmInstanceObject::module() {
@@ -1264,10 +1283,8 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_module_object(*module_object);
instance->set_undefined_value(ReadOnlyRoots(isolate).undefined_value());
instance->set_null_value(ReadOnlyRoots(isolate).null_value());
- instance->set_jump_table_adjusted_start(
- module_object->native_module()->jump_table_start() -
- wasm::JumpTableAssembler::kJumpTableSlotSize *
- module->num_imported_functions);
+ instance->set_jump_table_start(
+ module_object->native_module()->jump_table_start());
// Insert the new instance into the modules weak list of instances.
// TODO(mstarzinger): Allow to reuse holes in the {WeakArrayList} below.
@@ -1298,8 +1315,7 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
// the next GC cycle, so we need to manually break some links (such as
// the weak references from {WasmMemoryObject::instances}.
if (instance->has_memory_object()) {
- WasmMemoryObject::RemoveInstance(isolate,
- handle(instance->memory_object(), isolate),
+ WasmMemoryObject::RemoveInstance(handle(instance->memory_object(), isolate),
handle(instance, isolate));
}
@@ -1353,11 +1369,21 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
MaybeHandle<String> maybe_name, int func_index, int arity,
Handle<Code> export_wrapper) {
DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ int num_imported_functions = instance->module()->num_imported_functions;
+ int jump_table_offset = -1;
+ if (func_index >= num_imported_functions) {
+ ptrdiff_t jump_table_diff =
+ instance->module_object()->native_module()->jump_table_offset(
+ func_index);
+ DCHECK(jump_table_diff >= 0 && jump_table_diff <= INT_MAX);
+ jump_table_offset = static_cast<int>(jump_table_diff);
+ }
Handle<WasmExportedFunctionData> function_data =
Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED));
function_data->set_wrapper_code(*export_wrapper);
function_data->set_instance(*instance);
+ function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
Handle<String> name;
if (!maybe_name.ToHandle(&name)) {
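
SetRawMemory above now takes a size_t size and derives the access mask as the size rounded up to a power of two, minus one, so a bounds-masked offset always stays within the rounded region; on 32-bit hosts, sizes above 2 GiB fall back to an all-ones mask. A standalone sketch of the 64-bit path (RoundUpToPowerOfTwo64 restated here in its classic bit-smearing form, not V8's implementation):

#include <cstdint>

// Round a value up to the next power of two (64-bit).
constexpr uint64_t RoundUpToPowerOfTwo64(uint64_t value) {
  if (value <= 1) return 1;
  value--;
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value |= value >> 32;
  return value + 1;
}

// The memory mask covers every in-bounds offset: offset & mask == offset
// whenever offset < mem_size, as in WasmInstanceObject::SetRawMemory.
constexpr uint64_t MemoryMask(uint64_t mem_size) {
  return RoundUpToPowerOfTwo64(mem_size) - 1;
}

static_assert(MemoryMask(64 * 1024) == 0xFFFF, "one wasm page");
static_assert(MemoryMask(3 * 64 * 1024) == 0x3FFFF, "rounds up to 256 KiB");
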
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index ee884ec0dd..a493f97e95 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -30,11 +30,11 @@ class SignatureMap;
class WireBytesRef;
class WasmInterpreter;
using FunctionSig = Signature<ValueType>;
+struct WasmFeatures;
} // namespace wasm
class BreakPoint;
class JSArrayBuffer;
-class FixedArrayOfWeakCells;
class SeqOneByteString;
class WasmDebugInfo;
class WasmInstanceObject;
@@ -134,9 +134,10 @@ class WasmModuleObject : public JSObject {
// Creates a new {WasmModuleObject} with a new {NativeModule} underneath.
static Handle<WasmModuleObject> New(
- Isolate* isolate, std::shared_ptr<const wasm::WasmModule> module,
- wasm::ModuleEnv& env, OwnedVector<const uint8_t> wire_bytes,
- Handle<Script> script, Handle<ByteArray> asm_js_offset_table);
+ Isolate* isolate, const wasm::WasmFeatures& enabled,
+ std::shared_ptr<const wasm::WasmModule> module, wasm::ModuleEnv& env,
+ OwnedVector<const uint8_t> wire_bytes, Handle<Script> script,
+ Handle<ByteArray> asm_js_offset_table);
// Creates a new {WasmModuleObject} for an existing {NativeModule} that is
// reference counted and might be shared between multiple Isolates.
@@ -174,8 +175,8 @@ class WasmModuleObject : public JSObject {
uint32_t func_index);
// Get the function name of the function identified by the given index.
- // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
- // valid UTF-8 string.
+ // Returns "wasm-function[func_index]" if the function is unnamed or the
+ // name is not a valid UTF-8 string.
static Handle<String> GetFunctionName(Isolate*, Handle<WasmModuleObject>,
uint32_t func_index);
@@ -284,7 +285,7 @@ class WasmMemoryObject : public JSObject {
DECL_ACCESSORS(array_buffer, JSArrayBuffer)
DECL_INT_ACCESSORS(maximum_pages)
- DECL_OPTIONAL_ACCESSORS(instances, FixedArrayOfWeakCells)
+ DECL_OPTIONAL_ACCESSORS(instances, WeakArrayList)
// Layout description.
#define WASM_MEMORY_OBJECT_FIELDS(V) \
@@ -301,7 +302,7 @@ class WasmMemoryObject : public JSObject {
static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
// Remove an instance from the internal (weak) list.
- static void RemoveInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
+ static void RemoveInstance(Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
uint32_t current_pages();
inline bool has_maximum_pages();
@@ -390,8 +391,8 @@ class WasmInstanceObject : public JSObject {
DECL_ACCESSORS(null_value, Oddball)
DECL_ACCESSORS(centry_stub, Code)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
- DECL_PRIMITIVE_ACCESSORS(memory_size, uint32_t)
- DECL_PRIMITIVE_ACCESSORS(memory_mask, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
+ DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
DECL_PRIMITIVE_ACCESSORS(roots_array_address, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
@@ -401,7 +402,7 @@ class WasmInstanceObject : public JSObject {
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*)
DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*)
- DECL_PRIMITIVE_ACCESSORS(jump_table_adjusted_start, Address)
+ DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address)
// Dispatched behavior.
DECL_PRINTER(WasmInstanceObject)
@@ -426,8 +427,8 @@ class WasmInstanceObject : public JSObject {
V(kCEntryStubOffset, kPointerSize) \
V(kFirstUntaggedOffset, 0) /* marker */ \
V(kMemoryStartOffset, kPointerSize) /* untagged */ \
- V(kMemorySizeOffset, kUInt32Size) /* untagged */ \
- V(kMemoryMaskOffset, kUInt32Size) /* untagged */ \
+ V(kMemorySizeOffset, kSizetSize) /* untagged */ \
+ V(kMemoryMaskOffset, kSizetSize) /* untagged */ \
V(kRootsArrayAddressOffset, kPointerSize) /* untagged */ \
V(kStackLimitAddressOffset, kPointerSize) /* untagged */ \
V(kRealStackLimitAddressOffset, kPointerSize) /* untagged */ \
@@ -436,7 +437,7 @@ class WasmInstanceObject : public JSObject {
V(kImportedMutableGlobalsOffset, kPointerSize) /* untagged */ \
V(kIndirectFunctionTableSigIdsOffset, kPointerSize) /* untagged */ \
V(kIndirectFunctionTableTargetsOffset, kPointerSize) /* untagged */ \
- V(kJumpTableAdjustedStartOffset, kPointerSize) /* untagged */ \
+ V(kJumpTableStartOffset, kPointerSize) /* untagged */ \
V(kIndirectFunctionTableSizeOffset, kUInt32Size) /* untagged */ \
V(k64BitArchPaddingOffset, kPointerSize - kUInt32Size) /* padding */ \
V(kSize, 0)
@@ -452,7 +453,7 @@ class WasmInstanceObject : public JSObject {
bool has_indirect_function_table();
- void SetRawMemory(byte* mem_start, uint32_t mem_size);
+ void SetRawMemory(byte* mem_start, size_t mem_size);
// Get the debug info associated with the given wasm object.
// If no debug info exists yet, it is created automatically.
@@ -496,6 +497,7 @@ class WasmExportedFunctionData : public Struct {
public:
DECL_ACCESSORS(wrapper_code, Code);
DECL_ACCESSORS(instance, WasmInstanceObject)
+ DECL_INT_ACCESSORS(jump_table_offset);
DECL_INT_ACCESSORS(function_index);
DECL_CAST(WasmExportedFunctionData)
@@ -505,10 +507,11 @@ class WasmExportedFunctionData : public Struct {
DECL_VERIFIER(WasmExportedFunctionData)
// Layout description.
-#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
- V(kWrapperCodeOffset, kPointerSize) \
- V(kInstanceOffset, kPointerSize) \
- V(kFunctionIndexOffset, kPointerSize) /* Smi */ \
+#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
+ V(kWrapperCodeOffset, kPointerSize) \
+ V(kInstanceOffset, kPointerSize) \
+ V(kJumpTableOffsetOffset, kPointerSize) /* Smi */ \
+ V(kFunctionIndexOffset, kPointerSize) /* Smi */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -516,8 +519,11 @@ class WasmExportedFunctionData : public Struct {
#undef WASM_EXPORTED_FUNCTION_DATA_FIELDS
};
-class WasmDebugInfo : public Struct {
+class WasmDebugInfo : public Struct, public NeverReadOnlySpaceObject {
public:
+ using NeverReadOnlySpaceObject::GetHeap;
+ using NeverReadOnlySpaceObject::GetIsolate;
+
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
DECL_ACCESSORS(interpreter_handle, Object);
DECL_ACCESSORS(interpreted_functions, Object);
@@ -571,8 +577,9 @@ class WasmDebugInfo : public Struct {
// interpreter for unwinding and frame inspection.
// Returns true if exited regularly, false if a trap occurred. In the latter
// case, a pending exception will have been set on the isolate.
- bool RunInterpreter(Address frame_pointer, int func_index,
- Address arg_buffer);
+ static bool RunInterpreter(Isolate* isolate, Handle<WasmDebugInfo>,
+ Address frame_pointer, int func_index,
+ Address arg_buffer);
// Get the stack of the wasm interpreter as pairs of <function index, byte
// offset>. The list is ordered bottom-to-top, i.e. caller before callee.
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 22c906e270..650cb629f6 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -374,7 +374,7 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
bool IsJSCompatibleSignature(const FunctionSig* sig) {
for (auto type : sig->all()) {
- if (type == wasm::kWasmI64 || type == wasm::kWasmS128) return false;
+ if (type == kWasmI64 || type == kWasmS128) return false;
}
return sig->return_count() <= 1;
}
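Concretely, the predicate above rejects any signature that mentions i64 or s128, or that returns more than one value. For example (illustrative signatures, not from the patch): (i32, f64) -> i32 is JS-compatible, (i64) -> i32 is rejected because of the i64 parameter, and (i32) -> (i32, i32) is rejected because of the multi-value return.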
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 3fe63b9c71..314f320752 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -145,7 +145,7 @@ void ErrorThrower::Reset() {
error_msg_.clear();
}
-ErrorThrower::ErrorThrower(ErrorThrower&& other)
+ErrorThrower::ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT
: isolate_(other.isolate_),
context_(other.context_),
error_type_(other.error_type_),
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index a1e5a885af..694a8b7f76 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -26,10 +26,10 @@ class V8_EXPORT_PRIVATE ResultBase {
protected:
ResultBase() = default;
- ResultBase& operator=(ResultBase&& other) = default;
+ ResultBase& operator=(ResultBase&& other) V8_NOEXCEPT = default;
public:
- ResultBase(ResultBase&& other)
+ ResultBase(ResultBase&& other) V8_NOEXCEPT
: error_offset_(other.error_offset_),
error_msg_(std::move(other.error_msg_)) {}
@@ -73,10 +73,10 @@ class Result : public ResultBase {
explicit Result(S&& value) : val(std::forward<S>(value)) {}
template <typename S>
- Result(Result<S>&& other)
- : ResultBase(std::move(other)), val(std::move(other.val)) {}
+ Result(Result<S>&& other) V8_NOEXCEPT : ResultBase(std::move(other)),
+ val(std::move(other.val)) {}
- Result& operator=(Result&& other) = default;
+ Result& operator=(Result&& other) V8_NOEXCEPT = default;
static Result<T> PRINTF_FORMAT(1, 2) Error(const char* format, ...) {
va_list args;
@@ -99,7 +99,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
ErrorThrower(Isolate* isolate, const char* context)
: isolate_(isolate), context_(context) {}
// Explicitly allow move-construction. Disallow copy (below).
- ErrorThrower(ErrorThrower&& other);
+ ErrorThrower(ErrorThrower&& other) V8_NOEXCEPT;
~ErrorThrower();
PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
@@ -123,6 +123,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
bool error() const { return error_type_ != kNone; }
bool wasm_error() { return error_type_ >= kFirstWasmError; }
+ const char* error_msg() { return error_msg_.c_str(); }
Isolate* isolate() const { return isolate_; }
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 3c21a5d223..2edc412afa 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -146,12 +146,12 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#elif V8_TARGET_ARCH_ARM64
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
if (instr->IsLdrLiteralX()) {
- Memory::Address_at(rinfo->constant_pool_entry_address()) =
+ Memory<Address>(rinfo->constant_pool_entry_address()) =
static_cast<Address>(tag);
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
instr->SetBranchImmTarget(
- reinterpret_cast<Instruction*>(rinfo->pc() + tag * kInstructionSize));
+ reinterpret_cast<Instruction*>(rinfo->pc() + tag * kInstrSize));
}
#else
Address addr = static_cast<Address>(tag);
@@ -172,10 +172,10 @@ uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
if (instr->IsLdrLiteralX()) {
return static_cast<uint32_t>(
- Memory::Address_at(rinfo->constant_pool_entry_address()));
+ Memory<Address>(rinfo->constant_pool_entry_address()));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
- return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
+ return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
}
#else
Address addr;
@@ -211,7 +211,8 @@ constexpr size_t kCodeHeaderSize =
class V8_EXPORT_PRIVATE NativeModuleSerializer {
public:
NativeModuleSerializer() = delete;
- NativeModuleSerializer(Isolate*, const NativeModule*);
+ NativeModuleSerializer(Isolate*, const NativeModule*,
+ Vector<WasmCode* const>);
size_t Measure() const;
bool Write(Writer* writer);
@@ -223,6 +224,7 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
Isolate* const isolate_;
const NativeModule* const native_module_;
+ Vector<WasmCode* const> code_table_;
bool write_called_;
// Reverse lookup tables for embedded addresses.
@@ -232,9 +234,13 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
DISALLOW_COPY_AND_ASSIGN(NativeModuleSerializer);
};
-NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
- const NativeModule* module)
- : isolate_(isolate), native_module_(module), write_called_(false) {
+NativeModuleSerializer::NativeModuleSerializer(
+ Isolate* isolate, const NativeModule* module,
+ Vector<WasmCode* const> code_table)
+ : isolate_(isolate),
+ native_module_(module),
+ code_table_(code_table),
+ write_called_(false) {
DCHECK_NOT_NULL(isolate_);
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
@@ -263,7 +269,7 @@ size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
size_t NativeModuleSerializer::Measure() const {
size_t size = kHeaderSize;
- for (WasmCode* code : native_module_->code_table()) {
+ for (WasmCode* code : code_table_) {
size += MeasureCode(code);
}
return size;
@@ -370,26 +376,31 @@ bool NativeModuleSerializer::Write(Writer* writer) {
WriteHeader(writer);
- for (WasmCode* code : native_module_->code_table()) {
+ for (WasmCode* code : code_table_) {
WriteCode(code, writer);
}
return true;
}
-size_t GetSerializedNativeModuleSize(Isolate* isolate,
- NativeModule* native_module) {
- NativeModuleSerializer serializer(isolate, native_module);
+WasmSerializer::WasmSerializer(Isolate* isolate, NativeModule* native_module)
+ : isolate_(isolate),
+ native_module_(native_module),
+ code_table_(native_module->SnapshotCodeTable()) {}
+
+size_t WasmSerializer::GetSerializedNativeModuleSize() const {
+ Vector<WasmCode* const> code_table(code_table_.data(), code_table_.size());
+ NativeModuleSerializer serializer(isolate_, native_module_, code_table);
return kVersionSize + serializer.Measure();
}
-bool SerializeNativeModule(Isolate* isolate, NativeModule* native_module,
- Vector<byte> buffer) {
- NativeModuleSerializer serializer(isolate, native_module);
+bool WasmSerializer::SerializeNativeModule(Vector<byte> buffer) const {
+ Vector<WasmCode* const> code_table(code_table_.data(), code_table_.size());
+ NativeModuleSerializer serializer(isolate_, native_module_, code_table);
size_t measured_size = kVersionSize + serializer.Measure();
if (buffer.size() < measured_size) return false;
Writer writer(buffer);
- WriteVersion(isolate, &writer);
+ WriteVersion(isolate_, &writer);
if (!serializer.Write(&writer)) return false;
DCHECK_EQ(measured_size, writer.bytes_written());
@@ -534,9 +545,11 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
if (!IsSupportedVersion(isolate, data)) {
return {};
}
- ModuleResult decode_result =
- SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(), false,
- i::wasm::kWasmOrigin);
+ // TODO(titzer): module features should be part of the serialization format.
+ WasmFeatures enabled_features = WasmFeaturesFromIsolate(isolate);
+ ModuleResult decode_result = DecodeWasmModule(
+ enabled_features, wire_bytes.start(), wire_bytes.end(), false,
+ i::wasm::kWasmOrigin, isolate->counters(), isolate->allocator());
if (!decode_result.ok()) return {};
CHECK_NOT_NULL(decode_result.val);
WasmModule* module = decode_result.val.get();
@@ -546,14 +559,14 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
// handler was used or not when serializing.
UseTrapHandler use_trap_handler =
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler;
- wasm::ModuleEnv env(module, use_trap_handler,
- wasm::RuntimeExceptionSupport::kRuntimeExceptionSupport);
+ ModuleEnv env(module, use_trap_handler,
+ RuntimeExceptionSupport::kRuntimeExceptionSupport);
OwnedVector<uint8_t> wire_bytes_copy = OwnedVector<uint8_t>::Of(wire_bytes);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, std::move(decode_result.val), env, std::move(wire_bytes_copy),
- script, Handle<ByteArray>::null());
+ isolate, enabled_features, std::move(decode_result.val), env,
+ std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module();
if (FLAG_wasm_lazy_compilation) {
diff --git a/deps/v8/src/wasm/wasm-serialization.h b/deps/v8/src/wasm/wasm-serialization.h
index 352195b2b0..436a369fb6 100644
--- a/deps/v8/src/wasm/wasm-serialization.h
+++ b/deps/v8/src/wasm/wasm-serialization.h
@@ -11,12 +11,27 @@ namespace v8 {
namespace internal {
namespace wasm {
-size_t GetSerializedNativeModuleSize(Isolate* isolate,
- NativeModule* native_module);
-
-bool SerializeNativeModule(Isolate* isolate, NativeModule* native_module,
- Vector<byte> buffer);
-
+// Support for serializing WebAssembly {NativeModule} objects. This class is
+// intended to be thread-safe: it takes a consistent snapshot of the module
+// state at construction, allowing other threads to mutate the module concurrently.
+class WasmSerializer {
+ public:
+ WasmSerializer(Isolate* isolate, NativeModule* native_module);
+
+ // Measure the buffer size required for serialization.
+ size_t GetSerializedNativeModuleSize() const;
+
+ // Serialize the {NativeModule} into the provided {buffer}. Returns true on
+ // success and false if the given buffer is too small for serialization.
+ bool SerializeNativeModule(Vector<byte> buffer) const;
+
+ private:
+ Isolate* isolate_;
+ NativeModule* native_module_;
+ std::vector<WasmCode*> code_table_;
+};
+
+// Support to deserialize WebAssembly {NativeModule} objects.
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes);
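The two free functions removed above are replaced by the WasmSerializer class; a minimal usage sketch, assuming the caller already holds an Isolate* and a NativeModule* (the surrounding setup is not part of the patch):

    // Sketch only: snapshot the module, measure, then serialize into a buffer.
    wasm::WasmSerializer serializer(isolate, native_module);
    size_t size = serializer.GetSerializedNativeModuleSize();
    std::vector<byte> buffer(size);
    if (serializer.SerializeNativeModule(
            Vector<byte>(buffer.data(), buffer.size()))) {
      // buffer now holds the versioned module blob.
    }

Because the constructor takes the code-table snapshot (see SnapshotCodeTable in the wasm-serialization.cc hunk above), both calls operate on the same consistent view even if background compilation keeps adding code.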
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 8efccae9cc..9885f18ce1 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -100,7 +100,8 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprIf:
case kExprBlock:
case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
+ i.pc());
os << WasmOpcodes::OpcodeName(opcode);
if (imm.type == kWasmVar) {
os << " (type " << imm.sig_index << ")";
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
new file mode 100644
index 0000000000..6445608193
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_TIER_H_
+#define V8_WASM_WASM_TIER_H_
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// All the tiers of WASM execution.
+enum class ExecutionTier {
+ kInterpreter, // interpreter (used to provide debugging services).
+ kBaseline, // Liftoff.
+ kOptimized // TurboFan.
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_TIER_H_
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 602135135c..c1538e8523 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -6,7 +6,7 @@
#define V8_WASM_WASM_VALUE_H_
#include "src/boxed-float.h"
-#include "src/utils.h"
+#include "src/v8memory.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 559ebedc1d..4c6cce5482 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -24,12 +24,12 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
void Assembler::emitl(uint32_t x) {
- Memory::uint32_at(reinterpret_cast<Address>(pc_)) = x;
+ Memory<uint32_t>(pc_) = x;
pc_ += sizeof(uint32_t);
}
void Assembler::emitp(Address x, RelocInfo::Mode rmode) {
- Memory::uintptr_at(reinterpret_cast<Address>(pc_)) = x;
+ Memory<uintptr_t>(pc_) = x;
if (!RelocInfo::IsNone(rmode)) {
RecordRelocInfo(rmode, x);
}
@@ -38,13 +38,13 @@ void Assembler::emitp(Address x, RelocInfo::Mode rmode) {
void Assembler::emitq(uint64_t x) {
- Memory::uint64_at(reinterpret_cast<Address>(pc_)) = x;
+ Memory<uint64_t>(pc_) = x;
pc_ += sizeof(uint64_t);
}
void Assembler::emitw(uint16_t x) {
- Memory::uint16_at(reinterpret_cast<Address>(pc_)) = x;
+ Memory<uint16_t>(pc_) = x;
pc_ += sizeof(uint16_t);
}
@@ -75,6 +75,10 @@ void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
+void Assembler::emit_rex_64(XMMRegister reg, XMMRegister rm_reg) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
void Assembler::emit_rex_64(Register reg, Operand op) {
emit(0x48 | reg.high_bit() << 2 | op.data().rex);
}
@@ -229,13 +233,13 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, Operand rm,
Address Assembler::target_address_at(Address pc, Address constant_pool) {
- return Memory::int32_at(pc) + pc + 4;
+ return Memory<int32_t>(pc) + pc + 4;
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
- Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
+ Memory<int32_t>(pc) = static_cast<int32_t>(target - pc - 4);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, sizeof(int32_t));
}
@@ -243,7 +247,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory::Address_at(pc) = target;
+ Memory<Address>(pc) = target;
}
@@ -263,11 +267,11 @@ int Assembler::deserialization_special_target_size(
}
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
- return GetCodeTarget(Memory::int32_at(pc));
+ return GetCodeTarget(Memory<int32_t>(pc));
}
Address Assembler::runtime_entry_at(Address pc) {
- return Memory::int32_at(pc) + options().code_range_start;
+ return Memory<int32_t>(pc) + options().code_range_start;
}
// -----------------------------------------------------------------------------
@@ -276,10 +280,10 @@ Address Assembler::runtime_entry_at(Address pc) {
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
- Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
+ Memory<int32_t>(pc_) -= static_cast<int32_t>(delta);
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += delta;
+ Memory<Address>(pc_) += delta;
}
}
@@ -312,13 +316,13 @@ int RelocInfo::target_address_size() {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return HeapObject::cast(Memory::Object_at(pc_));
+ return HeapObject::cast(Memory<Object*>(pc_));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
- return Handle<HeapObject>::cast(Memory::Object_Handle_at(pc_));
+ return Handle<HeapObject>::cast(Memory<Handle<Object>>(pc_));
} else {
return origin->code_target_object_handle_at(pc_);
}
@@ -326,13 +330,13 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- Memory::Address_at(pc_) = target;
+ Memory<Address>(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
@@ -340,7 +344,7 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
@@ -353,13 +357,12 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
+ Memory<Object*>(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
- heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
- heap->RecordWriteIntoCode(host(), this, target);
+ WriteBarrierForCode(host(), this, target);
}
}
@@ -380,13 +383,13 @@ void RelocInfo::set_target_runtime_entry(Address target,
Address RelocInfo::target_off_heap_target() {
DCHECK(IsOffHeapTarget(rmode_));
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
- IsInternalReference(rmode_)) {
- Memory::Address_at(pc_) = kNullAddress;
+ IsInternalReference(rmode_) || IsOffHeapTarget(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(pc_, constant_pool_,
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 31fc7d046c..27120e2d15 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -127,7 +127,7 @@ void CpuFeatures::PrintFeatures() {
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- Memory::Address_at(pc_) = address;
+ Memory<Address>(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc_, sizeof(Address));
}
@@ -135,12 +135,12 @@ void RelocInfo::set_js_to_wasm_address(Address address,
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
- return Memory::Address_at(pc_);
+ return Memory<Address>(pc_);
}
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
- return Memory::uint32_at(pc_);
+ return Memory<uint32_t>(pc_);
}
// -----------------------------------------------------------------------------
@@ -229,7 +229,7 @@ class OperandBuilder {
// Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
data_.len = disp_offset + 4;
- Memory::int32_at(reinterpret_cast<Address>(&data_.buf[disp_offset])) =
+ Memory<int32_t>(reinterpret_cast<Address>(&data_.buf[disp_offset])) =
disp_value;
} else if (disp_value != 0 || (base_reg == 0x05)) {
// Need 8 bits of displacement.
@@ -341,31 +341,110 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapNumber> object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
- Memory::Object_Handle_at(pc) = object;
+ Memory<Handle<Object>>(pc) = object;
break;
}
case HeapObjectRequest::kCodeStub: {
request.code_stub()->set_isolate(isolate);
- UpdateCodeTarget(Memory::int32_at(pc), request.code_stub()->GetCode());
+ UpdateCodeTarget(Memory<int32_t>(pc), request.code_stub()->GetCode());
break;
}
}
}
}
+// Partial Constant Pool.
+bool ConstPool::AddSharedEntry(uint64_t data, int offset) {
+ auto existing = entries_.find(data);
+ if (existing == entries_.end()) {
+ entries_.insert(std::make_pair(data, offset + kMoveImm64Offset));
+ return false;
+ }
+
+ // Make sure this is called with strictly ascending offsets.
+ DCHECK_GT(offset + kMoveImm64Offset, existing->second);
+
+ entries_.insert(std::make_pair(data, offset + kMoveRipRelativeDispOffset));
+ return true;
+}
+
+bool ConstPool::TryRecordEntry(intptr_t data, RelocInfo::Mode mode) {
+ if (!FLAG_partial_constant_pool) return false;
+ if (!RelocInfo::IsShareableRelocMode(mode)) return false;
+
+ // Currently, partial constant pool only handles the following kinds of
+ // RelocInfo.
+ if (mode != RelocInfo::NONE && mode != RelocInfo::EXTERNAL_REFERENCE &&
+ mode != RelocInfo::OFF_HEAP_TARGET)
+ return false;
+
+ uint64_t raw_data = static_cast<uint64_t>(data);
+ int offset = assm_->pc_offset();
+ return AddSharedEntry(raw_data, offset);
+}
+
+bool ConstPool::IsMoveRipRelative(byte* instr) {
+ if ((*reinterpret_cast<uint32_t*>(instr) & kMoveRipRelativeMask) ==
+ kMoveRipRelativeInstr)
+ return true;
+ return false;
+}
+
+void ConstPool::Clear() { entries_.clear(); }
+
+void ConstPool::PatchEntries() {
+ for (EntryMap::iterator iter = entries_.begin(); iter != entries_.end();
+ iter = entries_.upper_bound(iter->first)) {
+ std::pair<EntryMap::iterator, EntryMap::iterator> range =
+ entries_.equal_range(iter->first);
+ int constant_entry_offset = 0;
+ for (EntryMap::iterator it = range.first; it != range.second; it++) {
+ if (it == range.first) {
+ constant_entry_offset = it->second;
+ continue;
+ }
+
+ DCHECK_GT(constant_entry_offset, 0);
+ DCHECK_LT(constant_entry_offset, it->second);
+ int32_t disp32 =
+ constant_entry_offset - (it->second + kRipRelativeDispSize);
+ byte* disp_addr = assm_->addr_at(it->second);
+
+ // Check if the instruction is actually a rip-relative move.
+ DCHECK(IsMoveRipRelative(disp_addr - kMoveRipRelativeDispOffset));
+ // The displacement of the rip-relative move should be 0 before patching.
+ DCHECK(*reinterpret_cast<uint32_t*>(disp_addr) == 0);
+ *reinterpret_cast<int32_t*>(disp_addr) = disp32;
+ }
+ }
+ Clear();
+}
+
+void Assembler::PatchConstPool() {
+ // There is nothing to do if there are no pending entries.
+ if (constpool_.IsEmpty()) {
+ return;
+ }
+ constpool_.PatchEntries();
+}
+
+bool Assembler::UseConstPoolFor(RelocInfo::Mode rmode) {
+ if (!FLAG_partial_constant_pool) return false;
+ return (rmode == RelocInfo::NONE || rmode == RelocInfo::EXTERNAL_REFERENCE ||
+ rmode == RelocInfo::OFF_HEAP_TARGET);
+}
+
// -----------------------------------------------------------------------------
// Implementation of Assembler.
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
int buffer_size)
- : AssemblerBase(options, buffer, buffer_size) {
+ : AssemblerBase(options, buffer, buffer_size), constpool_(this) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it.
#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
- }
+ if (own_buffer_) ZapCode(reinterpret_cast<Address>(buffer_), buffer_size_);
#endif
ReserveCodeTargetSpace(100);
@@ -373,6 +452,9 @@ Assembler::Assembler(const AssemblerOptions& options, void* buffer,
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
+ PatchConstPool();
+ DCHECK(constpool_.IsEmpty());
+
// At this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info.
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
@@ -546,7 +628,7 @@ void Assembler::GrowBuffer() {
// Clear the buffer in debug mode. Use 'int3' instructions to make
// sure to get into problems if we ever run uninitialized code.
#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
+ ZapCode(reinterpret_cast<Address>(desc.buffer), desc.buffer_size);
#endif
// Copy the data.
@@ -847,6 +929,20 @@ void Assembler::shift(Operand dst, int subcode, int size) {
emit_operand(subcode, dst);
}
+void Assembler::bswapl(Register dst) {
+ EnsureSpace ensure_space(this);
+ emit_rex_32(dst);
+ emit(0x0F);
+ emit(0xC8 + dst.low_bits());
+}
+
+void Assembler::bswapq(Register dst) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0x0F);
+ emit(0xC8 + dst.low_bits());
+}
+
void Assembler::bt(Operand dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
@@ -936,7 +1032,7 @@ void Assembler::pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x70);
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
+ emit_sse_operand(dst, src);
emit(shuffle);
}
@@ -949,6 +1045,26 @@ void Assembler::pshufw(XMMRegister dst, Operand src, uint8_t shuffle) {
emit(shuffle);
}
+void Assembler::pblendw(XMMRegister dst, Operand src, uint8_t mask) {
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0E);
+ emit(mask);
+}
+
+void Assembler::pblendw(XMMRegister dst, XMMRegister src, uint8_t mask) {
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0E);
+ emit(mask);
+}
+
+void Assembler::palignr(XMMRegister dst, Operand src, uint8_t mask) {
+ ssse3_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0F);
+ emit(mask);
+}
+
+void Assembler::palignr(XMMRegister dst, XMMRegister src, uint8_t mask) {
+ ssse3_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0F);
+ emit(mask);
+}
+
void Assembler::call(Label* L) {
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
@@ -1681,10 +1797,17 @@ void Assembler::emit_mov(Operand dst, Immediate value, int size) {
}
void Assembler::movp(Register dst, Address value, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- emit_rex(dst, kPointerSize);
- emit(0xB8 | dst.low_bits());
- emitp(value, rmode);
+ if (constpool_.TryRecordEntry(value, rmode)) {
+ // Emit rip-relative move with offset = 0
+ Label label;
+ emit_mov(dst, Operand(&label, 0), kPointerSize);
+ bind(&label);
+ } else {
+ EnsureSpace ensure_space(this);
+ emit_rex(dst, kPointerSize);
+ emit(0xB8 | dst.low_bits());
+ emitp(value, rmode);
+ }
}
void Assembler::movp_heap_number(Register dst, double value) {
@@ -1696,13 +1819,20 @@ void Assembler::movp_heap_number(Register dst, double value) {
}
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode, value);
+ if (constpool_.TryRecordEntry(value, rmode)) {
+ // Emit rip-relative move with offset = 0
+ Label label;
+ emit_mov(dst, Operand(&label, 0), kPointerSize);
+ bind(&label);
+ } else {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ if (!RelocInfo::IsNone(rmode)) {
+ RecordRelocInfo(rmode, value);
+ }
+ emitq(value);
}
- emitq(value);
}
void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
@@ -3612,6 +3742,24 @@ void Assembler::cvttsd2siq(Register dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::cvttps2dq(XMMRegister dst, Operand src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::cvttps2dq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::cvtlsi2sd(XMMRegister dst, Operand src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4054,34 +4202,6 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
}
-void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x62);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::punpckldq(XMMRegister dst, Operand src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x62);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x6A);
- emit_sse_operand(dst, src);
-}
-
-
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index aeff5ee06d..8823334a46 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -220,6 +220,7 @@ typedef XMMRegister Simd128Register;
DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
enum Condition {
// any value < 0 is considered no_condition
@@ -266,31 +267,6 @@ inline Condition NegateCondition(Condition cc) {
}
-// Commute a condition such that {a cond b == b cond' a}.
-inline Condition CommuteCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- }
-}
-
-
enum RoundingMode {
kRoundToNearest = 0x0,
kRoundDown = 0x1,
@@ -422,7 +398,68 @@ static_assert(sizeof(Operand) <= 2 * kPointerSize,
V(shr, 0x5) \
V(sar, 0x7)
-class Assembler : public AssemblerBase {
+// Partial Constant Pool
+// Unlike a complete constant pool (as used on Arm), the partial constant pool
+// only takes effect for shareable constants, in order to reduce code size. It
+// does not emit constant pool entries at the end of each code object. Instead,
+// it keeps the first occurrence of a shareable constant inlined in the
+// instruction stream and uses rip-relative loads for the same constant in
+// subsequent instructions. These rip-relative loads target the position of the
+// first inlined constant. For example:
+//
+// REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
+// …
+// REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
+// …
+//
+// turns into
+//
+// REX.W movq r10,0x7f9f75a32c20 ; 10 bytes
+// …
+// REX.W movq r10,[rip+0xffffff96] ; 7 bytes
+// …
+
+class ConstPool {
+ public:
+ explicit ConstPool(Assembler* assm) : assm_(assm) {}
+ // Returns true when the partial constant pool can be used for this entry.
+ bool TryRecordEntry(intptr_t data, RelocInfo::Mode mode);
+ bool IsEmpty() const { return entries_.empty(); }
+
+ void PatchEntries();
+ // Discard any pending pool entries.
+ void Clear();
+
+ private:
+ // Adds a shared entry to entries_. Returns true if this is not the first time
+ // we add this entry, false otherwise.
+ bool AddSharedEntry(uint64_t data, int offset);
+
+ // Check if the instruction is a rip-relative move.
+ bool IsMoveRipRelative(byte* instr);
+
+ Assembler* assm_;
+
+ // Values, pc offsets of entries.
+ typedef std::multimap<uint64_t, int> EntryMap;
+ EntryMap entries_;
+
+ // Number of bytes taken up by the displacement of rip-relative addressing.
+ static constexpr int kRipRelativeDispSize = 4; // 32-bit displacement.
+ // Distance between the address of the displacement in the rip-relative move
+ // instruction and the head address of the instruction.
+ static constexpr int kMoveRipRelativeDispOffset =
+ 3; // REX Opcode ModRM Displacement
+ // Distance between the address of the imm64 in the 'movq reg, imm64'
+ // instruction and the head address of the instruction.
+ static constexpr int kMoveImm64Offset = 2; // REX Opcode imm64
+ // A mask for rip-relative move instruction.
+ static constexpr uint32_t kMoveRipRelativeMask = 0x00C7FFFB;
+ // The bits for a rip-relative move instruction after mask.
+ static constexpr uint32_t kMoveRipRelativeInstr = 0x00058B48;
+};
+
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
private:
// We check before assembling an instruction that there is sufficient
// space to write an instruction and its relocation information.
@@ -817,6 +854,8 @@ class Assembler : public AssemblerBase {
void testw(Operand op, Register reg);
// Bit operations.
+ void bswapl(Register dst);
+ void bswapq(Register dst);
void bt(Operand dst, Register src);
void bts(Operand dst, Register src);
void bsrq(Register dst, Register src);
@@ -841,6 +880,10 @@ class Assembler : public AssemblerBase {
void pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshufw(XMMRegister dst, Operand src, uint8_t shuffle);
+ void pblendw(XMMRegister dst, Operand src, uint8_t mask);
+ void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
+ void palignr(XMMRegister dst, Operand src, uint8_t mask);
+ void palignr(XMMRegister dst, XMMRegister src, uint8_t mask);
// Label operations & relative jumps (PPUM Appendix D)
//
@@ -1146,6 +1189,8 @@ class Assembler : public AssemblerBase {
void cvttss2siq(Register dst, Operand src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, Operand src);
+ void cvttps2dq(XMMRegister dst, Operand src);
+ void cvttps2dq(XMMRegister dst, XMMRegister src);
void cvtlsi2sd(XMMRegister dst, Operand src);
void cvtlsi2sd(XMMRegister dst, Register src);
@@ -1197,10 +1242,6 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
- void punpckldq(XMMRegister dst, XMMRegister src);
- void punpckldq(XMMRegister dst, Operand src);
- void punpckhdq(XMMRegister dst, XMMRegister src);
-
// SSE 4.1 instruction
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void extractps(Register dst, XMMRegister src, byte imm8);
@@ -1894,6 +1935,12 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dq(data); }
void dq(Label* label);
+ // Patch entries for partial constant pool.
+ void PatchConstPool();
+
+ // Check whether the partial constant pool should be used for this rmode.
+ static bool UseConstPoolFor(RelocInfo::Mode rmode);
+
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@@ -1945,6 +1992,7 @@ class Assembler : public AssemblerBase {
inline void emit_rex_64(XMMRegister reg, Register rm_reg);
inline void emit_rex_64(Register reg, XMMRegister rm_reg);
inline void emit_rex_64(Register reg, Register rm_reg);
+ inline void emit_rex_64(XMMRegister reg, XMMRegister rm_reg);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the destination, index, and base register codes.
@@ -2374,6 +2422,10 @@ class Assembler : public AssemblerBase {
int farjmp_num_ = 0;
std::deque<int> farjmp_positions_;
std::map<Label*, std::vector<int>> label_farjmp_maps_;
+
+ ConstPool constpool_;
+
+ friend class ConstPool;
};
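To make the partial-constant-pool comment above concrete, this is the displacement arithmetic that ConstPool::PatchEntries() (in the assembler-x64.cc hunk earlier in this patch) performs, using the constants declared above and hypothetical pc offsets:

    // Sketch only: first 'movq reg, imm64' emitted at pc offset 0, a second
    // use of the same constant emitted as a rip-relative movq at pc offset 100.
    int constant_entry_offset = 0 + 2;   // kMoveImm64Offset: start of the imm64
    int disp_field_offset = 100 + 3;     // kMoveRipRelativeDispOffset
    // The displacement is relative to the end of the 4-byte disp field
    // (kRipRelativeDispSize), i.e. to the end of the 7-byte instruction:
    int32_t disp32 = constant_entry_offset - (disp_field_offset + 4);  // -105
    // PatchEntries() writes disp32 into the second instruction, so at runtime
    // it loads the 8-byte constant still inlined in the first instruction.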
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 5fe2d13201..5310a64714 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_X64
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/counters.h"
@@ -71,8 +71,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
#endif
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
__ InitializeRootRegister();
}
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 056d3d01f6..41fe3dc363 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -29,7 +29,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 216d7c7d76..371735590c 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -12,6 +12,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/v8-fallthrough.h"
#include "src/disasm.h"
+#include "src/macro-assembler.h"
#include "src/x64/sse-instr.h"
namespace disasm {
@@ -277,23 +278,17 @@ static const InstructionDesc cmov_instructions[16] = {
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
-enum UnimplementedOpcodeAction {
- CONTINUE_ON_UNIMPLEMENTED_OPCODE,
- ABORT_ON_UNIMPLEMENTED_OPCODE
-};
-
// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
public:
DisassemblerX64(const NameConverter& converter,
- UnimplementedOpcodeAction unimplemented_action =
- ABORT_ON_UNIMPLEMENTED_OPCODE)
+ Disassembler::UnimplementedOpcodeAction unimplemented_action)
: converter_(converter),
tmp_buffer_pos_(0),
abort_on_unimplemented_(unimplemented_action ==
- ABORT_ON_UNIMPLEMENTED_OPCODE),
+ Disassembler::kAbortOnUnimplementedOpcode),
rex_(0),
operand_size_(0),
group_1_prefix_(0),
@@ -305,9 +300,6 @@ class DisassemblerX64 {
tmp_buffer_[0] = '\0';
}
- virtual ~DisassemblerX64() {
- }
-
// Writes one disassembled instruction into 'buffer' (0-terminated).
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
@@ -459,6 +451,7 @@ class DisassemblerX64 {
typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
+ void TryAppendRootRelativeName(int offset);
int PrintRightOperandHelper(byte* modrmp,
RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
@@ -502,6 +495,10 @@ void DisassemblerX64::AppendToBuffer(const char* format, ...) {
tmp_buffer_pos_ += result;
}
+void DisassemblerX64::TryAppendRootRelativeName(int offset) {
+ const char* maybe_name = converter_.RootRelativeName(offset);
+ if (maybe_name != nullptr) AppendToBuffer(" (%s)", maybe_name);
+}
int DisassemblerX64::PrintRightOperandHelper(
byte* modrmp,
@@ -581,6 +578,10 @@ int DisassemblerX64::PrintRightOperandHelper(
NameOfCPURegister(rm),
disp < 0 ? "-" : "+",
disp < 0 ? -disp : disp);
+ if (rm == i::kRootRegister.code()) {
+ // For root-relative accesses, try to append a description.
+ TryAppendRootRelativeName(i::kRootRegisterBias + disp);
+ }
return (mod == 2) ? 5 : 2;
}
break;
@@ -1681,6 +1682,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", (*current) & 3);
current += 1;
+ } else if (third_byte == 0x0E) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pblendw %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(data);
+ AppendToBuffer(",0x%x", (*current) & 3);
+ current += 1;
+ } else if (third_byte == 0x0F) {
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("palignr %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(data);
+ AppendToBuffer(",0x%x", (*current) & 3);
+ current += 1;
} else if (third_byte == 0x14) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pextrb "); // reg/m32, xmm, imm8
@@ -1812,32 +1825,44 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "xorpd";
} else if (opcode == 0x5B) {
mnemonic = "cvtps2dq";
- } else if (opcode == 0x2E) {
- mnemonic = "ucomisd";
- } else if (opcode == 0x2F) {
- mnemonic = "comisd";
+ } else if (opcode == 0x60) {
+ mnemonic = "punpcklbw";
+ } else if (opcode == 0x61) {
+ mnemonic = "punpcklwd";
+ } else if (opcode == 0x62) {
+ mnemonic = "punpckldq";
+ } else if (opcode == 0x63) {
+ mnemonic = "packsswb";
} else if (opcode == 0x64) {
mnemonic = "pcmpgtb";
} else if (opcode == 0x65) {
mnemonic = "pcmpgtw";
} else if (opcode == 0x66) {
mnemonic = "pcmpgtd";
- } else if (opcode == 0x74) {
- mnemonic = "pcmpeqb";
- } else if (opcode == 0x75) {
- mnemonic = "pcmpeqw";
- } else if (opcode == 0x76) {
- mnemonic = "pcmpeqd";
- } else if (opcode == 0x62) {
- mnemonic = "punpckldq";
- } else if (opcode == 0x63) {
- mnemonic = "packsswb";
} else if (opcode == 0x67) {
mnemonic = "packuswb";
+ } else if (opcode == 0x68) {
+ mnemonic = "punpckhbw";
+ } else if (opcode == 0x69) {
+ mnemonic = "punpckhwd";
} else if (opcode == 0x6A) {
mnemonic = "punpckhdq";
} else if (opcode == 0x6B) {
mnemonic = "packssdw";
+ } else if (opcode == 0x6C) {
+ mnemonic = "punpcklqdq";
+ } else if (opcode == 0x6D) {
+ mnemonic = "punpckhqdq";
+ } else if (opcode == 0x2E) {
+ mnemonic = "ucomisd";
+ } else if (opcode == 0x2F) {
+ mnemonic = "comisd";
+ } else if (opcode == 0x74) {
+ mnemonic = "pcmpeqb";
+ } else if (opcode == 0x75) {
+ mnemonic = "pcmpeqw";
+ } else if (opcode == 0x76) {
+ mnemonic = "pcmpeqd";
} else if (opcode == 0xD1) {
mnemonic = "psrlw";
} else if (opcode == 0xD2) {
@@ -1949,6 +1974,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("cvtsd2si%c %s,",
operand_size_code(), NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x5B) {
+ // CVTTPS2DQ: Convert packed single-precision FP values to packed signed
+ // doubleword integer values
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("cvttps2dq%c %s,", operand_size_code(),
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
int mod, regop, rm;
@@ -2171,6 +2204,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 3);
current += 1;
+ } else if (opcode >= 0xC8 && opcode <= 0xCF) {
+ // bswap
+ int reg = (opcode - 0xC8) | (rex_r() ? 8 : 0);
+ AppendToBuffer("bswap%c %s", operand_size_code(), NameOfCPURegister(reg));
} else if (opcode == 0x50) {
// movmskps reg, xmm
int mod, regop, rm;
@@ -2236,6 +2273,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// The argument is the second byte of the two-byte opcode.
// Returns nullptr if the instruction is not handled here.
const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
+ if (opcode >= 0xC8 && opcode <= 0xCF) return "bswap";
switch (opcode) {
case 0x1F:
return "nop";
@@ -2820,21 +2858,9 @@ const char* NameConverter::NameInCode(byte* addr) const {
//------------------------------------------------------------------------------
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) { }
-
-Disassembler::~Disassembler() { }
-
-
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
- DisassemblerX64 d(converter_, CONTINUE_ON_UNIMPLEMENTED_OPCODE);
- return d.InstructionDecode(buffer, instruction);
-}
-
-int Disassembler::InstructionDecodeForTesting(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerX64 d(converter_, ABORT_ON_UNIMPLEMENTED_OPCODE);
+ DisassemblerX64 d(converter_, unimplemented_opcode_action());
return d.InstructionDecode(buffer, instruction);
}
@@ -2843,10 +2869,10 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return -1;
}
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
NameConverter converter;
- Disassembler d(converter);
+ Disassembler d(converter, unimplemented_action);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 30b368b38d..65c708024d 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -247,30 +247,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rax, // argument count (argc)
- r15, // address of first argument (argv)
- rbx // the runtime function to call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 2c703e7d50..adb52afac9 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -15,6 +15,7 @@
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/globals.h"
#include "src/heap/heap-inl.h"
#include "src/instruction-stream.h"
#include "src/objects-inl.h"
@@ -186,27 +187,6 @@ Operand TurboAssembler::ExternalOperand(ExternalReference target,
return Operand(scratch, 0);
}
-int TurboAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && options().enable_root_array_delta_access) {
- // This calculation depends on the internals of LoadAddress.
- // It's correctness is ensured by the asserts in the Call
- // instruction below.
- int64_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- // Operand is leap(scratch, Operand(kRootRegister, delta));
- // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
- int size = 4;
- if (!is_int8(static_cast<int32_t>(delta))) {
- size += 3; // Need full four-byte displacement in lea.
- }
- return size;
- }
- }
- // Size of movp(destination, src);
- return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
-}
-
-
void MacroAssembler::PushAddress(ExternalReference source) {
LoadAddress(kScratchRegister, source);
Push(kScratchRegister);
@@ -448,6 +428,16 @@ void TurboAssembler::Abort(AbortReason reason) {
return;
}
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ movl(arg_reg_1, Immediate(static_cast<int>(reason)));
+ PrepareCallCFunction(1);
+ LoadAddress(rax, ExternalReference::abort_with_reason());
+ call(rax);
+ return;
+ }
+
Move(rdx, Smi::FromInt(static_cast<int>(reason)));
if (!has_frame()) {
@@ -1504,6 +1494,7 @@ if (FLAG_embedded_builtins) {
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -1521,19 +1512,9 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
jmp(kOffHeapTrampolineRegister);
}
-int TurboAssembler::CallSize(ExternalReference ext) {
- // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
- return LoadAddressSize(ext) +
- Assembler::kCallScratchRegisterInstructionLength;
-}
-
void TurboAssembler::Call(ExternalReference ext) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(ext);
-#endif
LoadAddress(kScratchRegister, ext);
call(kScratchRegister);
- DCHECK_EQ(end_position, pc_offset());
}
void TurboAssembler::Call(Operand op) {
@@ -1546,12 +1527,8 @@ void TurboAssembler::Call(Operand op) {
}
void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination);
-#endif
Move(kScratchRegister, destination, rmode);
call(kScratchRegister);
- DCHECK_EQ(pc_offset(), end_position);
}
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
@@ -1572,6 +1549,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
@@ -1581,12 +1559,8 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
}
}
}
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(code_object);
-#endif
DCHECK(RelocInfo::IsCodeTarget(rmode));
call(code_object, rmode);
- DCHECK_EQ(end_position, pc_offset());
}
void TurboAssembler::RetpolineCall(Register reg) {
@@ -1610,14 +1584,8 @@ void TurboAssembler::RetpolineCall(Register reg) {
}
void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
-#ifdef DEBUG
-// TODO(titzer): CallSize() is wrong for RetpolineCalls
-// int end_position = pc_offset() + CallSize(destination);
-#endif
Move(kScratchRegister, destination, rmode);
RetpolineCall(kScratchRegister);
- // TODO(titzer): CallSize() is wrong for RetpolineCalls
- // DCHECK_EQ(pc_offset(), end_position);
}
void TurboAssembler::RetpolineJump(Register reg) {
@@ -2596,43 +2564,14 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
-
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- if (reg7.is_valid()) regs |= reg7.bit();
- if (reg8.is_valid()) regs |= reg8.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
DCHECK(cc == zero || cc == not_zero);
if (scratch == object) {
- andp(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~kPageAlignmentMask));
} else {
- movp(scratch, Immediate(~Page::kPageAlignmentMask));
+ movp(scratch, Immediate(~kPageAlignmentMask));
andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index b00201c9f6..6b96c3dcb3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -35,6 +35,7 @@ constexpr Register kJavaScriptCallExtraArg1Register = rbx;
constexpr Register kRuntimeCallFunctionRegister = rbx;
constexpr Register kRuntimeCallArgCountRegister = rax;
+constexpr Register kRuntimeCallArgvRegister = r15;
constexpr Register kWasmInstanceRegister = rsi;
// Default scratch register used by MacroAssembler (and other code that needs
@@ -52,20 +53,6 @@ typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
- Register reg8 = no_reg);
-#endif
-
-// Forward declaration.
-class JumpTarget;
-
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
: reg(index_register),
@@ -125,7 +112,7 @@ class StackArgumentsAccessor BASE_EMBEDDED {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
@@ -409,26 +396,6 @@ class TurboAssembler : public TurboAssemblerBase {
call(target, rmode);
}
- // The size of the code generated for different call instructions.
- int CallSize(ExternalReference ext);
- int CallSize(Address destination) { return kCallSequenceLength; }
- int CallSize(Handle<Code> code_object) {
- // Code calls use 32-bit relative addressing.
- return kShortCallInstructionLength;
- }
- int CallSize(Register target) {
- // Opcode: REX_opt FF /2 m64
- return (target.high_bit() != 0) ? 3 : 2;
- }
- int CallSize(Operand target) {
- // Opcode: REX_opt FF /2 m64
- return (target.requires_rex() ? 2 : 1) + target.operand_size();
- }
-
- // Returns the size of the code generated by LoadAddress.
- // Used by CallSize(ExternalReference) to find the size of a call.
- int LoadAddressSize(ExternalReference source);
-
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
diff --git a/deps/v8/src/x64/sse-instr.h b/deps/v8/src/x64/sse-instr.h
index a6614c2346..a864d294f4 100644
--- a/deps/v8/src/x64/sse-instr.h
+++ b/deps/v8/src/x64/sse-instr.h
@@ -6,9 +6,18 @@
#define V8_X64_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
+ V(cvtps2dq, 66, 0F, 5B) \
+ V(punpcklbw, 66, 0F, 60) \
+ V(punpcklwd, 66, 0F, 61) \
+ V(punpckldq, 66, 0F, 62) \
V(packsswb, 66, 0F, 63) \
- V(packssdw, 66, 0F, 6B) \
V(packuswb, 66, 0F, 67) \
+ V(punpckhbw, 66, 0F, 68) \
+ V(punpckhwd, 66, 0F, 69) \
+ V(punpckhdq, 66, 0F, 6A) \
+ V(packssdw, 66, 0F, 6B) \
+ V(punpcklqdq, 66, 0F, 6C) \
+ V(punpckhqdq, 66, 0F, 6D) \
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \
@@ -43,8 +52,7 @@
V(psubusw, 66, 0F, D9) \
V(pand, 66, 0F, DB) \
V(por, 66, 0F, EB) \
- V(pxor, 66, 0F, EF) \
- V(cvtps2dq, 66, 0F, 5B)
+ V(pxor, 66, 0F, EF)
#define SSSE3_INSTRUCTION_LIST(V) \
V(pabsb, 66, 0F, 38, 1C) \
@@ -58,7 +66,12 @@
V(psignd, 66, 0F, 38, 0A)
#define SSE4_INSTRUCTION_LIST(V) \
+ V(ptest, 66, 0F, 38, 17) \
+ V(pmovsxbw, 66, 0F, 38, 20) \
+ V(pmovsxwd, 66, 0F, 38, 23) \
V(packusdw, 66, 0F, 38, 2B) \
+ V(pmovzxbw, 66, 0F, 38, 30) \
+ V(pmovzxwd, 66, 0F, 38, 33) \
V(pminsb, 66, 0F, 38, 38) \
V(pminsd, 66, 0F, 38, 39) \
V(pminuw, 66, 0F, 38, 3A) \
@@ -67,7 +80,6 @@
V(pmaxsd, 66, 0F, 38, 3D) \
V(pmaxuw, 66, 0F, 38, 3E) \
V(pmaxud, 66, 0F, 38, 3F) \
- V(pmulld, 66, 0F, 38, 40) \
- V(ptest, 66, 0F, 38, 17)
+ V(pmulld, 66, 0F, 38, 40)
#endif // V8_X64_SSE_INSTR_H_
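
The reshuffled SSE2/SSE4 tables above are X-macro lists: each V(mnemonic, prefix, escape, opcode) row is expanded elsewhere into an emit routine, so reordering the rows by opcode changes readability only, not behaviour. A minimal, self-contained sketch of the pattern — the table and the consumer macro below are simplified stand-ins, not V8's actual definitions:

// Illustrative consumer of an instruction table like the ones above.
#include <cstdio>

#define DEMO_SSE2_LIST(V) \
  V(packsswb, 66, 0F, 63) \
  V(packuswb, 66, 0F, 67)

// Each row expands into one function; a real assembler would instead emit the
// prefix/escape/opcode bytes into the instruction stream.
#define DECLARE_DEMO_INSTRUCTION(name, prefix, escape, opcode) \
  void name() { std::printf(#name ": 0x" #prefix " 0x" #escape " 0x" #opcode "\n"); }

DEMO_SSE2_LIST(DECLARE_DEMO_INSTRUCTION)
#undef DECLARE_DEMO_INSTRUCTION

int main() {
  packsswb();  // prints the encoding bytes recorded in that row
  packuswb();
  return 0;
}
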
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index 229a3f3f40..9c0c073a81 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -4,6 +4,7 @@
#include <stdlib.h>
+#include "src/base/iterator.h"
#include "src/globals.h"
#include "src/utils.h"
#include "src/zone/zone.h"
@@ -60,6 +61,7 @@ class ZoneChunkList : public ZoneObject {
}
size_t size() const { return size_; }
+ bool is_empty() const { return size() == 0; }
T& front() const;
T& back() const;
@@ -142,7 +144,8 @@ class ZoneChunkList : public ZoneObject {
};
template <typename T, bool backwards, bool modifiable>
-class ZoneChunkListIterator {
+class ZoneChunkListIterator
+ : public base::iterator<std::bidirectional_iterator_tag, T> {
private:
template <typename S>
using maybe_const =
@@ -153,6 +156,7 @@ class ZoneChunkListIterator {
public:
maybe_const<T>& operator*() { return current_->items()[position_]; }
+ maybe_const<T>* operator->() { return &current_->items()[position_]; }
bool operator==(const ZoneChunkListIterator& other) const {
return other.current_ == current_ && other.position_ == position_;
}
@@ -182,6 +186,30 @@ class ZoneChunkListIterator {
return clone;
}
+ void Advance(int amount) {
+ // Move forwards.
+ DCHECK_GE(amount, 0);
+#if DEBUG
+ ZoneChunkListIterator clone(*this);
+ for (int i = 0; i < amount; ++i) {
+ ++clone;
+ }
+#endif
+
+ position_ += amount;
+ while (position_ > 0 && position_ >= current_->capacity_) {
+ auto overshoot = position_ - current_->capacity_;
+ current_ = current_->next_;
+ position_ = overshoot;
+
+ DCHECK(position_ == 0 || current_);
+ }
+
+#if DEBUG
+ DCHECK_EQ(clone, *this);
+#endif
+ }
+
private:
friend class ZoneChunkList<T>;
@@ -218,7 +246,9 @@ class ZoneChunkListIterator {
}
ZoneChunkListIterator(Chunk* current, size_t position)
- : current_(current), position_(position) {}
+ : current_(current), position_(position) {
+ DCHECK(current == nullptr || position < current->capacity_);
+ }
template <bool move_backward>
void Move() {
@@ -280,6 +310,7 @@ void ZoneChunkList<T>::push_back(const T& item) {
back_->items()[back_->position_] = item;
++back_->position_;
++size_;
+ DCHECK_LE(back_->position_, back_->capacity_);
}
template <typename T>
@@ -313,10 +344,11 @@ typename ZoneChunkList<T>::SeekResult ZoneChunkList<T>::SeekIndex(
size_t index) const {
DCHECK_LT(index, size());
Chunk* current = front_;
- while (index > current->capacity_) {
+ while (index >= current->capacity_) {
index -= current->capacity_;
current = current->next_;
}
+ DCHECK_LT(index, current->capacity_);
return {current, static_cast<uint32_t>(index)};
}
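
The SeekIndex change above fixes an off-by-one: with index > current->capacity_, an index equal to a chunk's capacity was left pointing one slot past that chunk instead of at slot 0 of the next one, which the newly added DCHECK_LT(index, current->capacity_) would have caught. A standalone sketch of the corrected walk over hypothetical chunk capacities (illustrative model only, not the V8 class):

// Minimal model of seeking into a chunked list; the capacities are made up.
#include <cassert>
#include <cstddef>
#include <vector>

struct SeekResult {
  size_t chunk;     // which chunk the element lives in
  size_t position;  // offset within that chunk
};

SeekResult SeekIndex(const std::vector<size_t>& capacities, size_t index) {
  size_t chunk = 0;
  // '>=' is the fix: an index equal to the current capacity belongs to the
  // next chunk at position 0, not one past the end of this chunk.
  while (index >= capacities[chunk]) {
    index -= capacities[chunk];
    ++chunk;
  }
  assert(index < capacities[chunk]);
  return {chunk, index};
}

int main() {
  std::vector<size_t> capacities = {8, 16, 32};
  SeekResult r = SeekIndex(capacities, 8);  // first element of the second chunk
  assert(r.chunk == 1 && r.position == 0);
  return 0;
}
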
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index d0d2a40035..45e1b34032 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -45,6 +45,7 @@ group("v8_perf") {
data_deps = [
"cctest:cctest",
"..:d8",
+ "../tools:v8_android_test_runner_deps",
]
data = [
@@ -52,6 +53,7 @@ group("v8_perf") {
# TODO(machenbach): These files are referenced by the perf runner.
# They should be transformed into a proper python module.
+ "../tools/testrunner/local/android.py",
"../tools/testrunner/local/command.py",
"../tools/testrunner/local/utils.py",
"../tools/testrunner/objects/output.py",
@@ -62,15 +64,6 @@ group("v8_perf") {
"js-perf-test/",
"memory/",
]
-
- if (is_android && !build_with_chromium) {
- data_deps += [ "../build/android:test_runner_py" ]
-
- data += [
- # This is used by run_perf.py, but not included by test_runner_py above.
- "../third_party/catapult/devil/devil/android/perf/",
- ]
- }
}
group("v8_bot_default") {
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 9072d6bed7..c4aa51b818 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -50,9 +50,10 @@ v8_source_set("cctest_sources") {
"$target_gen_dir/resources.cc",
### gcmole(all) ###
- "../../test/common/wasm/flag-utils.h",
- "../../test/common/wasm/test-signatures.h",
- "../../test/common/wasm/wasm-macro-gen.h",
+ "../common/assembler-tester.h",
+ "../common/wasm/flag-utils.h",
+ "../common/wasm/test-signatures.h",
+ "../common/wasm/wasm-macro-gen.h",
"cctest.cc",
"cctest.h",
"compiler/c-signature.h",
@@ -117,6 +118,7 @@ v8_source_set("cctest_sources") {
"heap/test-compaction.cc",
"heap/test-concurrent-marking.cc",
"heap/test-embedder-tracing.cc",
+ "heap/test-external-string-tracker.cc",
"heap/test-heap.cc",
"heap/test-incremental-marking.cc",
"heap/test-invalidated-slots.cc",
@@ -237,9 +239,11 @@ v8_source_set("cctest_sources") {
"types-fuzz.h",
"unicode-helpers.h",
"wasm/test-c-wasm-entry.cc",
+ "wasm/test-jump-table-assembler.cc",
"wasm/test-run-wasm-64.cc",
"wasm/test-run-wasm-asmjs.cc",
"wasm/test-run-wasm-atomics.cc",
+ "wasm/test-run-wasm-atomics64.cc",
"wasm/test-run-wasm-interpreter.cc",
"wasm/test-run-wasm-js.cc",
"wasm/test-run-wasm-module.cc",
@@ -296,7 +300,6 @@ v8_source_set("cctest_sources") {
"test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
- "wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
@@ -352,7 +355,6 @@ v8_source_set("cctest_sources") {
"test-disasm-x64.cc",
"test-log-stack-tracer.cc",
"test-macro-assembler-x64.cc",
- "wasm/test-run-wasm-atomics64.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 87396a0535..43d91faf84 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,6 +1,5 @@
-per-file *-mips*=ivica.bogosavljevic@mips.com
-per-file *-mips*=Miran.Karic@mips.com
-per-file *-mips*=sreten.kovacevic@mips.com
+per-file *-mips*=ibogosavljevic@wavecomp.com
+per-file *-mips*=skovacevic@wavecomp.com
per-file *-ppc*=dstence@us.ibm.com
per-file *-ppc*=joransiu@ca.ibm.com
per-file *-ppc*=jyan@ca.ibm.com
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 8bae124ea1..383771710a 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -32,7 +32,6 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
-#include "src/assembler.h"
#include "src/debug/debug-interface.h"
#include "src/flags.h"
#include "src/heap/factory.h"
@@ -548,32 +547,6 @@ static inline void CheckDoubleEquals(double expected, double actual) {
CHECK_GE(expected, actual - kEpsilon);
}
-static inline uint8_t* AllocateAssemblerBuffer(
- size_t* allocated,
- size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize) {
- size_t page_size = v8::internal::AllocatePageSize();
- size_t alloc_size = RoundUp(requested, page_size);
- void* result = v8::internal::AllocatePages(
- nullptr, alloc_size, page_size, v8::PageAllocator::kReadWriteExecute);
- CHECK(result);
- *allocated = alloc_size;
- return static_cast<uint8_t*>(result);
-}
-
-static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
- size_t allocated) {
- bool result = v8::internal::SetPermissions(buffer, allocated,
- v8::PageAllocator::kReadExecute);
- CHECK(result);
-}
-
-static inline void MakeAssemblerBufferWritable(uint8_t* buffer,
- size_t allocated) {
- bool result = v8::internal::SetPermissions(buffer, allocated,
- v8::PageAllocator::kReadWrite);
- CHECK(result);
-}
-
static v8::debug::DebugDelegate dummy_delegate;
static inline void EnableDebugger(v8::Isolate* isolate) {
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index a7fcb4f237..c59c443b06 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -345,6 +345,13 @@
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
##############################################################################
+['arch == mipsel or arch == mips64el or arch == mips or arch == mips64 or arch == ppc or arch == ppc64 or arch == s390 or arch == s390x', {
+ # TODO(mips-team): Implement I64Atomic operations on MIPS
+ # TODO(ppc-team, s390-team): Implement I64Atomic operations on PPC/s390
+ 'test-run-wasm-atomics64/*': [SKIP],
+}], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64 or arch == ppc or arch == ppc64 or arch == s390 or arch == s390x'
+
+##############################################################################
['mips_arch_variant == r6', {
# For MIPS[64] architecture release 6, fusion multiply-accumulate instructions
# will cause failures on several tests that expect exact floating-point
@@ -383,6 +390,7 @@
# TODO(ppc): Implement load/store reverse byte instructions
'test-run-wasm-simd/RunWasmCompiled_SimdLoadStoreLoad': [SKIP],
'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad': [SKIP],
+ 'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_turbofan': [SKIP],
}], # 'system == aix or (arch == ppc64 and byteorder == big)'
@@ -404,57 +412,6 @@
}], # 'arch == ppc or arch == ppc64 or arch == s390 or arch == s390x'
##############################################################################
-['byteorder == big', {
-
- # BUG(7827). fix simd globals for big endian
- 'test-run-wasm-simd/RunWasm_SimdI32x4GetGlobal_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_SimdI32x4SetGlobal_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_SimdF32x4GetGlobal_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_SimdF32x4SetGlobal_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_simd_turbofan': [SKIP],
- 'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_SimdLoadStoreLoad_simd_lowered': [SKIP],
- 'test-run-wasm-simd/RunWasm_I32x4AddHoriz_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_I16x8AddHoriz_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_F32x4AddHoriz_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4Dup_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4ZipLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4ZipRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4UnzipLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4UnzipRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4TransposeLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4TransposeRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x2Reverse_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S32x4Irregular_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8Dup_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8ZipLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8ZipRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8UnzipLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8UnzipRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8TransposeLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8TransposeRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x4Reverse_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x2Reverse_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S16x8Irregular_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16Dup_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16ZipLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16ZipRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16UnzipLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16UnzipRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16TransposeLeft_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16TransposeRight_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x8Reverse_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x4Reverse_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x2Reverse_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16Irregular_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16Blend_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_S8x16Concat_interpreter': [SKIP],
- 'test-run-wasm-simd/RunWasm_I16x8ConvertI32x4*': [SKIP],
- 'test-run-wasm-simd/RunWasm_I8x16ConvertI16x8*': [SKIP],
-
-}], # 'byteorder == big'
-
-##############################################################################
['variant == stress_incremental_marking', {
'test-heap-profiler/SamplingHeapProfiler': [SKIP],
}], # variant == stress_incremental_marking
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index 1edb3fdf4b..7c88998f8a 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -60,6 +60,10 @@ class CodeAssemblerTester {
&state_, AssemblerOptions::Default(scope_.isolate()));
}
+ Handle<Code> GenerateCode(const AssemblerOptions& options) {
+ return CodeAssembler::GenerateCode(&state_, options);
+ }
+
Handle<Code> GenerateCodeCloseAndEscape() {
return scope_.CloseAndEscape(GenerateCode());
}
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index c3abcf7a15..86678606d4 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -4,7 +4,7 @@
#include "test/cctest/compiler/function-tester.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/compiler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
@@ -21,6 +21,7 @@ namespace compiler {
FunctionTester::FunctionTester(const char* source, uint32_t flags)
: isolate(main_isolate()),
+ canonical(isolate),
function((FLAG_allow_natives_syntax = true, NewFunction(source))),
flags_(flags) {
Compile(function);
@@ -30,6 +31,7 @@ FunctionTester::FunctionTester(const char* source, uint32_t flags)
FunctionTester::FunctionTester(Graph* graph, int param_count)
: isolate(main_isolate()),
+ canonical(isolate),
function(NewFunction(BuildFunction(param_count).c_str())),
flags_(0) {
CompileGraph(graph);
@@ -37,6 +39,7 @@ FunctionTester::FunctionTester(Graph* graph, int param_count)
FunctionTester::FunctionTester(Handle<Code> code, int param_count)
: isolate(main_isolate()),
+ canonical(isolate),
function((FLAG_allow_natives_syntax = true,
NewFunction(BuildFunction(param_count).c_str()))),
flags_(0) {
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index abcb924cec..6e0146958f 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -31,6 +31,7 @@ class FunctionTester : public InitializedHandleScope {
explicit FunctionTester(Handle<Code> code);
Isolate* isolate;
+ CanonicalHandleScope canonical;
Handle<JSFunction> function;
MaybeHandle<Object> Call() {
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index 663b66b74d..f961021913 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -18,12 +18,11 @@ class BasicBlockProfilerTest : public RawMachineAssemblerTester<int32_t> {
FLAG_turbo_profiling = true;
}
- void ResetCounts() { isolate()->basic_block_profiler()->ResetCounts(); }
+ void ResetCounts() { BasicBlockProfiler::Get()->ResetCounts(); }
void Expect(size_t size, uint32_t* expected) {
- CHECK(isolate()->basic_block_profiler());
const BasicBlockProfiler::DataList* l =
- isolate()->basic_block_profiler()->data_list();
+ BasicBlockProfiler::Get()->data_list();
CHECK_NE(0, static_cast<int>(l->size()));
const BasicBlockProfiler::Data* data = l->back();
CHECK_EQ(static_cast<int>(size), static_cast<int>(data->n_blocks()));
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 9a7b590f5a..a3b80bc887 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -83,8 +83,8 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
// First allocate the FixedArray which will hold the final results. Here we
// should take care of all allocations, meaning we allocate HeapNumbers and
// FixedArrays representing Simd128 values.
- Node* state_out = __ AllocateFixedArray(PACKED_ELEMENTS,
- __ IntPtrConstant(parameters.size()));
+ TNode<FixedArray> state_out = __ Cast(__ AllocateFixedArray(
+ PACKED_ELEMENTS, __ IntPtrConstant(parameters.size())));
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
switch (parameters[i].representation()) {
case MachineRepresentation::kTagged:
@@ -94,8 +94,8 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
__ StoreFixedArrayElement(state_out, i, __ AllocateHeapNumber());
break;
case MachineRepresentation::kSimd128: {
- Node* vector =
- __ AllocateFixedArray(PACKED_SMI_ELEMENTS, __ IntPtrConstant(4));
+ TNode<FixedArray> vector = __ Cast(
+ __ AllocateFixedArray(PACKED_SMI_ELEMENTS, __ IntPtrConstant(4)));
for (int lane = 0; lane < 4; lane++) {
__ StoreFixedArrayElement(vector, lane, __ SmiConstant(0));
}
@@ -109,7 +109,7 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
}
params.push_back(state_out);
// Then take each element of the initial state and pass them as arguments.
- Node* state_in = __ Parameter(1);
+ TNode<FixedArray> state_in = __ Cast(__ Parameter(1));
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
Node* element = __ LoadFixedArrayElement(state_in, __ IntPtrConstant(i));
// Unbox all elements before passing them as arguments.
@@ -197,7 +197,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
std::vector<AllocatedOperand> parameters) {
CodeAssemblerTester tester(isolate, call_descriptor, "teardown");
CodeStubAssembler assembler(tester.state());
- Node* result_array = __ Parameter(1);
+ TNode<FixedArray> result_array = __ Cast(__ Parameter(1));
for (int i = 0; i < static_cast<int>(parameters.size()); i++) {
// The first argument is not used and the second is "result_array".
Node* param = __ Parameter(i + 2);
@@ -216,7 +216,8 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
param, MachineRepresentation::kFloat64);
break;
case MachineRepresentation::kSimd128: {
- Node* vector = __ LoadFixedArrayElement(result_array, i);
+ TNode<FixedArray> vector =
+ __ Cast(__ LoadFixedArrayElement(result_array, i));
for (int lane = 0; lane < 4; lane++) {
Node* lane_value =
__ SmiFromInt32(tester.raw_assembler_for_testing()->AddNode(
@@ -995,8 +996,7 @@ class CodeGeneratorTester {
generator_ = new CodeGenerator(
environment->main_zone(), &frame_, &linkage_, environment->code(),
&info_, environment->main_isolate(), base::Optional<OsrHelper>(),
- kNoSourcePosition, nullptr, nullptr,
- PoisoningMitigationLevel::kDontPoison,
+ kNoSourcePosition, nullptr, PoisoningMitigationLevel::kDontPoison,
AssemblerOptions::Default(environment->main_isolate()),
Builtins::kNoBuiltinId);
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 5843c51698..a31700ede2 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -15,7 +15,7 @@ namespace compiler {
class JSCacheTesterHelper {
protected:
- JSCacheTesterHelper(Isolate* isolate, Zone* zone)
+ explicit JSCacheTesterHelper(Zone* zone)
: main_graph_(zone),
main_common_(zone),
main_javascript_(zone),
@@ -33,7 +33,7 @@ class JSConstantCacheTester : public HandleAndZoneScope,
public JSGraph {
public:
JSConstantCacheTester()
- : JSCacheTesterHelper(main_isolate(), main_zone()),
+ : JSCacheTesterHelper(main_zone()),
JSGraph(main_isolate(), &main_graph_, &main_common_, &main_javascript_,
nullptr, &main_machine_) {
main_graph_.SetStart(main_graph_.NewNode(common()->Start(0)));
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 6560bae096..7938c50069 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -22,7 +22,8 @@ namespace compiler {
class ContextSpecializationTester : public HandleAndZoneScope {
public:
explicit ContextSpecializationTester(Maybe<OuterContext> context)
- : graph_(new (main_zone()) Graph(main_zone())),
+ : canonical_(main_isolate()),
+ graph_(new (main_zone()) Graph(main_zone())),
common_(main_zone()),
javascript_(main_zone()),
machine_(main_zone()),
@@ -30,7 +31,7 @@ class ContextSpecializationTester : public HandleAndZoneScope {
jsgraph_(main_isolate(), graph(), common(), &javascript_, &simplified_,
&machine_),
reducer_(main_zone(), graph()),
- js_heap_broker_(main_isolate()),
+ js_heap_broker_(main_isolate(), main_zone()),
spec_(&reducer_, jsgraph(), &js_heap_broker_, context,
MaybeHandle<JSFunction>()) {}
@@ -50,6 +51,7 @@ class ContextSpecializationTester : public HandleAndZoneScope {
size_t expected_new_depth);
private:
+ CanonicalHandleScope canonical_;
Graph* graph_;
CommonOperatorBuilder common_;
JSOperatorBuilder javascript_;
@@ -106,6 +108,11 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges(
static const int slot_index = Context::NATIVE_CONTEXT_INDEX;
TEST(ReduceJSLoadContext0) {
+ // TODO(neis): The native context below does not have all the fields
+ // initialized that the heap broker wants to serialize.
+ bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
+ FLAG_concurrent_compiler_frontend = false;
+
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -170,6 +177,8 @@ TEST(ReduceJSLoadContext0) {
CHECK(match.HasValue());
CHECK_EQ(*expected, *match.Value());
}
+
+ FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSLoadContext1) {
@@ -247,6 +256,11 @@ TEST(ReduceJSLoadContext2) {
// context2 <-- context1 <-- context0 (= HeapConstant(context_object1))
// context_object1 <~~ context_object0
+ // TODO(neis): The native context below does not have all the fields
+ // initialized that the heap broker wants to serialize.
+ bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
+ FLAG_concurrent_compiler_frontend = false;
+
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -317,6 +331,8 @@ TEST(ReduceJSLoadContext2) {
t.javascript()->LoadContext(3, slot_index, true), context2, start);
t.CheckChangesToValue(load, slot_value0);
}
+
+ FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSLoadContext3) {
@@ -326,6 +342,11 @@ TEST(ReduceJSLoadContext3) {
// context_object2 from ReduceJSLoadContext2 for this, so almost all test
// expectations are the same as in ReduceJSLoadContext2.
+ // TODO(neis): The native context below does not have all the fields
+ // initialized that the heap broker wants to serialize.
+ bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
+ FLAG_concurrent_compiler_frontend = false;
+
HandleAndZoneScope handle_zone_scope;
auto factory = handle_zone_scope.main_isolate()->factory();
@@ -400,9 +421,16 @@ TEST(ReduceJSLoadContext3) {
t.javascript()->LoadContext(3, slot_index, true), context2, start);
t.CheckChangesToValue(load, slot_value0);
}
+
+ FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSStoreContext0) {
+ // TODO(neis): The native context below does not have all the fields
+ // initialized that the heap broker wants to serialize.
+ bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
+ FLAG_concurrent_compiler_frontend = false;
+
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -462,6 +490,8 @@ TEST(ReduceJSStoreContext0) {
CHECK_EQ(0, static_cast<int>(access.depth()));
CHECK_EQ(false, access.immutable());
}
+
+ FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSStoreContext1) {
@@ -509,6 +539,11 @@ TEST(ReduceJSStoreContext1) {
}
TEST(ReduceJSStoreContext2) {
+ // TODO(neis): The native context below does not have all the fields
+ // initialized that the heap broker wants to serialize.
+ bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
+ FLAG_concurrent_compiler_frontend = false;
+
ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
@@ -559,9 +594,16 @@ TEST(ReduceJSStoreContext2) {
context2, context2, start, start);
t.CheckContextInputAndDepthChanges(store, context_object0, 0);
}
+
+ FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(ReduceJSStoreContext3) {
+ // TODO(neis): The native context below does not have all the fields
+ // initialized that the heap broker wants to serialize.
+ bool concurrent_compiler_frontend = FLAG_concurrent_compiler_frontend;
+ FLAG_concurrent_compiler_frontend = false;
+
HandleAndZoneScope handle_zone_scope;
auto factory = handle_zone_scope.main_isolate()->factory();
@@ -616,6 +658,8 @@ TEST(ReduceJSStoreContext3) {
context2, context2, start, start);
t.CheckContextInputAndDepthChanges(store, context_object0, 0);
}
+
+ FLAG_concurrent_compiler_frontend = concurrent_compiler_frontend;
}
TEST(SpecializeJSFunction_ToConstant1) {
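
Each of the tests above saves FLAG_concurrent_compiler_frontend, forces it to false, and restores it by hand at the end of the test body. As a hedged aside, the same save/restore pattern could be expressed once with a small RAII guard along these lines (hypothetical helper, not part of this patch):

// Hypothetical scope guard: sets a boolean flag for the lifetime of a test
// body and restores the previous value on scope exit.
class ScopedBoolFlag {
 public:
  ScopedBoolFlag(bool* flag, bool value) : flag_(flag), saved_(*flag) {
    *flag_ = value;
  }
  ~ScopedBoolFlag() { *flag_ = saved_; }
  ScopedBoolFlag(const ScopedBoolFlag&) = delete;
  ScopedBoolFlag& operator=(const ScopedBoolFlag&) = delete;

 private:
  bool* flag_;
  bool saved_;
};

// Usage at the top of a test body:
//   ScopedBoolFlag no_concurrent_frontend(&FLAG_concurrent_compiler_frontend,
//                                         false);
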
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 3f969f8bfe..eec562cf36 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -23,7 +23,8 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
public:
explicit JSTypedLoweringTester(int num_parameters = 0)
: isolate(main_isolate()),
- js_heap_broker(isolate),
+ canonical(isolate),
+ js_heap_broker(isolate, main_zone()),
binop(nullptr),
unop(nullptr),
javascript(main_zone()),
@@ -39,6 +40,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
}
Isolate* isolate;
+ CanonicalHandleScope canonical;
JSHeapBroker js_heap_broker;
const Operator* binop;
const Operator* unop;
@@ -605,7 +607,8 @@ static void CheckIsConvertedToNumber(Node* val, Node* converted) {
CHECK_EQ(val, converted);
} else {
if (converted->opcode() == IrOpcode::kNumberConstant) return;
- CHECK_EQ(IrOpcode::kJSToNumber, converted->opcode());
+ CHECK(IrOpcode::kJSToNumber == converted->opcode() ||
+ IrOpcode::kJSToNumberConvertBigInt == converted->opcode());
CHECK_EQ(val, converted->InputAt(0));
}
}
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 39ef66c5eb..38c5d17b6b 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
@@ -35,8 +35,9 @@ static Handle<JSFunction> Compile(const char* source) {
.ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- source_code, Compiler::ScriptDetails(), v8::ScriptOriginOptions(),
- nullptr, nullptr, v8::ScriptCompiler::kNoCompileOptions,
+ isolate, source_code, Compiler::ScriptDetails(),
+ v8::ScriptOriginOptions(), nullptr, nullptr,
+ v8::ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 13496ae829..b591d193e7 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -130,92 +130,104 @@ std::unique_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
// WasmCallDescriptor assumes that code is on the native heap and not
// within a code object.
return isolate->wasm_engine()->code_manager()->NewNativeModule(
- isolate, code_size, false, std::move(module), env);
+ isolate, wasm::kAllWasmFeatures, code_size, false, std::move(module),
+ env);
}
void TestReturnMultipleValues(MachineType type) {
const int kMaxCount = 20;
- for (int count = 0; count < kMaxCount; ++count) {
- printf("\n==== type = %s, count = %d ====\n\n\n",
- MachineReprToString(type.representation()), count);
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- CallDescriptor* desc = CreateCallDescriptor(&zone, count, 2, type);
- HandleAndZoneScope handles;
- RawMachineAssembler m(handles.main_isolate(),
- new (handles.main_zone()) Graph(handles.main_zone()),
- desc, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags());
-
- // m.Parameter(0) is the WasmContext.
- Node* p0 = m.Parameter(1);
- Node* p1 = m.Parameter(2);
- typedef Node* Node_ptr;
- std::unique_ptr<Node_ptr[]> returns(new Node_ptr[count]);
- for (int i = 0; i < count; ++i) {
- if (i % 3 == 0) returns[i] = Add(m, type, p0, p1);
- if (i % 3 == 1) returns[i] = Sub(m, type, p0, p1);
- if (i % 3 == 2) returns[i] = Mul(m, type, p0, p1);
- }
- m.Return(count, returns.get());
-
- OptimizedCompilationInfo info(ArrayVector("testing"), handles.main_zone(),
- Code::WASM_FUNCTION);
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(
- &info, handles.main_isolate(), desc, m.graph(),
- AssemblerOptions::Default(handles.main_isolate()), m.Export())
- .ToHandleChecked();
+ const int kMaxParamCount = 9;
+ // Use 9 parameters as a regression test for https://crbug.com/838098.
+ for (int param_count : {2, kMaxParamCount}) {
+ for (int count = 0; count < kMaxCount; ++count) {
+ printf("\n==== type = %s, count = %d ====\n\n\n",
+ MachineReprToString(type.representation()), count);
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ CallDescriptor* desc =
+ CreateCallDescriptor(&zone, count, param_count, type);
+ HandleAndZoneScope handles;
+ RawMachineAssembler m(
+ handles.main_isolate(),
+ new (handles.main_zone()) Graph(handles.main_zone()), desc,
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+
+ // m.Parameter(0) is the WasmContext.
+ Node* p0 = m.Parameter(1);
+ Node* p1 = m.Parameter(2);
+ typedef Node* Node_ptr;
+ std::unique_ptr<Node_ptr[]> returns(new Node_ptr[count]);
+ for (int i = 0; i < count; ++i) {
+ if (i % 3 == 0) returns[i] = Add(m, type, p0, p1);
+ if (i % 3 == 1) returns[i] = Sub(m, type, p0, p1);
+ if (i % 3 == 2) returns[i] = Mul(m, type, p0, p1);
+ }
+ m.Return(count, returns.get());
+
+ OptimizedCompilationInfo info(ArrayVector("testing"), handles.main_zone(),
+ Code::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(
+ &info, handles.main_isolate(), desc, m.graph(),
+ AssemblerOptions::Default(handles.main_isolate()), m.Export())
+ .ToHandleChecked();
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code) {
- StdoutStream os;
- code->Disassemble("multi_value", os);
- }
+ if (FLAG_print_code) {
+ StdoutStream os;
+ code->Disassemble("multi_value", os);
+ }
#endif
- const int a = 47, b = 12;
- int expect = 0;
- for (int i = 0, sign = +1; i < count; ++i) {
- if (i % 3 == 0) expect += sign * (a + b);
- if (i % 3 == 1) expect += sign * (a - b);
- if (i % 3 == 2) expect += sign * (a * b);
- if (i % 4 == 0) sign = -sign;
- }
-
- std::unique_ptr<wasm::NativeModule> module = AllocateNativeModule(
- handles.main_isolate(), code->raw_instruction_size());
- byte* code_start = module->AddCodeCopy(code, wasm::WasmCode::kFunction, 0)
- ->instructions()
- .start();
-
- RawMachineAssemblerTester<int32_t> mt;
- Node* call_inputs[] = {mt.PointerConstant(code_start),
- // WasmContext dummy
- mt.PointerConstant(nullptr),
- // Inputs
- MakeConstant(mt, type, a),
- MakeConstant(mt, type, b)};
-
- Node* ret_multi = mt.AddNode(mt.common()->Call(desc),
- arraysize(call_inputs), call_inputs);
- Node* ret = MakeConstant(mt, type, 0);
- bool sign = false;
- for (int i = 0; i < count; ++i) {
- Node* x = (count == 1)
- ? ret_multi
- : mt.AddNode(mt.common()->Projection(i), ret_multi);
- ret = sign ? Sub(mt, type, ret, x) : Add(mt, type, ret, x);
- if (i % 4 == 0) sign = !sign;
- }
- mt.Return(ToInt32(mt, type, ret));
+ const int a = 47, b = 12;
+ int expect = 0;
+ for (int i = 0, sign = +1; i < count; ++i) {
+ if (i % 3 == 0) expect += sign * (a + b);
+ if (i % 3 == 1) expect += sign * (a - b);
+ if (i % 3 == 2) expect += sign * (a * b);
+ if (i % 4 == 0) sign = -sign;
+ }
+
+ std::unique_ptr<wasm::NativeModule> module = AllocateNativeModule(
+ handles.main_isolate(), code->raw_instruction_size());
+ byte* code_start = module->AddCodeCopy(code, wasm::WasmCode::kFunction, 0)
+ ->instructions()
+ .start();
+
+ RawMachineAssemblerTester<int32_t> mt;
+ const int input_count = 2 + param_count;
+ Node* call_inputs[2 + kMaxParamCount];
+ call_inputs[0] = mt.PointerConstant(code_start);
+ // WasmContext dummy
+ call_inputs[1] = mt.PointerConstant(nullptr);
+ // Special inputs for the test.
+ call_inputs[2] = MakeConstant(mt, type, a);
+ call_inputs[3] = MakeConstant(mt, type, b);
+ for (int i = 2; i < param_count; i++) {
+ call_inputs[2 + i] = MakeConstant(mt, type, i);
+ }
+
+ Node* ret_multi = mt.AddNode(mt.common()->Call(desc),
+ input_count, call_inputs);
+ Node* ret = MakeConstant(mt, type, 0);
+ bool sign = false;
+ for (int i = 0; i < count; ++i) {
+ Node* x = (count == 1)
+ ? ret_multi
+ : mt.AddNode(mt.common()->Projection(i), ret_multi);
+ ret = sign ? Sub(mt, type, ret, x) : Add(mt, type, ret, x);
+ if (i % 4 == 0) sign = !sign;
+ }
+ mt.Return(ToInt32(mt, type, ret));
#ifdef ENABLE_DISASSEMBLER
- Handle<Code> code2 = mt.GetCode();
- if (FLAG_print_code) {
- StdoutStream os;
- code2->Disassemble("multi_value_call", os);
- }
+ Handle<Code> code2 = mt.GetCode();
+ if (FLAG_print_code) {
+ StdoutStream os;
+ code2->Disassemble("multi_value_call", os);
+ }
#endif
- CHECK_EQ(expect, mt.Call());
+ CHECK_EQ(expect, mt.Call());
+ }
}
}
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 3e0e019fd2..c62ed69105 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -4,7 +4,7 @@
#include <utility>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/compiler/pipeline.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 1b6d4fc49f..b1e9ddfce3 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
#include "src/contexts.h"
#include "src/flags.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 709c14e88e..71adbc738d 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -85,11 +85,7 @@ TEST(RunWord32ReverseBits) {
TEST(RunWord32ReverseBytes) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- if (!m.machine()->Word32ReverseBytes().IsSupported()) {
- // We can only test the operator if it exists on the testing platform.
- return;
- }
- m.Return(m.AddNode(m.machine()->Word32ReverseBytes().op(), m.Parameter(0)));
+ m.Return(m.AddNode(m.machine()->Word32ReverseBytes(), m.Parameter(0)));
CHECK_EQ(uint32_t(0x00000000), m.Call(uint32_t(0x00000000)));
CHECK_EQ(uint32_t(0x12345678), m.Call(uint32_t(0x78563412)));
@@ -224,11 +220,7 @@ TEST(RunWord64ReverseBits) {
TEST(RunWord64ReverseBytes) {
BufferedRawMachineAssemblerTester<uint64_t> m(MachineType::Uint64());
- if (!m.machine()->Word64ReverseBytes().IsSupported()) {
- return;
- }
-
- m.Return(m.AddNode(m.machine()->Word64ReverseBytes().op(), m.Parameter(0)));
+ m.Return(m.AddNode(m.machine()->Word64ReverseBytes(), m.Parameter(0)));
CHECK_EQ(uint64_t(0x0000000000000000), m.Call(uint64_t(0x0000000000000000)));
CHECK_EQ(uint64_t(0x1234567890ABCDEF), m.Call(uint64_t(0xEFCDAB9078563412)));
@@ -2396,7 +2388,7 @@ TEST(RunWord32AndP) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- bt.AddReturn(m.Word32And(bt.param0, m.Word32Not(bt.param1)));
+ bt.AddReturn(m.Word32And(bt.param0, m.Word32BitwiseNot(bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected = *i & ~(*j);
@@ -2407,7 +2399,7 @@ TEST(RunWord32AndP) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- bt.AddReturn(m.Word32And(m.Word32Not(bt.param0), bt.param1));
+ bt.AddReturn(m.Word32And(m.Word32BitwiseNot(bt.param0), bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected = ~(*i) & *j;
@@ -2516,7 +2508,8 @@ TEST(RunWord32AndImm) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+ m.Return(
+ m.Word32And(m.Int32Constant(*i), m.Word32BitwiseNot(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i & ~(*j);
CHECK_EQ(expected, m.Call(*j));
@@ -2709,7 +2702,7 @@ TEST(RunWord32OrP) {
{
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
- bt.AddReturn(m.Word32Or(bt.param0, m.Word32Not(bt.param1)));
+ bt.AddReturn(m.Word32Or(bt.param0, m.Word32BitwiseNot(bt.param1)));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | ~(*j);
@@ -2720,7 +2713,7 @@ TEST(RunWord32OrP) {
{
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
- bt.AddReturn(m.Word32Or(m.Word32Not(bt.param0), bt.param1));
+ bt.AddReturn(m.Word32Or(m.Word32BitwiseNot(bt.param0), bt.param1));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
uint32_t expected = ~(*i) | *j;
@@ -2745,7 +2738,8 @@ TEST(RunWord32OrImm) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+ m.Return(
+ m.Word32Or(m.Int32Constant(*i), m.Word32BitwiseNot(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | ~(*j);
CHECK_EQ(expected, m.Call(*j));
@@ -2947,7 +2941,7 @@ TEST(RunWord32XorP) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- bt.AddReturn(m.Word32Xor(bt.param0, m.Word32Not(bt.param1)));
+ bt.AddReturn(m.Word32Xor(bt.param0, m.Word32BitwiseNot(bt.param1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected = *i ^ ~(*j);
@@ -2958,7 +2952,7 @@ TEST(RunWord32XorP) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- bt.AddReturn(m.Word32Xor(m.Word32Not(bt.param0), bt.param1));
+ bt.AddReturn(m.Word32Xor(m.Word32BitwiseNot(bt.param0), bt.param1));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
int32_t expected = ~(*i) ^ *j;
@@ -2969,7 +2963,8 @@ TEST(RunWord32XorP) {
{
FOR_UINT32_INPUTS(i) {
RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
- m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+ m.Return(
+ m.Word32Xor(m.Int32Constant(*i), m.Word32BitwiseNot(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i ^ ~(*j);
CHECK_EQ(expected, m.Call(*j));
@@ -3454,10 +3449,9 @@ TEST(RunWord32RorInComparison) {
}
}
-
-TEST(RunWord32NotP) {
+TEST(RunWord32BitwiseNotP) {
RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- m.Return(m.Word32Not(m.Parameter(0)));
+ m.Return(m.Word32BitwiseNot(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
int expected = ~(*i);
CHECK_EQ(expected, m.Call(*i));
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index 4e5f99f413..3bbab4265f 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -128,6 +128,7 @@ CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
void TestHelper(int n, int m, bool tail) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
+ CanonicalHandleScope canonical(isolate);
Zone* zone = scope.main_zone();
CallDescriptor* caller_descriptor =
CreateDescriptorForStackArguments(zone, n);
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index 1751ee0836..9c76f22b99 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -13,6 +13,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/pipeline.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/optimized-compilation-info.h"
#include "test/cctest/compiler/function-tester.h"
@@ -22,7 +23,7 @@ namespace compiler {
class StubTester {
public:
- StubTester(Isolate* isolate, Zone* zone, CodeStub* stub)
+ StubTester(Zone* zone, CodeStub* stub)
: zone_(zone),
info_(ArrayVector("test"), zone, Code::STUB),
interface_descriptor_(stub->GetCallInterfaceDescriptor()),
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index 34d6212d73..b57b4fcbac 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -108,6 +108,7 @@ CallDescriptor* CreateDescriptorForStackArguments(Zone* zone,
void TestHelper(int n, int m) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
+ CanonicalHandleScope canonical(isolate);
Zone* zone = scope.main_zone();
CallDescriptor* caller_descriptor =
CreateDescriptorForStackArguments(zone, n);
diff --git a/deps/v8/test/cctest/compiler/test-run-variables.cc b/deps/v8/test/cctest/compiler/test-run-variables.cc
index 728d60f491..e2539dc16c 100644
--- a/deps/v8/test/cctest/compiler/test-run-variables.cc
+++ b/deps/v8/test/cctest/compiler/test-run-variables.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 422560c773..a2e860a8a3 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -21,6 +21,10 @@
V(InvalidatedSlotsEvacuationCandidate) \
V(InvalidatedSlotsNoInvalidatedRanges) \
V(InvalidatedSlotsResetObjectRegression) \
+ V(InvalidatedSlotsRightTrimFixedArray) \
+ V(InvalidatedSlotsRightTrimLargeFixedArray) \
+ V(InvalidatedSlotsLeftTrimFixedArray) \
+ V(InvalidatedSlotsFastToSlow) \
V(InvalidatedSlotsSomeInvalidatedRanges) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index ee1a27d4e8..5a19f806bc 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -29,7 +29,7 @@
#include "test/cctest/cctest.h"
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/property.h"
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 31aff4f673..cb35a73126 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 47f48c0c7a..d54ffcf377 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/objects/module.h"
#include "src/objects/script.h"
@@ -61,7 +61,7 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
void TracePrologue() final {}
void TraceEpilogue() final {}
void AbortTracing() final {}
- void EnterFinalPause() final {}
+ void EnterFinalPause(EmbedderStackState) final {}
bool IsRegisteredFromV8(void* first_field) const {
for (auto pair : registered_from_v8_) {
@@ -251,6 +251,20 @@ TEST(FinalizeTracingWhenMarking) {
CHECK(marking->IsStopped());
}
+TEST(GarbageCollectionForTesting) {
+ ManualGCScope manual_gc;
+ i::FLAG_expose_gc = true;
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ Isolate* i_isolate = CcTest::i_isolate();
+ TestEmbedderHeapTracer tracer(isolate);
+ TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+
+ int saved_gc_counter = i_isolate->heap()->gc_count();
+ tracer.GarbageCollectionForTesting(EmbedderHeapTracer::kUnknown);
+ CHECK_GT(i_isolate->heap()->gc_count(), saved_gc_counter);
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-external-string-tracker.cc b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
new file mode 100644
index 0000000000..501825a296
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
@@ -0,0 +1,226 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-inl.h"
+#include "src/api.h"
+#include "src/heap/spaces.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/heap-utils.h"
+
+#define TEST_STR "tests are great!"
+
+namespace v8 {
+namespace internal {
+namespace heap {
+
+// Adapted from cctest/test-api.cc
+class TestOneByteResource : public v8::String::ExternalOneByteStringResource {
+ public:
+ explicit TestOneByteResource(const char* data, int* counter = nullptr,
+ size_t offset = 0)
+ : orig_data_(data),
+ data_(data + offset),
+ length_(strlen(data) - offset),
+ counter_(counter) {}
+
+ ~TestOneByteResource() {
+ i::DeleteArray(orig_data_);
+ if (counter_ != nullptr) ++*counter_;
+ }
+
+ const char* data() const { return data_; }
+
+ size_t length() const { return length_; }
+
+ private:
+ const char* orig_data_;
+ const char* data_;
+ size_t length_;
+ int* counter_;
+};
+
+TEST(ExternalString_ExternalBackingStoreSizeIncreases) {
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+
+ const size_t backing_store_before =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::String> es = v8::String::NewExternalOneByte(
+ isolate, new TestOneByteResource(i::StrDup(TEST_STR))).ToLocalChecked();
+ USE(es);
+
+ const size_t backing_store_after =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+
+ CHECK_EQ(es->Length(), backing_store_after - backing_store_before);
+ }
+}
+
+TEST(ExternalString_ExternalBackingStoreSizeDecreases) {
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+
+ const size_t backing_store_before =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::String> es = v8::String::NewExternalOneByte(
+ isolate, new TestOneByteResource(i::StrDup(TEST_STR))).ToLocalChecked();
+ USE(es);
+ }
+
+ heap::GcAndSweep(heap, OLD_SPACE);
+
+ const size_t backing_store_after =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+
+ CHECK_EQ(0, backing_store_after - backing_store_before);
+}
+
+TEST(ExternalString_ExternalBackingStoreSizeIncreasesMarkCompact) {
+ if (FLAG_never_compact) return;
+ ManualGCScope manual_gc_scope;
+ FLAG_manual_evacuation_candidates_selection = true;
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ heap::AbandonCurrentlyFreeMemory(heap->old_space());
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+
+ const size_t backing_store_before =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::String> es = v8::String::NewExternalOneByte(
+ isolate, new TestOneByteResource(i::StrDup(TEST_STR))).ToLocalChecked();
+ v8::internal::Handle<v8::internal::String> esh = v8::Utils::OpenHandle(*es);
+
+ Page* page_before_gc = Page::FromAddress(esh->address());
+ heap::ForceEvacuationCandidate(page_before_gc);
+
+ CcTest::CollectAllGarbage();
+
+ const size_t backing_store_after =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+ CHECK_EQ(es->Length(), backing_store_after - backing_store_before);
+ }
+
+ heap::GcAndSweep(heap, OLD_SPACE);
+ const size_t backing_store_after =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+ CHECK_EQ(0, backing_store_after - backing_store_before);
+}
+
+TEST(ExternalString_ExternalBackingStoreSizeIncreasesAfterExternalization) {
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
+ size_t old_backing_store_before = 0, new_backing_store_before = 0;
+
+ {
+ v8::HandleScope handle_scope(isolate);
+
+ new_backing_store_before =
+ heap->new_space()->ExternalBackingStoreBytes(type);
+ old_backing_store_before =
+ heap->old_space()->ExternalBackingStoreBytes(type);
+
+ // Allocate normal string in the new gen.
+ v8::Local<v8::String> str =
+ v8::String::NewFromUtf8(isolate, TEST_STR, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+
+ CHECK_EQ(0, heap->new_space()->ExternalBackingStoreBytes(type) -
+ new_backing_store_before);
+
+ // Trigger GCs so that the newly allocated string moves to old gen.
+ heap::GcAndSweep(heap, NEW_SPACE); // in survivor space now
+ heap::GcAndSweep(heap, NEW_SPACE); // in old gen now
+
+ bool success =
+ str->MakeExternal(new TestOneByteResource(i::StrDup(TEST_STR)));
+ CHECK(success);
+
+ CHECK_EQ(str->Length(), heap->old_space()->ExternalBackingStoreBytes(type) -
+ old_backing_store_before);
+ }
+
+ heap::GcAndSweep(heap, OLD_SPACE);
+
+ CHECK_EQ(0, heap->old_space()->ExternalBackingStoreBytes(type) -
+ old_backing_store_before);
+}
+
+TEST(ExternalString_PromotedThinString) {
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
+ i::Factory* factory = i_isolate->factory();
+ Heap* heap = i_isolate->heap();
+
+ {
+ v8::HandleScope handle_scope(isolate);
+
+ // New external string in the old space.
+ v8::internal::Handle<v8::internal::String> string1 =
+ factory
+ ->NewExternalStringFromOneByte(
+ new TestOneByteResource(i::StrDup(TEST_STR)))
+ .ToHandleChecked();
+
+ // Internalize external string.
+ i::Handle<i::String> isymbol1 = factory->InternalizeString(string1);
+ CHECK(isymbol1->IsInternalizedString());
+ CHECK(string1->IsExternalString());
+ CHECK(!heap->InNewSpace(*isymbol1));
+
+ // New external string in the young space. This string has the same content
+ // as the previous one (that was already internalized).
+ v8::Local<v8::String> string2 =
+ v8::String::NewFromUtf8(isolate, TEST_STR, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ bool success =
+ string2->MakeExternal(new TestOneByteResource(i::StrDup(TEST_STR)));
+ CHECK(success);
+
+ // Internalize (it will create a thin string in the new space).
+ i::Handle<i::String> istring = v8::Utils::OpenHandle(*string2);
+ i::Handle<i::String> isymbol2 = factory->InternalizeString(istring);
+ CHECK(isymbol2->IsInternalizedString());
+ CHECK(istring->IsThinString());
+ CHECK(heap->InNewSpace(*istring));
+
+ // Collect thin string. References to the thin string will be updated to
+ // point to the actual external string in the old space.
+ heap::GcAndSweep(heap, NEW_SPACE);
+
+ USE(isymbol1);
+ USE(isymbol2);
+ }
+}
+} // namespace heap
+} // namespace internal
+} // namespace v8
+
+#undef TEST_STR
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 80b361034d..f73f6f0195 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -28,7 +28,7 @@
#include <stdlib.h>
#include <utility>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
@@ -43,9 +43,11 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
+#include "src/heap/remembered-set.h"
#include "src/ic/ic.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/managed.h"
#include "src/regexp/jsregexp.h"
@@ -787,7 +789,7 @@ TEST(BytecodeArray) {
CHECK_GE(array->address() + array->BytecodeArraySize(),
array->GetFirstBytecodeAddress() + array->length());
for (int i = 0; i < kRawBytesSize; i++) {
- CHECK_EQ(Memory::uint8_at(array->GetFirstBytecodeAddress() + i),
+ CHECK_EQ(Memory<uint8_t>(array->GetFirstBytecodeAddress() + i),
kRawBytes[i]);
CHECK_EQ(array->get(i), kRawBytes[i]);
}
@@ -805,7 +807,7 @@ TEST(BytecodeArray) {
CHECK_EQ(array->frame_size(), kFrameSize);
for (int i = 0; i < kRawBytesSize; i++) {
CHECK_EQ(array->get(i), kRawBytes[i]);
- CHECK_EQ(Memory::uint8_at(array->GetFirstBytecodeAddress() + i),
+ CHECK_EQ(Memory<uint8_t>(array->GetFirstBytecodeAddress() + i),
kRawBytes[i]);
}
@@ -3628,15 +3630,15 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
int dependency_group_count = 0;
DependentCode* dependency = site->dependent_code();
- while (dependency != ReadOnlyRoots(heap).empty_fixed_array()) {
+ while (dependency != ReadOnlyRoots(heap).empty_weak_fixed_array()) {
CHECK(dependency->group() ==
DependentCode::kAllocationSiteTransitionChangedGroup ||
dependency->group() ==
DependentCode::kAllocationSiteTenuringChangedGroup);
CHECK_EQ(1, dependency->count());
- CHECK(dependency->object_at(0)->IsWeakCell());
+ CHECK(dependency->object_at(0)->IsWeakHeapObject());
Code* function_bar =
- Code::cast(WeakCell::cast(dependency->object_at(0))->value());
+ Code::cast(dependency->object_at(0)->ToWeakHeapObject());
CHECK_EQ(bar_handle->code(), function_bar);
dependency = dependency->next_link();
dependency_group_count++;
@@ -3653,8 +3655,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// The site still exists because of our global handle, but the code is no
// longer referred to by dependent_code().
- CHECK(site->dependent_code()->object_at(0)->IsWeakCell() &&
- WeakCell::cast(site->dependent_code()->object_at(0))->cleared());
+ CHECK(site->dependent_code()->object_at(0)->IsClearedWeakHeapObject());
}
void CheckNumberOfAllocations(Heap* heap, const char* source,
@@ -3678,20 +3679,51 @@ TEST(AllocationSiteCreation) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
+ i::FLAG_enable_one_shot_optimization = true;
// Array literals.
- CheckNumberOfAllocations(heap, "(function f1() { return []; })()", 1, 0);
- CheckNumberOfAllocations(heap, "(function f2() { return [1, 2]; })()", 1, 0);
- CheckNumberOfAllocations(heap, "(function f3() { return [[1], [2]]; })()", 1,
- 2);
+ CheckNumberOfAllocations(heap, "function f1() { return []; }; f1()", 1, 0);
+ CheckNumberOfAllocations(heap, "function f2() { return [1, 2]; }; f2()", 1,
+ 0);
+ CheckNumberOfAllocations(heap, "function f3() { return [[1], [2]]; }; f3()",
+ 1, 2);
CheckNumberOfAllocations(heap,
- "(function f4() { "
+ "function f4() { "
"return [0, [1, 1.1, 1.2, "
"], 1.5, [2.1, 2.2], 3];"
- "})()",
+ "}; f4();",
1, 2);
+ // No allocation sites within IIFE/top-level code
+ CheckNumberOfAllocations(heap,
+ R"(
+ (function f4() {
+ return [ 0, [ 1, 1.1, 1.2,], 1.5, [2.1, 2.2], 3 ];
+ })();
+ )",
+ 0, 0);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ l = [ 1, 2, 3, 4];
+ )",
+ 0, 0);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ a = [];
+ )",
+ 0, 0);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ (function f4() {
+ return [];
+ })();
+ )",
+ 0, 0);
+
// Object literals have lazy AllocationSites
CheckNumberOfAllocations(heap, "function f5() { return {}; }; f5(); ", 0, 0);
@@ -3727,6 +3759,52 @@ TEST(AllocationSiteCreation) {
// No new AllocationSites created on the second invocation.
CheckNumberOfAllocations(heap, "f9(); ", 0, 0);
+
+ // No allocation sites for literals in IIFE/top-level code even if they have
+ // array subliterals
+ CheckNumberOfAllocations(heap,
+ R"(
+ (function f10() {
+ return {a: [1], b: [2]};
+ })();
+ )",
+ 0, 0);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ l = {
+ a: 1,
+ b: {
+ c: [5],
+ }
+ };
+ )",
+ 0, 0);
+
+ // Eagerly create allocation sites for literals within a loop inside an IIFE
+ // or top-level code
+ CheckNumberOfAllocations(heap,
+ R"(
+ (function f11() {
+ while(true) {
+ return {a: [1], b: [2]};
+ }
+ })();
+ )",
+ 1, 2);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ for (i = 0; i < 1; ++i) {
+ l = {
+ a: 1,
+ b: {
+ c: [5],
+ }
+ };
+ }
+ )",
+ 1, 1);
}
TEST(CellsInOptimizedCodeAreWeak) {
@@ -4330,82 +4408,6 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
CheckIC(loadIC, 0, POLYMORPHIC);
}
-
-TEST(WeakCell) {
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::internal::Factory* factory = isolate->factory();
-
- HandleScope outer_scope(isolate);
- Handle<WeakCell> weak_cell1;
- {
- HandleScope inner_scope(isolate);
- Handle<HeapObject> value = factory->NewFixedArray(1, NOT_TENURED);
- weak_cell1 = inner_scope.CloseAndEscape(factory->NewWeakCell(value));
- }
-
- Handle<FixedArray> survivor = factory->NewFixedArray(1, NOT_TENURED);
- Handle<WeakCell> weak_cell2;
- {
- HandleScope inner_scope(isolate);
- weak_cell2 = inner_scope.CloseAndEscape(factory->NewWeakCell(survivor));
- }
- CHECK(weak_cell1->value()->IsFixedArray());
- CHECK_EQ(*survivor, weak_cell2->value());
- CcTest::CollectGarbage(NEW_SPACE);
- CHECK(weak_cell1->value()->IsFixedArray());
- CHECK_EQ(*survivor, weak_cell2->value());
- CcTest::CollectGarbage(NEW_SPACE);
- CHECK(weak_cell1->value()->IsFixedArray());
- CHECK_EQ(*survivor, weak_cell2->value());
- CcTest::CollectAllAvailableGarbage();
- CHECK(weak_cell1->cleared());
- CHECK_EQ(*survivor, weak_cell2->value());
-}
-
-
-TEST(WeakCellsWithIncrementalMarking) {
- if (!FLAG_incremental_marking) return;
- ManualGCScope manual_gc_scope;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::internal::Heap* heap = CcTest::heap();
- v8::internal::Factory* factory = isolate->factory();
-
- const int N = 16;
- HandleScope outer_scope(isolate);
- Handle<FixedArray> survivor = factory->NewFixedArray(1, NOT_TENURED);
- Handle<WeakCell> weak_cells[N];
-
- for (int i = 0; i < N; i++) {
- HandleScope inner_scope(isolate);
- Handle<HeapObject> value =
- i == 0 ? survivor : factory->NewFixedArray(1, NOT_TENURED);
- Handle<WeakCell> weak_cell = factory->NewWeakCell(value);
- CHECK(weak_cell->value()->IsFixedArray());
- IncrementalMarking* marking = heap->incremental_marking();
- if (marking->IsStopped()) {
- heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
- i::GarbageCollectionReason::kTesting);
- }
- marking->Step(128, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- StepOrigin::kV8);
- CcTest::CollectGarbage(NEW_SPACE);
- CHECK(weak_cell->value()->IsFixedArray());
- weak_cells[i] = inner_scope.CloseAndEscape(weak_cell);
- }
- // Call collect all twice to make sure that we also cleared
- // weak cells that were allocated on black pages.
- CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage();
- CHECK_EQ(*survivor, weak_cells[0]->value());
- for (int i = 1; i < N; i++) {
- CHECK(weak_cells[i]->cleared());
- }
-}
-
-
#ifdef DEBUG
TEST(AddInstructionChangesNewSpacePromotion) {
FLAG_allow_natives_syntax = true;
@@ -4462,7 +4464,7 @@ TEST(CEntryStubOOM) {
CcTest::isolate()->SetFatalErrorHandler(OnFatalErrorExpectOOM);
v8::Local<v8::Value> result = CompileRun(
- "%SetFlags('--gc-interval=1');"
+ "%SetAllocationTimeout(1, 1);"
"var a = [];"
"a.__proto__ = [];"
"a.unshift(1)");
@@ -4694,15 +4696,15 @@ TEST(Regress3877) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
CompileRun("function cls() { this.x = 10; }");
- Handle<WeakCell> weak_prototype;
+ Handle<WeakFixedArray> weak_prototype_holder = factory->NewWeakFixedArray(1);
{
HandleScope inner_scope(isolate);
v8::Local<v8::Value> result = CompileRun("cls.prototype");
Handle<JSReceiver> proto =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
- weak_prototype = inner_scope.CloseAndEscape(factory->NewWeakCell(proto));
+ weak_prototype_holder->Set(0, HeapObjectReference::Weak(*proto));
}
- CHECK(!weak_prototype->cleared());
+ CHECK(!weak_prototype_holder->Get(0)->IsClearedWeakHeapObject());
CompileRun(
"var a = { };"
"a.x = new cls();"
@@ -4711,13 +4713,13 @@ TEST(Regress3877) {
CcTest::CollectAllGarbage();
}
// The map of a.x keeps prototype alive
- CHECK(!weak_prototype->cleared());
+ CHECK(!weak_prototype_holder->Get(0)->IsClearedWeakHeapObject());
// Change the map of a.x and make the previous map garbage collectable.
CompileRun("a.x.__proto__ = {};");
for (int i = 0; i < 4; i++) {
CcTest::CollectAllGarbage();
}
- CHECK(weak_prototype->cleared());
+ CHECK(weak_prototype_holder->Get(0)->IsClearedWeakHeapObject());
}
Handle<WeakFixedArray> AddRetainedMap(Isolate* isolate, Heap* heap) {
@@ -4773,20 +4775,6 @@ TEST(WritableVsImmortalRoots) {
}
}
-TEST(FixedArrayOfWeakCells) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
-
- Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(1);
- Handle<FixedArrayOfWeakCells> array =
- FixedArrayOfWeakCells::Add(isolate, Handle<Object>(), number);
- array->Remove(number);
- array->Compact<FixedArrayOfWeakCells::NullCallback>(isolate);
- FixedArrayOfWeakCells::Add(isolate, array, number);
-}
-
-
TEST(PreprocessStackTrace) {
// Do not automatically trigger early GC.
FLAG_gc_interval = -1;
@@ -5493,7 +5481,8 @@ TEST(Regress631969) {
s3->MakeExternal(&external_string);
CcTest::CollectGarbage(OLD_SPACE);
// This avoids the GC from trying to free stack allocated resources.
- i::Handle<i::ExternalOneByteString>::cast(s3)->set_resource(nullptr);
+ i::Handle<i::ExternalOneByteString>::cast(s3)->SetResource(isolate,
+ nullptr);
}
}
@@ -5673,7 +5662,7 @@ TEST(Regress618958) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
bool isolate_is_locked = true;
- heap->update_external_memory(100 * MB);
+ CcTest::isolate()->AdjustAmountOfExternalAllocatedMemory(100 * MB);
int mark_sweep_count_before = heap->ms_count();
heap->MemoryPressureNotification(MemoryPressureLevel::kCritical,
isolate_is_locked);
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index f18878d511..9162573dd9 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -109,6 +109,7 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
}
HEAP_TEST(InvalidatedSlotsAfterTrimming) {
+ ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
@@ -119,9 +120,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
byte_arrays[i]->Size());
}
// Trim byte arrays and check that the slots outside the byte arrays are
- // considered valid. Free space outside invalidated object can be reused
- // during evacuation for allocation of the evacuated objects. That can
- // add new valid slots to evacuation candidates.
+ // considered invalid if the old space page was swept.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray* byte_array = byte_arrays[i];
@@ -129,7 +128,7 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
Address end = byte_array->address() + byte_array->Size();
heap->RightTrimFixedArray(byte_array, byte_array->length());
for (Address addr = start; addr < end; addr += kPointerSize) {
- CHECK(filter.IsValid(addr));
+ CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
}
}
}
@@ -184,6 +183,185 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
}
}
+Handle<FixedArray> AllocateArrayOnFreshPage(Isolate* isolate,
+ PagedSpace* old_space, int length) {
+ AlwaysAllocateScope always_allocate(isolate);
+ heap::SimulateFullSpace(old_space);
+ return isolate->factory()->NewFixedArray(length, TENURED);
+}
+
+Handle<FixedArray> AllocateArrayOnEvacuationCandidate(Isolate* isolate,
+ PagedSpace* old_space,
+ int length) {
+ Handle<FixedArray> object =
+ AllocateArrayOnFreshPage(isolate, old_space, length);
+ heap::ForceEvacuationCandidate(Page::FromHeapObject(*object));
+ return object;
+}
+
+HEAP_TEST(InvalidatedSlotsRightTrimFixedArray) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ FLAG_parallel_compaction = false;
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = CcTest::heap();
+ HandleScope scope(isolate);
+ PagedSpace* old_space = heap->old_space();
+ // Allocate a dummy page to be swept by the sweeper during evacuation.
+ AllocateArrayOnFreshPage(isolate, old_space, 1);
+ Handle<FixedArray> evacuated =
+ AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
+ Handle<FixedArray> trimmed = AllocateArrayOnFreshPage(isolate, old_space, 10);
+ heap::SimulateIncrementalMarking(heap);
+ for (int i = 1; i < trimmed->length(); i++) {
+ trimmed->set(i, *evacuated);
+ }
+ {
+ HandleScope scope(isolate);
+ Handle<HeapObject> dead = factory->NewFixedArray(1);
+ for (int i = 1; i < trimmed->length(); i++) {
+ trimmed->set(i, *dead);
+ }
+ heap->RightTrimFixedArray(*trimmed, trimmed->length() - 1);
+ }
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::OLD_SPACE);
+}
+
+HEAP_TEST(InvalidatedSlotsRightTrimLargeFixedArray) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ FLAG_parallel_compaction = false;
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = CcTest::heap();
+ HandleScope scope(isolate);
+ PagedSpace* old_space = heap->old_space();
+ // Allocate a dummy page to be swept by the sweeper during evacuation.
+ AllocateArrayOnFreshPage(isolate, old_space, 1);
+ Handle<FixedArray> evacuated =
+ AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
+ Handle<FixedArray> trimmed;
+ {
+ AlwaysAllocateScope always_allocate(isolate);
+ trimmed =
+ factory->NewFixedArray(kMaxRegularHeapObjectSize / kPointerSize + 100);
+ DCHECK(MemoryChunk::FromHeapObject(*trimmed)->InLargeObjectSpace());
+ }
+ heap::SimulateIncrementalMarking(heap);
+ for (int i = 1; i < trimmed->length(); i++) {
+ trimmed->set(i, *evacuated);
+ }
+ {
+ HandleScope scope(isolate);
+ Handle<HeapObject> dead = factory->NewFixedArray(1);
+ for (int i = 1; i < trimmed->length(); i++) {
+ trimmed->set(i, *dead);
+ }
+ heap->RightTrimFixedArray(*trimmed, trimmed->length() - 1);
+ }
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::OLD_SPACE);
+}
+
+HEAP_TEST(InvalidatedSlotsLeftTrimFixedArray) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ FLAG_parallel_compaction = false;
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = CcTest::heap();
+ HandleScope scope(isolate);
+ PagedSpace* old_space = heap->old_space();
+ // Allocate a dummy page to be swept by the sweeper during evacuation.
+ AllocateArrayOnFreshPage(isolate, old_space, 1);
+ Handle<FixedArray> evacuated =
+ AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
+ Handle<FixedArray> trimmed = AllocateArrayOnFreshPage(isolate, old_space, 10);
+ heap::SimulateIncrementalMarking(heap);
+ for (int i = 0; i + 1 < trimmed->length(); i++) {
+ trimmed->set(i, *evacuated);
+ }
+ {
+ HandleScope scope(isolate);
+ Handle<HeapObject> dead = factory->NewFixedArray(1);
+ for (int i = 1; i < trimmed->length(); i++) {
+ trimmed->set(i, *dead);
+ }
+ heap->LeftTrimFixedArray(*trimmed, trimmed->length() - 1);
+ }
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::OLD_SPACE);
+}
+
+HEAP_TEST(InvalidatedSlotsFastToSlow) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ FLAG_parallel_compaction = false;
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = CcTest::heap();
+ PagedSpace* old_space = heap->old_space();
+
+ HandleScope scope(isolate);
+
+ Handle<String> name = factory->InternalizeUtf8String("TestObject");
+ Handle<String> prop_name1 = factory->InternalizeUtf8String("prop1");
+ Handle<String> prop_name2 = factory->InternalizeUtf8String("prop2");
+ Handle<String> prop_name3 = factory->InternalizeUtf8String("prop3");
+ // Allocate a dummy page to be swept by the sweeper during evacuation.
+ AllocateArrayOnFreshPage(isolate, old_space, 1);
+ Handle<FixedArray> evacuated =
+ AllocateArrayOnEvacuationCandidate(isolate, old_space, 1);
+ // Allocate a dummy page to ensure that the JSObject is allocated on
+ // a fresh page.
+ AllocateArrayOnFreshPage(isolate, old_space, 1);
+ Handle<JSObject> obj;
+ {
+ AlwaysAllocateScope always_allocate(isolate);
+ Handle<JSFunction> function = factory->NewFunctionForTest(name);
+ function->shared()->set_expected_nof_properties(3);
+ obj = factory->NewJSObject(function, TENURED);
+ }
+ // Start incremental marking.
+ heap::SimulateIncrementalMarking(heap);
+ // Set properties to point to the evacuation candidate.
+ JSReceiver::SetProperty(isolate, obj, prop_name1, evacuated,
+ LanguageMode::kSloppy)
+ .Check();
+ JSReceiver::SetProperty(isolate, obj, prop_name2, evacuated,
+ LanguageMode::kSloppy)
+ .Check();
+ JSReceiver::SetProperty(isolate, obj, prop_name3, evacuated,
+ LanguageMode::kSloppy)
+ .Check();
+
+ {
+ HandleScope scope(isolate);
+ Handle<HeapObject> dead = factory->NewFixedArray(1);
+ JSReceiver::SetProperty(isolate, obj, prop_name1, dead,
+ LanguageMode::kSloppy)
+ .Check();
+ JSReceiver::SetProperty(isolate, obj, prop_name2, dead,
+ LanguageMode::kSloppy)
+ .Check();
+ JSReceiver::SetProperty(isolate, obj, prop_name3, dead,
+ LanguageMode::kSloppy)
+ .Check();
+ Handle<Map> map(obj->map(), isolate);
+ Handle<Map> normalized_map =
+ Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, "testing");
+ JSObject::MigrateToMap(obj, normalized_map);
+ }
+ CcTest::CollectGarbage(i::NEW_SPACE);
+ CcTest::CollectGarbage(i::OLD_SPACE);
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index 979a28b1cd..a54b13afd2 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
@@ -535,6 +535,55 @@ TEST(WeakArrayListBasic) {
CHECK_EQ(Smi::ToInt(array->Get(7)->ToSmi()), 7);
}
+TEST(WeakArrayListRemove) {
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope outer_scope(isolate);
+
+ Handle<WeakArrayList> array(ReadOnlyRoots(heap).empty_weak_array_list(),
+ isolate);
+
+ Handle<FixedArray> elem0 = factory->NewFixedArray(1);
+ Handle<FixedArray> elem1 = factory->NewFixedArray(1);
+ Handle<FixedArray> elem2 = factory->NewFixedArray(1);
+
+ array =
+ WeakArrayList::AddToEnd(isolate, array, MaybeObjectHandle::Weak(elem0));
+ array =
+ WeakArrayList::AddToEnd(isolate, array, MaybeObjectHandle::Weak(elem1));
+ array =
+ WeakArrayList::AddToEnd(isolate, array, MaybeObjectHandle::Weak(elem2));
+
+ CHECK_EQ(array->length(), 3);
+ CHECK_EQ(array->Get(0), HeapObjectReference::Weak(*elem0));
+ CHECK_EQ(array->Get(1), HeapObjectReference::Weak(*elem1));
+ CHECK_EQ(array->Get(2), HeapObjectReference::Weak(*elem2));
+
+ CHECK(array->RemoveOne(MaybeObjectHandle::Weak(elem1)));
+
+ CHECK_EQ(array->length(), 2);
+ CHECK_EQ(array->Get(0), HeapObjectReference::Weak(*elem0));
+ CHECK_EQ(array->Get(1), HeapObjectReference::Weak(*elem2));
+
+ CHECK(!array->RemoveOne(MaybeObjectHandle::Weak(elem1)));
+
+ CHECK_EQ(array->length(), 2);
+ CHECK_EQ(array->Get(0), HeapObjectReference::Weak(*elem0));
+ CHECK_EQ(array->Get(1), HeapObjectReference::Weak(*elem2));
+
+ CHECK(array->RemoveOne(MaybeObjectHandle::Weak(elem0)));
+
+ CHECK_EQ(array->length(), 1);
+ CHECK_EQ(array->Get(0), HeapObjectReference::Weak(*elem2));
+
+ CHECK(array->RemoveOne(MaybeObjectHandle::Weak(elem2)));
+
+ CHECK_EQ(array->length(), 0);
+}
+
TEST(Regress7768) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_turbo_inlining = false;
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 0998b3dfd9..a048e82e62 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -10,7 +10,7 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/logging.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
@@ -21,6 +21,7 @@
#include "src/objects/module-inl.h"
#include "src/runtime/runtime.h"
#include "src/source-position-table.h"
+#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
@@ -116,6 +117,17 @@ BytecodeExpectationsPrinter::GetBytecodeArrayForScript(
return i::handle(js_function->shared()->GetBytecodeArray(), i_isolate());
}
+i::Handle<i::BytecodeArray>
+BytecodeExpectationsPrinter::GetBytecodeArrayOfCallee(
+ const char* source_code) const {
+ i::Handle<i::Object> i_object =
+ v8::Utils::OpenHandle(*CompileRun(source_code));
+ i::Handle<i::JSFunction> js_function =
+ i::Handle<i::JSFunction>::cast(i_object);
+ CHECK(js_function->shared()->HasBytecodeArray());
+ return i::handle(js_function->shared()->GetBytecodeArray(), i_isolate());
+}
+
void BytecodeExpectationsPrinter::PrintEscapedString(
std::ostream& stream, const std::string& string) const {
for (char c : string) {
@@ -372,11 +384,15 @@ void BytecodeExpectationsPrinter::PrintExpectation(
wrap_ ? WrapCodeInFunction(test_function_name_.c_str(), snippet)
: snippet;
+ i::FLAG_enable_one_shot_optimization = oneshot_opt_;
+ i::FLAG_compilation_cache = false;
i::Handle<i::BytecodeArray> bytecode_array;
if (module_) {
CHECK(top_level_ && !wrap_);
v8::Local<v8::Module> module = CompileModule(source_code.c_str());
bytecode_array = GetBytecodeArrayForModule(module);
+ } else if (print_callee_) {
+ bytecode_array = GetBytecodeArrayOfCallee(source_code.c_str());
} else {
v8::Local<v8::Script> script = CompileScript(source_code.c_str());
if (top_level_) {
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index be5b143b5d..1d1bc437d0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -32,6 +32,8 @@ class BytecodeExpectationsPrinter final {
module_(false),
wrap_(true),
top_level_(false),
+ print_callee_(false),
+ oneshot_opt_(false),
test_function_name_(kDefaultTopFunctionName) {}
void PrintExpectation(std::ostream& stream, // NOLINT
@@ -46,6 +48,12 @@ class BytecodeExpectationsPrinter final {
void set_top_level(bool top_level) { top_level_ = top_level; }
bool top_level() const { return top_level_; }
+ void set_print_callee(bool print_callee) { print_callee_ = print_callee; }
+ bool print_callee() { return print_callee_; }
+
+ void set_oneshot_opt(bool oneshot_opt) { oneshot_opt_ = oneshot_opt; }
+ bool oneshot_opt() { return oneshot_opt_; }
+
void set_test_function_name(const std::string& test_function_name) {
test_function_name_ = test_function_name;
}
@@ -94,6 +102,8 @@ class BytecodeExpectationsPrinter final {
v8::Local<v8::Module> module) const;
i::Handle<v8::internal::BytecodeArray> GetBytecodeArrayForScript(
v8::Local<v8::Script> script) const;
+ i::Handle<i::BytecodeArray> GetBytecodeArrayOfCallee(
+ const char* source_code) const;
i::Isolate* i_isolate() const {
return reinterpret_cast<i::Isolate*>(isolate_);
@@ -103,6 +113,8 @@ class BytecodeExpectationsPrinter final {
bool module_;
bool wrap_;
bool top_level_;
+ bool print_callee_;
+ bool oneshot_opt_;
std::string test_function_name_;
static const char* const kDefaultTopFunctionName;
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 9714926254..ae8d050914 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -117,3 +117,164 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+ var a = [ 1, 2 ]; return [ ...a ];
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 86
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
+ B(Star), R(0),
+ /* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
+ B(Star), R(1),
+ B(LdaConstant), U8(2),
+ /* 64 S> */ B(Star), R(2),
+ B(LdaNamedProperty), R(0), U8(3), U8(7),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(0), U8(9),
+ B(Mov), R(0), R(6),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(5),
+ B(LdaNamedProperty), R(5), U8(4), U8(11),
+ B(Star), R(4),
+ B(CallProperty0), R(4), R(5), U8(13),
+ B(Star), R(3),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(5), U8(15),
+ B(JumpIfToBooleanTrue), U8(21),
+ B(LdaNamedProperty), R(3), U8(6), U8(17),
+ B(Star), R(3),
+ B(StaInArrayLiteral), R(1), R(2), U8(2),
+ B(Ldar), R(2),
+ B(Inc), U8(4),
+ B(Star), R(2),
+ B(JumpLoop), U8(35), I8(0),
+ B(Ldar), R(1),
+ /* 68 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ Smi [0],
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = [ 1, 2 ]; return [ 0, ...a ];
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 86
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
+ B(Star), R(0),
+ /* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
+ B(Star), R(1),
+ B(LdaConstant), U8(2),
+ /* 67 S> */ B(Star), R(2),
+ B(LdaNamedProperty), R(0), U8(3), U8(7),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(0), U8(9),
+ B(Mov), R(0), R(6),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(5),
+ B(LdaNamedProperty), R(5), U8(4), U8(11),
+ B(Star), R(4),
+ B(CallProperty0), R(4), R(5), U8(13),
+ B(Star), R(3),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(5), U8(15),
+ B(JumpIfToBooleanTrue), U8(21),
+ B(LdaNamedProperty), R(3), U8(6), U8(17),
+ B(Star), R(3),
+ B(StaInArrayLiteral), R(1), R(2), U8(2),
+ B(Ldar), R(2),
+ B(Inc), U8(4),
+ B(Star), R(2),
+ B(JumpLoop), U8(35), I8(0),
+ B(Ldar), R(1),
+ /* 71 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ Smi [1],
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ var a = [ 1, 2 ]; return [ ...a, 3 ];
+"
+frame size: 8
+parameter count: 1
+bytecode array length: 98
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(37),
+ B(Star), R(0),
+ /* 52 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(37),
+ B(Star), R(1),
+ B(LdaConstant), U8(2),
+ /* 64 S> */ B(Star), R(2),
+ B(LdaNamedProperty), R(0), U8(3), U8(7),
+ B(Star), R(7),
+ B(CallProperty0), R(7), R(0), U8(9),
+ B(Mov), R(0), R(6),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(5),
+ B(LdaNamedProperty), R(5), U8(4), U8(11),
+ B(Star), R(4),
+ B(CallProperty0), R(4), R(5), U8(13),
+ B(Star), R(3),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(5), U8(15),
+ B(JumpIfToBooleanTrue), U8(21),
+ B(LdaNamedProperty), R(3), U8(6), U8(17),
+ B(Star), R(3),
+ B(StaInArrayLiteral), R(1), R(2), U8(2),
+ B(Ldar), R(2),
+ B(Inc), U8(4),
+ B(Star), R(2),
+ B(JumpLoop), U8(35), I8(0),
+ B(LdaSmi), I8(3),
+ B(StaInArrayLiteral), R(1), R(2), U8(2),
+ B(Ldar), R(2),
+ B(Inc), U8(4),
+ B(Star), R(2),
+ B(Ldar), R(1),
+ /* 71 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ Smi [0],
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index d7109321c6..f5cbed6a7a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -362,7 +362,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(18),
B(LdaConstant), U8(14),
B(Star), R(19),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index e965d7a689..c5fae1f4f6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -123,7 +123,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(19),
B(LdaConstant), U8(11),
B(Star), R(20),
@@ -377,7 +377,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(19),
B(LdaConstant), U8(11),
B(Star), R(20),
@@ -653,7 +653,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(19),
B(LdaConstant), U8(11),
B(Star), R(20),
@@ -885,7 +885,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 5c603964c8..bcb462bc75 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -85,7 +85,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(12),
B(LdaConstant), U8(7),
B(Star), R(13),
@@ -217,7 +217,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(13),
B(LdaConstant), U8(7),
B(Star), R(14),
@@ -361,7 +361,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(12),
B(LdaConstant), U8(7),
B(Star), R(13),
@@ -495,7 +495,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(11),
B(LdaConstant), U8(9),
B(Star), R(12),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 90fcb9065d..d4fe1a091c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -89,7 +89,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(14),
B(LdaConstant), U8(6),
B(Star), R(15),
@@ -256,7 +256,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(14),
B(LdaConstant), U8(11),
B(Star), R(15),
@@ -401,7 +401,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
@@ -495,7 +495,7 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(6),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(78),
+ B(LdaSmi), I8(81),
B(Star), R(18),
B(LdaConstant), U8(4),
B(Star), R(19),
@@ -550,7 +550,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(17),
B(LdaConstant), U8(8),
B(Star), R(18),
@@ -697,7 +697,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(15),
B(LdaConstant), U8(9),
B(Star), R(16),
@@ -859,7 +859,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(14),
B(LdaConstant), U8(12),
B(Star), R(15),
@@ -1007,7 +1007,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(20),
B(LdaConstant), U8(6),
B(Star), R(21),
@@ -1218,7 +1218,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(19),
B(LdaConstant), U8(7),
B(Star), R(20),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 54deec7198..641a2b2eb0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -203,7 +203,7 @@ bytecodes: [
B(TestTypeOf), U8(6),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(149),
+ B(Wide), B(LdaSmi), I16(153),
B(Star), R(14),
B(LdaConstant), U8(13),
B(Star), R(15),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
new file mode 100644
index 0000000000..f2653a6ed1
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithOneshotOpt.golden
@@ -0,0 +1,408 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+top level: yes
+print callee: yes
+oneshot opt: yes
+
+---
+snippet: "
+
+ (function() {
+ l = {};
+ l.aa = 2;
+ l.bb = l.aa;
+ return arguments.callee;
+ })();
+
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 82
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(CreateEmptyObjectLiteral),
+ /* 31 E> */ B(StaGlobal), U8(0), U8(0),
+ /* 45 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ /* 50 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ /* 63 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ /* 70 E> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(2), R(3),
+ /* 72 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ /* 68 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ /* 84 S> */ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ /* 101 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 108 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["aa"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["bb"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ (function() {
+ l = {};
+ for (i = 0; i < 5; ++i) {
+ l.aa = 2;
+ l.bb = l.aa;
+ }
+ return arguments.callee;
+ })();
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 77
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(CreateEmptyObjectLiteral),
+ /* 31 E> */ B(StaGlobal), U8(0), U8(0),
+ /* 50 S> */ B(LdaZero),
+ /* 52 E> */ B(StaGlobal), U8(1), U8(2),
+ /* 59 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(5),
+ /* 59 E> */ B(TestLessThan), R(1), U8(6),
+ B(JumpIfFalse), U8(43),
+ /* 45 E> */ B(StackCheck),
+ /* 81 S> */ B(LdaGlobal), U8(0), U8(7),
+ B(Star), R(1),
+ B(LdaSmi), I8(2),
+ /* 86 E> */ B(StaNamedProperty), R(1), U8(2), U8(9),
+ /* 101 S> */ B(LdaGlobal), U8(0), U8(7),
+ B(Star), R(1),
+ /* 108 E> */ B(LdaGlobal), U8(0), U8(7),
+ B(Star), R(2),
+ /* 110 E> */ B(LdaNamedProperty), R(2), U8(2), U8(11),
+ /* 106 E> */ B(StaNamedProperty), R(1), U8(3), U8(13),
+ /* 66 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Inc), U8(15),
+ /* 66 E> */ B(StaGlobal), U8(1), U8(2),
+ B(JumpLoop), U8(50), I8(0),
+ /* 132 S> */ B(LdaConstant), U8(4),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ /* 149 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 156 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["i"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["aa"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["bb"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ (function() {
+ l = {};
+ c = 4;
+ while(c > 4) {
+ l.aa = 2;
+ l.bb = l.aa;
+ c--;
+ }
+ return arguments.callee;
+ })();
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 78
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(CreateEmptyObjectLiteral),
+ /* 31 E> */ B(StaGlobal), U8(0), U8(0),
+ /* 45 S> */ B(LdaSmi), I8(4),
+ /* 47 E> */ B(StaGlobal), U8(1), U8(2),
+ /* 68 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(4),
+ /* 68 E> */ B(TestGreaterThan), R(1), U8(6),
+ B(JumpIfFalse), U8(43),
+ /* 60 E> */ B(StackCheck),
+ /* 85 S> */ B(LdaGlobal), U8(0), U8(7),
+ B(Star), R(1),
+ B(LdaSmi), I8(2),
+ /* 90 E> */ B(StaNamedProperty), R(1), U8(2), U8(9),
+ /* 105 S> */ B(LdaGlobal), U8(0), U8(7),
+ B(Star), R(1),
+ /* 112 E> */ B(LdaGlobal), U8(0), U8(7),
+ B(Star), R(2),
+ /* 114 E> */ B(LdaNamedProperty), R(2), U8(2), U8(11),
+ /* 110 E> */ B(StaNamedProperty), R(1), U8(3), U8(13),
+ /* 128 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Dec), U8(15),
+ /* 129 E> */ B(StaGlobal), U8(1), U8(2),
+ B(JumpLoop), U8(50), I8(0),
+ /* 151 S> */ B(LdaConstant), U8(4),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ /* 168 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 175 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["c"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["aa"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["bb"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ (function() {
+ l = {};
+ c = 4;
+ do {
+ l.aa = 2;
+ l.bb = l.aa;
+ c--;
+ } while(c > 4)
+ return arguments.callee;
+ })();
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 78
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(CreateEmptyObjectLiteral),
+ /* 31 E> */ B(StaGlobal), U8(0), U8(0),
+ /* 45 S> */ B(LdaSmi), I8(4),
+ /* 47 E> */ B(StaGlobal), U8(1), U8(2),
+ /* 60 E> */ B(StackCheck),
+ /* 75 S> */ B(LdaGlobal), U8(0), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(2),
+ /* 80 E> */ B(StaNamedProperty), R(1), U8(2), U8(6),
+ /* 95 S> */ B(LdaGlobal), U8(0), U8(4),
+ B(Star), R(1),
+ /* 102 E> */ B(LdaGlobal), U8(0), U8(4),
+ B(Star), R(2),
+ /* 104 E> */ B(LdaNamedProperty), R(2), U8(2), U8(8),
+ /* 100 E> */ B(StaNamedProperty), R(1), U8(3), U8(10),
+ /* 118 S> */ B(LdaGlobal), U8(1), U8(12),
+ B(Dec), U8(14),
+ /* 119 E> */ B(StaGlobal), U8(1), U8(2),
+ /* 141 S> */ B(LdaGlobal), U8(1), U8(12),
+ B(Star), R(1),
+ B(LdaSmi), I8(4),
+ /* 141 E> */ B(TestGreaterThan), R(1), U8(15),
+ B(JumpIfFalse), U8(5),
+ B(JumpLoop), U8(50), I8(0),
+ /* 154 S> */ B(LdaConstant), U8(4),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ /* 171 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 178 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["c"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["aa"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["bb"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ (function() {
+ l = {
+ 'aa': 3.3,
+ 'bb': 4.4
+ };
+ if (l.aa < 3) {
+ l.aa = 3;
+ } else {
+ l.aa = l.bb;
+ }
+ return arguments.callee;
+ })();
+
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 121
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(LdaSmi), I8(41),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
+ /* 31 E> */ B(StaGlobal), U8(1), U8(0),
+ /* 95 S> */ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(1), R(2),
+ /* 101 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ /* 104 E> */ B(TestLessThan), R(1), U8(4),
+ B(JumpIfFalse), U8(28),
+ /* 121 S> */ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ B(Star), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ /* 126 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ B(Jump), U8(40),
+ /* 158 S> */ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(1),
+ /* 165 E> */ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(3),
+ B(Star), R(4),
+ B(Mov), R(2), R(3),
+ /* 167 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
+ B(Star), R(4),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ /* 163 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ /* 189 S> */ B(LdaConstant), U8(4),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ /* 206 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 213 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["aa"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["bb"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ (function() {
+ a = [0, [1, 1,2,], 3];
+ return arguments.callee;
+ })();
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaSmi), I8(4),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ /* 31 E> */ B(StaGlobal), U8(1), U8(0),
+ /* 60 S> */ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ /* 77 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 84 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ (function() {
+ a = [];
+ return arguments.callee;
+ })();
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 32
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaSmi), I8(37),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ /* 31 E> */ B(StaGlobal), U8(1), U8(0),
+ /* 45 S> */ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(0), R(2),
+ /* 62 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ /* 69 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
new file mode 100644
index 0000000000..f116bdc68f
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IIFEWithoutOneshotOpt.golden
@@ -0,0 +1,109 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+top level: yes
+print callee: yes
+
+---
+snippet: "
+
+ (function() {
+ l = {};
+ l.a = 2;
+ l.b = l.a;
+ return arguments.callee;
+ })();
+
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 42
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(CreateEmptyObjectLiteral),
+ /* 31 E> */ B(StaGlobal), U8(0), U8(0),
+ /* 45 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(2),
+ /* 49 E> */ B(StaNamedProperty), R(1), U8(1), U8(4),
+ /* 62 S> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(1),
+ /* 68 E> */ B(LdaGlobal), U8(0), U8(2),
+ B(Star), R(2),
+ /* 70 E> */ B(LdaNamedProperty), R(2), U8(1), U8(6),
+ /* 66 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
+ /* 98 S> */ B(LdaNamedProperty), R(0), U8(3), U8(10),
+ /* 105 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ (function() {
+ l = {
+ 'a': 4.3,
+ 'b': 3.4
+ };
+ if (l.a < 3) {
+ l.a = 3;
+ } else {
+ l.a = l.b;
+ }
+ return arguments.callee;
+ })();
+
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 68
+bytecodes: [
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 16 E> */ B(StackCheck),
+ /* 29 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
+ B(Ldar), R(1),
+ /* 31 E> */ B(StaGlobal), U8(1), U8(1),
+ /* 93 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ /* 99 E> */ B(LdaNamedProperty), R(1), U8(2), U8(5),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ /* 101 E> */ B(TestLessThan), R(1), U8(7),
+ B(JumpIfFalse), U8(15),
+ /* 118 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ /* 122 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
+ B(Jump), U8(20),
+ /* 154 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ /* 160 E> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(2),
+ /* 162 E> */ B(LdaNamedProperty), R(2), U8(3), U8(10),
+ /* 158 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
+ /* 200 S> */ B(LdaNamedProperty), R(0), U8(4), U8(12),
+ /* 207 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callee"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
new file mode 100644
index 0000000000..3bc175b7da
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreOneShot.golden
@@ -0,0 +1,423 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+top level: yes
+oneshot opt: yes
+
+---
+snippet: "
+
+ l = {
+ 'a': 1,
+ 'b': 2
+ };
+
+ v = l['a'] + l['b'];
+ l['b'] = 7;
+ l['a'] = l['b'];
+
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 128
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(LdaSmi), I8(41),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(0),
+ /* 60 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(1), R(2),
+ /* 65 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ B(Star), R(1),
+ /* 73 E> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(2),
+ B(LdaConstant), U8(3),
+ B(Star), R(4),
+ B(Mov), R(2), R(3),
+ /* 74 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
+ /* 71 E> */ B(Add), R(1), U8(2),
+ /* 62 E> */ B(StaGlobal), U8(4), U8(5),
+ /* 87 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), I8(7),
+ B(Star), R(4),
+ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ /* 94 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ /* 105 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ /* 114 E> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(2),
+ B(LdaConstant), U8(3),
+ B(Star), R(4),
+ B(Mov), R(2), R(3),
+ /* 115 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(3), U8(2),
+ B(Star), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(1), R(3),
+ B(Mov), R(2), R(5),
+ /* 112 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(3), U8(4),
+ B(Mov), R(5), R(0),
+ B(Ldar), R(0),
+ /* 128 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["v"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ l = {
+ 'a': 1.1,
+ 'b': 2.2
+ };
+ for (i = 0; i < 5; ++i) {
+ l['a'] = l['a'] + l['b'];
+ l['b'] = l['a'] + l['b'];
+ }
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 120
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(LdaSmi), I8(41),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(0),
+ B(LdaUndefined),
+ B(Star), R(0),
+ /* 68 S> */ B(LdaZero),
+ /* 70 E> */ B(StaGlobal), U8(2), U8(2),
+ /* 77 S> */ B(LdaGlobal), U8(2), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(5),
+ /* 77 E> */ B(TestLessThan), R(1), U8(6),
+ B(JumpIfFalse), U8(83),
+ /* 63 E> */ B(StackCheck),
+ /* 97 S> */ B(LdaGlobal), U8(1), U8(7),
+ B(Star), R(1),
+ /* 106 E> */ B(LdaGlobal), U8(1), U8(7),
+ B(Star), R(2),
+ /* 107 E> */ B(LdaNamedProperty), R(2), U8(3), U8(10),
+ B(Star), R(2),
+ /* 115 E> */ B(LdaGlobal), U8(1), U8(7),
+ B(Star), R(3),
+ /* 116 E> */ B(LdaNamedProperty), R(3), U8(4), U8(12),
+ /* 113 E> */ B(Add), R(2), U8(9),
+ /* 104 E> */ B(StaNamedProperty), R(1), U8(3), U8(14),
+ /* 131 S> */ B(LdaGlobal), U8(1), U8(7),
+ B(Star), R(1),
+ /* 140 E> */ B(LdaGlobal), U8(1), U8(7),
+ B(Star), R(2),
+ /* 141 E> */ B(LdaNamedProperty), R(2), U8(3), U8(10),
+ B(Star), R(2),
+ /* 149 E> */ B(LdaGlobal), U8(1), U8(7),
+ B(Star), R(3),
+ /* 150 E> */ B(LdaNamedProperty), R(3), U8(4), U8(12),
+ /* 147 E> */ B(Add), R(2), U8(16),
+ B(Star), R(2),
+ /* 138 E> */ B(StaNamedProperty), R(1), U8(4), U8(17),
+ B(Mov), R(2), R(0),
+ /* 84 S> */ B(LdaGlobal), U8(2), U8(4),
+ B(Inc), U8(19),
+ /* 84 E> */ B(StaGlobal), U8(2), U8(2),
+ B(JumpLoop), U8(90), I8(0),
+ B(Ldar), R(0),
+ /* 171 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["i"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ l = {
+ 'a': 1.1,
+ 'b': 2.2
+ };
+ while (s > 0) {
+ l['a'] = l['a'] - l['b'];
+ l['b'] = l['b'] - l['a'];
+ }
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 109
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(LdaSmi), I8(41),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(0),
+ B(LdaUndefined),
+ B(Star), R(0),
+ /* 72 S> */ B(LdaGlobal), U8(2), U8(2),
+ B(Star), R(1),
+ B(LdaZero),
+ /* 72 E> */ B(TestGreaterThan), R(1), U8(4),
+ B(JumpIfFalse), U8(77),
+ /* 63 E> */ B(StackCheck),
+ /* 87 S> */ B(LdaGlobal), U8(1), U8(5),
+ B(Star), R(1),
+ /* 97 E> */ B(LdaGlobal), U8(1), U8(5),
+ B(Star), R(2),
+ /* 98 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
+ B(Star), R(2),
+ /* 106 E> */ B(LdaGlobal), U8(1), U8(5),
+ B(Star), R(3),
+ /* 107 E> */ B(LdaNamedProperty), R(3), U8(4), U8(10),
+ /* 104 E> */ B(Sub), R(2), U8(7),
+ /* 95 E> */ B(StaNamedProperty), R(1), U8(3), U8(12),
+ /* 122 S> */ B(LdaGlobal), U8(1), U8(5),
+ B(Star), R(1),
+ /* 132 E> */ B(LdaGlobal), U8(1), U8(5),
+ B(Star), R(2),
+ /* 133 E> */ B(LdaNamedProperty), R(2), U8(4), U8(10),
+ B(Star), R(2),
+ /* 141 E> */ B(LdaGlobal), U8(1), U8(5),
+ B(Star), R(3),
+ /* 142 E> */ B(LdaNamedProperty), R(3), U8(3), U8(8),
+ /* 139 E> */ B(Sub), R(2), U8(14),
+ B(Star), R(2),
+ /* 130 E> */ B(StaNamedProperty), R(1), U8(4), U8(15),
+ B(Mov), R(2), R(0),
+ B(Ldar), R(2),
+ B(JumpLoop), U8(83), I8(0),
+ B(Ldar), R(0),
+ /* 163 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["s"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ l = {
+ 'a': 1.1,
+ 'b': 2.2
+ };
+ s = 10;
+ do {
+ l['a'] = l['b'] - l['a'];
+ } while (s < 10);
+
+"
+frame size: 4
+parameter count: 1
+bytecode array length: 81
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(LdaSmi), I8(41),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(0),
+ /* 63 S> */ B(LdaSmi), I8(10),
+ /* 65 E> */ B(StaGlobal), U8(2), U8(2),
+ B(LdaUndefined),
+ B(Star), R(0),
+ /* 77 E> */ B(StackCheck),
+ /* 90 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(1),
+ /* 99 E> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(2),
+ /* 100 E> */ B(LdaNamedProperty), R(2), U8(3), U8(7),
+ B(Star), R(2),
+ /* 108 E> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(3),
+ /* 109 E> */ B(LdaNamedProperty), R(3), U8(4), U8(9),
+ /* 106 E> */ B(Sub), R(2), U8(6),
+ B(Star), R(2),
+ /* 97 E> */ B(StaNamedProperty), R(1), U8(4), U8(11),
+ B(Mov), R(2), R(0),
+ /* 133 S> */ B(LdaGlobal), U8(2), U8(13),
+ B(Star), R(1),
+ B(LdaSmi), I8(10),
+ /* 133 E> */ B(TestLessThan), R(1), U8(15),
+ B(JumpIfFalse), U8(5),
+ B(JumpLoop), U8(50), I8(0),
+ B(Ldar), R(0),
+ /* 146 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["s"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ l = {
+ 'c': 1.1,
+ 'd': 2.2
+ };
+ if (l['c'] < 3) {
+ l['c'] = 3;
+ } else {
+ l['d'] = 3;
+ }
+
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 111
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(LdaSmi), I8(41),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kCreateObjectLiteralWithoutAllocationSite), R(2), U8(2),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(0),
+ /* 63 S> */ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(Mov), R(1), R(2),
+ /* 68 E> */ B(InvokeIntrinsic), U8(Runtime::k_GetProperty), R(2), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ /* 74 E> */ B(TestLessThan), R(1), U8(4),
+ B(JumpIfFalse), U8(36),
+ /* 89 S> */ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ B(Star), R(2),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(1), R(3),
+ B(Mov), R(2), R(5),
+ /* 96 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(3), U8(4),
+ B(Mov), R(5), R(0),
+ B(Ldar), R(2),
+ B(Jump), U8(34),
+ /* 124 S> */ B(LdaGlobal), U8(1), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ B(Star), R(2),
+ B(LdaConstant), U8(3),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(1), R(3),
+ B(Mov), R(2), R(5),
+ /* 131 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(3), U8(4),
+ B(Mov), R(5), R(0),
+ B(Ldar), R(2),
+ B(Ldar), R(0),
+ /* 150 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["c"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["d"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ a = [1.1, [2.2, 4.5]];
+
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 20
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaSmi), I8(4),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(0),
+ B(Star), R(0),
+ /* 36 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ b = [];
+
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 20
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaSmi), I8(37),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kCreateArrayLiteralWithoutAllocationSite), R(1), U8(2),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(0),
+ B(Star), R(0),
+ /* 21 S> */ B(Return),
+]
+constant pool: [
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["b"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
new file mode 100644
index 0000000000..4b249ea15f
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoadStoreWithoutOneShot.golden
@@ -0,0 +1,119 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+top level: yes
+
+---
+snippet: "
+
+ l = {
+ 'aa': 1.1,
+ 'bb': 2.2
+ };
+
+ v = l['aa'] + l['bb'];
+ l['bb'] = 7;
+ l['aa'] = l['bb'];
+
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 74
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
+ B(Ldar), R(1),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(1),
+ /* 66 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(1),
+ /* 71 E> */ B(LdaNamedProperty), R(1), U8(2), U8(6),
+ B(Star), R(1),
+ /* 80 E> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(2),
+ /* 81 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
+ /* 78 E> */ B(Add), R(1), U8(3),
+ /* 68 E> */ B(StaGlobal), U8(4), U8(10),
+ /* 95 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(1),
+ B(LdaSmi), I8(7),
+ /* 103 E> */ B(StaNamedProperty), R(1), U8(3), U8(12),
+ /* 114 S> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(1),
+ /* 124 E> */ B(LdaGlobal), U8(1), U8(4),
+ B(Star), R(2),
+ /* 125 E> */ B(LdaNamedProperty), R(2), U8(3), U8(8),
+ B(Star), R(2),
+ /* 122 E> */ B(StaNamedProperty), R(1), U8(2), U8(14),
+ B(Mov), R(2), R(0),
+ B(Ldar), R(0),
+ /* 139 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["aa"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["bb"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["v"],
+]
+handlers: [
+]
+
+---
+snippet: "
+
+ l = {
+ 'cc': 3.1,
+ 'dd': 4.2
+ };
+ if (l['cc'] < 3) {
+ l['cc'] = 3;
+ } else {
+ l['dd'] = 3;
+ }
+
+"
+frame size: 3
+parameter count: 1
+bytecode array length: 70
+bytecodes: [
+ /* 0 E> */ B(StackCheck),
+ /* 7 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(41), R(1),
+ B(Ldar), R(1),
+ /* 9 E> */ B(StaGlobal), U8(1), U8(1),
+ /* 65 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ /* 70 E> */ B(LdaNamedProperty), R(1), U8(2), U8(5),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ /* 77 E> */ B(TestLessThan), R(1), U8(7),
+ B(JumpIfFalse), U8(22),
+ /* 92 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ B(Star), R(2),
+ /* 100 E> */ B(StaNamedProperty), R(1), U8(2), U8(8),
+ B(Mov), R(2), R(0),
+ B(Ldar), R(2),
+ B(Jump), U8(20),
+ /* 128 S> */ B(LdaGlobal), U8(1), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), I8(3),
+ B(Star), R(2),
+ /* 136 E> */ B(StaNamedProperty), R(1), U8(3), U8(10),
+ B(Mov), R(2), R(0),
+ B(Ldar), R(2),
+ B(Ldar), R(0),
+ /* 155 S> */ B(Return),
+]
+constant pool: [
+ OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["l"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["cc"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["dd"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
index e87ceaf0de..d870c4362f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -229,7 +229,7 @@ bytecodes: [
B(JumpIfUndefined), U8(6),
B(Ldar), R(3),
B(JumpIfNotNull), U8(16),
- B(LdaSmi), I8(78),
+ B(LdaSmi), I8(81),
B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 4dad7d48ce..ef0f616528 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -42,6 +42,8 @@ class ProgramOptions final {
wrap_(true),
module_(false),
top_level_(false),
+ print_callee_(false),
+ oneshot_opt_(false),
do_expressions_(false),
async_iteration_(false),
public_fields_(false),
@@ -64,6 +66,8 @@ class ProgramOptions final {
bool wrap() const { return wrap_; }
bool module() const { return module_; }
bool top_level() const { return top_level_; }
+ bool print_callee() const { return print_callee_; }
+ bool oneshot_opt() const { return oneshot_opt_; }
bool do_expressions() const { return do_expressions_; }
bool async_iteration() const { return async_iteration_; }
bool public_fields() const { return public_fields_; }
@@ -84,6 +88,8 @@ class ProgramOptions final {
bool wrap_;
bool module_;
bool top_level_;
+ bool print_callee_;
+ bool oneshot_opt_;
bool do_expressions_;
bool async_iteration_;
bool public_fields_;
@@ -174,6 +180,10 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.module_ = true;
} else if (strcmp(argv[i], "--top-level") == 0) {
options.top_level_ = true;
+ } else if (strcmp(argv[i], "--print-callee") == 0) {
+ options.print_callee_ = true;
+ } else if (strcmp(argv[i], "--disable-oneshot-opt") == 0) {
+ options.oneshot_opt_ = false;
} else if (strcmp(argv[i], "--do-expressions") == 0) {
options.do_expressions_ = true;
} else if (strcmp(argv[i], "--async-iteration") == 0) {
@@ -269,6 +279,8 @@ bool ProgramOptions::Validate() const {
void ProgramOptions::UpdateFromHeader(std::istream& stream) {
std::string line;
+ const char* kPrintCallee = "print callee: ";
+ const char* kOneshotOpt = "oneshot opt: ";
// Skip to the beginning of the options header
while (std::getline(stream, line)) {
@@ -284,6 +296,10 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
test_function_name_ = line.c_str() + 20;
} else if (line.compare(0, 11, "top level: ") == 0) {
top_level_ = ParseBoolean(line.c_str() + 11);
+ } else if (line.compare(0, strlen(kPrintCallee), kPrintCallee) == 0) {
+ print_callee_ = ParseBoolean(line.c_str() + strlen(kPrintCallee));
+ } else if (line.compare(0, strlen(kOneshotOpt), kOneshotOpt) == 0) {
+ oneshot_opt_ = ParseBoolean(line.c_str() + strlen(kOneshotOpt));
} else if (line.compare(0, 16, "do expressions: ") == 0) {
do_expressions_ = ParseBoolean(line.c_str() + 16);
} else if (line.compare(0, 17, "async iteration: ") == 0) {
@@ -315,6 +331,8 @@ void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
if (module_) stream << "\nmodule: yes";
if (top_level_) stream << "\ntop level: yes";
+ if (print_callee_) stream << "\nprint callee: yes";
+ if (oneshot_opt_) stream << "\noneshot opt: yes";
if (do_expressions_) stream << "\ndo expressions: yes";
if (async_iteration_) stream << "\nasync iteration: yes";
if (public_fields_) stream << "\npublic fields: yes";
@@ -364,6 +382,10 @@ bool ReadNextSnippet(std::istream& stream, std::string* string_out) { // NOLINT
}
if (!found_begin_snippet) continue;
if (line == "\"") return true;
+ if (line.size() == 0) {
+ string_out->append("\n"); // consume empty line
+ continue;
+ }
CHECK_GE(line.size(), 2u); // We should have the indent
string_out->append(line.begin() + 2, line.end());
*string_out += '\n';
@@ -418,6 +440,8 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
printer.set_wrap(options.wrap());
printer.set_module(options.module());
printer.set_top_level(options.top_level());
+ printer.set_print_callee(options.print_callee());
+ printer.set_oneshot_opt(options.oneshot_opt());
if (!options.test_function_name().empty()) {
printer.set_test_function_name(options.test_function_name());
}
@@ -478,6 +502,9 @@ void PrintUsage(const char* exec_path) {
" --stdin Read from standard input instead of file.\n"
" --rebaseline Rebaseline input snippet file.\n"
" --no-wrap Do not wrap the snippet in a function.\n"
+ " --disable-oneshot-opt Disable Oneshot Optimization.\n"
+ " --print-callee Print bytecode of callee, function should "
+ "return arguments.callee.\n"
" --module Compile as JavaScript module.\n"
" --test-function-name=foo "
"Specify the name of the test function.\n"
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.cc b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
index d112511d22..acb06f2d8a 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.cc
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
@@ -4,6 +4,7 @@
#include "test/cctest/interpreter/interpreter-tester.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 561d163a69..0ec28d3653 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -129,6 +129,27 @@ std::string BuildActual(const BytecodeExpectationsPrinter& printer,
return actual_stream.str();
}
+// inplace left trim
+static inline void ltrim(std::string& str) {
+ str.erase(str.begin(),
+ std::find_if(str.begin(), str.end(),
+ [](unsigned char ch) { return !std::isspace(ch); }));
+}
+
+// inplace right trim
+static inline void rtrim(std::string& str) {
+ str.erase(std::find_if(str.rbegin(), str.rend(),
+ [](unsigned char ch) { return !std::isspace(ch); })
+ .base(),
+ str.end());
+}
+
+static inline std::string trim(std::string& str) {
+ ltrim(str);
+ rtrim(str);
+ return str;
+}
+
bool CompareTexts(const std::string& generated, const std::string& expected) {
std::istringstream generated_stream(generated);
std::istringstream expected_stream(expected);
@@ -157,7 +178,7 @@ bool CompareTexts(const std::string& generated, const std::string& expected) {
return false;
}
- if (generated_line != expected_line) {
+ if (trim(generated_line) != trim(expected_line)) {
std::cerr << "Inputs differ at line " << line_number << "\n";
std::cerr << " Generated: '" << generated_line << "'\n";
std::cerr << " Expected: '" << expected_line << "'\n";
@@ -411,6 +432,242 @@ TEST(PropertyLoads) {
LoadGolden("PropertyLoads.golden")));
}
+TEST(PropertyLoadStoreOneShot) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_top_level(true);
+ printer.set_oneshot_opt(true);
+
+ const char* snippets[] = {
+ R"(
+ l = {
+ 'a': 1,
+ 'b': 2
+ };
+
+ v = l['a'] + l['b'];
+ l['b'] = 7;
+ l['a'] = l['b'];
+ )",
+
+ R"(
+ l = {
+ 'a': 1.1,
+ 'b': 2.2
+ };
+ for (i = 0; i < 5; ++i) {
+ l['a'] = l['a'] + l['b'];
+ l['b'] = l['a'] + l['b'];
+ }
+ )",
+
+ R"(
+ l = {
+ 'a': 1.1,
+ 'b': 2.2
+ };
+ while (s > 0) {
+ l['a'] = l['a'] - l['b'];
+ l['b'] = l['b'] - l['a'];
+ }
+ )",
+
+ R"(
+ l = {
+ 'a': 1.1,
+ 'b': 2.2
+ };
+ s = 10;
+ do {
+ l['a'] = l['b'] - l['a'];
+ } while (s < 10);
+ )",
+
+ R"(
+ l = {
+ 'c': 1.1,
+ 'd': 2.2
+ };
+ if (l['c'] < 3) {
+ l['c'] = 3;
+ } else {
+ l['d'] = 3;
+ }
+ )",
+
+ R"(
+ a = [1.1, [2.2, 4.5]];
+ )",
+
+ R"(
+ b = [];
+ )",
+ };
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PropertyLoadStoreOneShot.golden")));
+}
+
+TEST(PropertyLoadStoreWithoutOneShot) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_top_level(true);
+
+ const char* snippets[] = {
+ R"(
+ l = {
+ 'aa': 1.1,
+ 'bb': 2.2
+ };
+
+ v = l['aa'] + l['bb'];
+ l['bb'] = 7;
+ l['aa'] = l['bb'];
+ )",
+
+ R"(
+ l = {
+ 'cc': 3.1,
+ 'dd': 4.2
+ };
+ if (l['cc'] < 3) {
+ l['cc'] = 3;
+ } else {
+ l['dd'] = 3;
+ }
+ )",
+ };
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PropertyLoadStoreWithoutOneShot.golden")));
+}
+
+TEST(IIFEWithOneshotOpt) {
+ InitializedIgnitionHandleScope scope;
+ v8::Isolate* isolate = CcTest::isolate();
+ BytecodeExpectationsPrinter printer(isolate);
+ printer.set_wrap(false);
+ printer.set_top_level(true);
+ printer.set_print_callee(true);
+ printer.set_oneshot_opt(true);
+
+ const char* snippets[] = {
+      // No feedback vectors for top-level loads/stores of named properties in an IIFE
+ R"(
+ (function() {
+ l = {};
+ l.aa = 2;
+ l.bb = l.aa;
+ return arguments.callee;
+ })();
+ )",
+ // Normal load/store within loops of an IIFE
+ R"(
+ (function() {
+ l = {};
+ for (i = 0; i < 5; ++i) {
+ l.aa = 2;
+ l.bb = l.aa;
+ }
+ return arguments.callee;
+ })();
+ )",
+
+ R"(
+ (function() {
+ l = {};
+ c = 4;
+ while(c > 4) {
+ l.aa = 2;
+ l.bb = l.aa;
+ c--;
+ }
+ return arguments.callee;
+ })();
+ )",
+
+ R"(
+ (function() {
+ l = {};
+ c = 4;
+ do {
+ l.aa = 2;
+ l.bb = l.aa;
+ c--;
+ } while(c > 4)
+ return arguments.callee;
+ })();
+ )",
+ // No feedback vectors for loads/stores in conditionals
+ R"(
+ (function() {
+ l = {
+ 'aa': 3.3,
+ 'bb': 4.4
+ };
+ if (l.aa < 3) {
+ l.aa = 3;
+ } else {
+ l.aa = l.bb;
+ }
+ return arguments.callee;
+ })();
+ )",
+
+ R"(
+ (function() {
+ a = [0, [1, 1,2,], 3];
+ return arguments.callee;
+ })();
+ )",
+
+ R"(
+ (function() {
+ a = [];
+ return arguments.callee;
+ })();
+ )",
+ };
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("IIFEWithOneshotOpt.golden")));
+}
+
+TEST(IIFEWithoutOneshotOpt) {
+ InitializedIgnitionHandleScope scope;
+ v8::Isolate* isolate = CcTest::isolate();
+ BytecodeExpectationsPrinter printer(isolate);
+ printer.set_wrap(false);
+ printer.set_top_level(true);
+ printer.set_print_callee(true);
+
+ const char* snippets[] = {
+ R"(
+ (function() {
+ l = {};
+ l.a = 2;
+ l.b = l.a;
+ return arguments.callee;
+ })();
+ )",
+ R"(
+ (function() {
+ l = {
+ 'a': 4.3,
+ 'b': 3.4
+ };
+ if (l.a < 3) {
+ l.a = 3;
+ } else {
+ l.a = l.b;
+ }
+ return arguments.callee;
+ })();
+ )",
+ };
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("IIFEWithoutOneshotOpt.golden")));
+}
+
TEST(PropertyStores) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -1202,6 +1459,12 @@ TEST(ArrayLiterals) {
"return [ [ 1, 2 ], [ 3 ] ];\n",
"var a = 1; return [ [ a, 2 ], [ a + 2 ] ];\n",
+
+ "var a = [ 1, 2 ]; return [ ...a ];\n",
+
+ "var a = [ 1, 2 ]; return [ 0, ...a ];\n",
+
+ "var a = [ 1, 2 ]; return [ ...a, 3 ];\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index 7eb76ecb21..57d42e2a83 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/api-inl.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
#include "test/cctest/interpreter/interpreter-tester.h"
@@ -209,14 +210,14 @@ TEST(IntrinsicAsStubCall) {
InvokeIntrinsicHelper has_property_helper(isolate, handles.main_zone(),
Runtime::kInlineHasProperty);
- CHECK_EQ(*factory->true_value(),
- *has_property_helper.Invoke(
- has_property_helper.NewObject("'x'"),
- has_property_helper.NewObject("({ x: 20 })")));
- CHECK_EQ(*factory->false_value(),
- *has_property_helper.Invoke(
- has_property_helper.NewObject("'y'"),
- has_property_helper.NewObject("({ x: 20 })")));
+ CHECK_EQ(
+ *factory->true_value(),
+ *has_property_helper.Invoke(has_property_helper.NewObject("({ x: 20 })"),
+ has_property_helper.NewObject("'x'")));
+ CHECK_EQ(
+ *factory->false_value(),
+ *has_property_helper.Invoke(has_property_helper.NewObject("({ x: 20 })"),
+ has_property_helper.NewObject("'y'")));
}
} // namespace interpreter
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 11163d9688..c1898adf4e 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -6,6 +6,7 @@
#include "src/v8.h"
+#include "src/api-inl.h"
#include "src/execution.h"
#include "src/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
index 51dd41c720..8f2aae7e0b 100644
--- a/deps/v8/test/cctest/interpreter/test-source-positions.cc
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -4,7 +4,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/compiler/pipeline.h"
#include "src/handles.h"
#include "src/interpreter/bytecode-generator.h"
diff --git a/deps/v8/test/cctest/parsing/test-parse-decision.cc b/deps/v8/test/cctest/parsing/test-parse-decision.cc
index 926d4c24e0..f44a9e4b82 100644
--- a/deps/v8/test/cctest/parsing/test-parse-decision.cc
+++ b/deps/v8/test/cctest/parsing/test-parse-decision.cc
@@ -10,7 +10,7 @@
#include <unordered_map>
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index b63a079a78..473debec40 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/ast.h"
#include "src/compiler.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index 6f9b58daf3..a9dc4482ef 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -185,6 +185,110 @@ TEST(Utf8SplitBOM) {
}
}
+TEST(Utf8SplitMultiBOM) {
+ // Construct chunks with a split BOM followed by another split BOM.
+ const char* chunks = "\xef\xbb\0\xbf\xef\xbb\0\xbf\0\0";
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<i::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+
+ // Read the data, ensuring we get exactly one of the two BOMs back.
+ CHECK_EQ(0xFEFF, stream->Advance());
+ CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
+}
+
+TEST(Utf8AdvanceUntil) {
+ // Test utf-8 advancing until a certain char.
+
+ const char line_term = '\n';
+ const size_t kLen = arraysize(unicode_utf8);
+ char data[kLen + 1];
+ strncpy(data, unicode_utf8, kLen);
+ data[kLen - 1] = line_term;
+ data[kLen] = '\0';
+
+ {
+ const char* chunks[] = {data, "\0"};
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+
+ int32_t res = stream->AdvanceUntil(
+ [](int32_t c0_) { return unibrow::IsLineTerminator(c0_); });
+ CHECK_EQ(line_term, res);
+ }
+}
+
+TEST(AdvanceMatchAdvanceUntil) {
+  // Test that a plain Advance loop and AdvanceUntil behave the same
+
+ char data[] = {'a', 'b', '\n', 'c', '\0'};
+
+ {
+ const char* chunks[] = {data, "\0"};
+ ChunkSource chunk_source_a(chunks);
+
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream_advance(
+ v8::internal::ScannerStream::For(
+ &chunk_source_a, v8::ScriptCompiler::StreamedSource::UTF8,
+ nullptr));
+
+ ChunkSource chunk_source_au(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream_advance_until(
+ v8::internal::ScannerStream::For(
+ &chunk_source_au, v8::ScriptCompiler::StreamedSource::UTF8,
+ nullptr));
+
+ int32_t au_c0_ = stream_advance_until->AdvanceUntil(
+ [](int32_t c0_) { return unibrow::IsLineTerminator(c0_); });
+
+ int32_t a_c0_ = '0';
+ while (!unibrow::IsLineTerminator(a_c0_)) {
+ a_c0_ = stream_advance->Advance();
+ }
+
+  // Check that both advance methods have the same output
+ CHECK_EQ(a_c0_, au_c0_);
+
+ // Check if both set the cursor to the correct position by advancing both
+ // streams by one character.
+ a_c0_ = stream_advance->Advance();
+ au_c0_ = stream_advance_until->Advance();
+ CHECK_EQ(a_c0_, au_c0_);
+ }
+}
+
+TEST(Utf8AdvanceUntilOverChunkBoundaries) {
+ // Test utf-8 advancing until a certain char, crossing chunk boundaries.
+
+ // Split the test string at each byte and pass it to the stream. This way,
+ // we'll have a split at each possible boundary.
+ size_t len = strlen(unicode_utf8);
+ char buffer[arraysize(unicode_utf8) + 4];
+ for (size_t i = 1; i < len; i++) {
+ // Copy source string into buffer, splitting it at i.
+ // Then add three chunks, 0..i-1, i..strlen-1, empty.
+ strncpy(buffer, unicode_utf8, i);
+ strncpy(buffer + i + 1, unicode_utf8 + i, len - i);
+ buffer[i] = '\0';
+ buffer[len + 1] = '\n';
+ buffer[len + 2] = '\0';
+ buffer[len + 3] = '\0';
+ const char* chunks[] = {buffer, buffer + i + 1, buffer + len + 2};
+
+ ChunkSource chunk_source(chunks);
+ std::unique_ptr<v8::internal::Utf16CharacterStream> stream(
+ v8::internal::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+
+ int32_t res = stream->AdvanceUntil(
+ [](int32_t c0_) { return unibrow::IsLineTerminator(c0_); });
+ CHECK_EQ(buffer[len + 1], res);
+ }
+}
+
TEST(Utf8ChunkBoundaries) {
// Test utf-8 parsing at chunk boundaries.
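
The AdvanceMatchAdvanceUntil test above pins down the contract the new scanner-stream code relies on: AdvanceUntil stops on the first character matching the predicate and leaves the cursor exactly where an equivalent Advance loop would. A toy model of that contract follows; it deliberately uses a tiny stand-in struct rather than V8's Utf16CharacterStream API.

```cpp
// Hedged sketch of the behaviour AdvanceMatchAdvanceUntil checks above.
#include <cassert>
#include <functional>
#include <string>

struct TinyStream {
  std::string data;
  size_t pos = 0;
  int Advance() { return pos < data.size() ? data[pos++] : -1; }
  int AdvanceUntil(const std::function<bool(int)>& check) {
    int c;
    do {
      c = Advance();
    } while (c != -1 && !check(c));
    return c;
  }
};

int main() {
  TinyStream a{"ab\ncd"};
  TinyStream b{"ab\ncd"};

  int via_loop = '0';
  while (via_loop != '\n') via_loop = a.Advance();
  int via_until = b.AdvanceUntil([](int c) { return c == '\n'; });

  assert(via_loop == via_until);       // both stop on the line terminator
  assert(a.Advance() == b.Advance());  // both cursors sit just past it
  return 0;
}
```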
@@ -323,7 +427,7 @@ void TestCharacterStreams(const char* one_byte_source, unsigned length,
// This avoids the GC from trying to free a stack allocated resource.
if (uc16_string->IsExternalString())
i::Handle<i::ExternalTwoByteString>::cast(uc16_string)
- ->set_resource(nullptr);
+ ->SetResource(isolate, nullptr);
}
// 1-byte external string
@@ -343,7 +447,7 @@ void TestCharacterStreams(const char* one_byte_source, unsigned length,
// This avoids the GC from trying to free a stack allocated resource.
if (ext_one_byte_string->IsExternalString())
i::Handle<i::ExternalOneByteString>::cast(ext_one_byte_string)
- ->set_resource(nullptr);
+ ->SetResource(isolate, nullptr);
}
// 1-byte generic i::String
@@ -560,3 +664,34 @@ TEST(TestOverlongAndInvalidSequences) {
CHECK_EQ(unicode_expected.size(), arraysize(cases));
TestChunkStreamAgainstReference(cases, unicode_expected);
}
+
+TEST(RelocatingCharacterStream) {
+ ManualGCScope manual_gc_scope;
+ CcTest::InitializeVM();
+ i::Isolate* i_isolate = CcTest::i_isolate();
+ v8::HandleScope scope(CcTest::isolate());
+
+ const char* string = "abcd";
+ int length = static_cast<int>(strlen(string));
+ std::unique_ptr<i::uc16[]> uc16_buffer(new i::uc16[length]);
+ for (int i = 0; i < length; i++) {
+ uc16_buffer[i] = string[i];
+ }
+ i::Vector<const i::uc16> two_byte_vector(uc16_buffer.get(), length);
+ i::Handle<i::String> two_byte_string =
+ i_isolate->factory()
+ ->NewStringFromTwoByte(two_byte_vector, i::NOT_TENURED)
+ .ToHandleChecked();
+ std::unique_ptr<i::Utf16CharacterStream> two_byte_string_stream(
+ i::ScannerStream::For(i_isolate, two_byte_string, 0, length));
+ CHECK_EQ('a', two_byte_string_stream->Advance());
+ CHECK_EQ('b', two_byte_string_stream->Advance());
+ CHECK_EQ(size_t{2}, two_byte_string_stream->pos());
+ i::String* raw = *two_byte_string;
+ i_isolate->heap()->CollectGarbage(i::NEW_SPACE,
+ i::GarbageCollectionReason::kUnknown);
+ // GC moved the string.
+ CHECK_NE(raw, *two_byte_string);
+ CHECK_EQ('c', two_byte_string_stream->Advance());
+ CHECK_EQ('d', two_byte_string_stream->Advance());
+}
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index ea7a8fbaa2..56fe0ed83a 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -21,7 +21,7 @@ const char src_simple[] = "function foo() { var x = 2 * a() + b; }";
struct ScannerTestHelper {
ScannerTestHelper() = default;
- ScannerTestHelper(ScannerTestHelper&& other)
+ ScannerTestHelper(ScannerTestHelper&& other) V8_NOEXCEPT
: unicode_cache(std::move(other.unicode_cache)),
stream(std::move(other.stream)),
scanner(std::move(other.scanner)) {}
@@ -38,9 +38,9 @@ ScannerTestHelper make_scanner(const char* src) {
ScannerTestHelper helper;
helper.unicode_cache = std::unique_ptr<UnicodeCache>(new UnicodeCache);
helper.stream = ScannerStream::ForTesting(src);
- helper.scanner =
- std::unique_ptr<Scanner>(new Scanner(helper.unicode_cache.get()));
- helper.scanner->Initialize(helper.stream.get(), false);
+ helper.scanner = std::unique_ptr<Scanner>(
+ new Scanner(helper.unicode_cache.get(), helper.stream.get(), false));
+ helper.scanner->Initialize();
return helper;
}
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index a998a28642..de1901b6d3 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/frames-inl.h"
#include "src/string-stream.h"
#include "test/cctest/cctest.h"
@@ -812,7 +812,7 @@ TEST(PrototypeGetterAccessCheck) {
CHECK(try_catch.HasCaught());
}
- // Test crankshaft.
+ // Test TurboFan.
CompileRun("%OptimizeFunctionOnNextCall(f);");
security_check_value = true;
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index a570301213..5bda0432ea 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -17,7 +17,7 @@ static void UnreachableCallback(
}
TEST(CachedAccessor) {
- // Crankshaft support for fast accessors is not implemented; crankshafted
+ // TurboFan support for fast accessors is not implemented; turbofanned
// code uses the slow accessor which breaks this test's expectations.
v8::internal::FLAG_always_opt = false;
LocalContext env;
@@ -64,7 +64,7 @@ TEST(CachedAccessor) {
"Shhh, I'm private!");
}
-TEST(CachedAccessorCrankshaft) {
+TEST(CachedAccessorTurboFan) {
i::FLAG_allow_natives_syntax = true;
// v8::internal::FLAG_always_opt = false;
LocalContext env;
@@ -116,7 +116,7 @@ TEST(CachedAccessorCrankshaft) {
CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 456))
.FromJust());
- // Test Crankshaft.
+ // Test TurboFan.
CompileRun("%OptimizeFunctionOnNextCall(f);");
ExpectInt32("f()", 456);
@@ -140,7 +140,7 @@ TEST(CachedAccessorCrankshaft) {
CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 789))
.FromJust());
- // Test non-global access in Crankshaft.
+ // Test non-global access in TurboFan.
CompileRun("%OptimizeFunctionOnNextCall(g);");
ExpectInt32("g()", 789);
@@ -198,7 +198,7 @@ TEST(CachedAccessorOnGlobalObject) {
CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 456))
.FromJust());
- // Test Crankshaft.
+ // Test TurboFan.
CompileRun("%OptimizeFunctionOnNextCall(f);");
ExpectInt32("f()", 456);
@@ -222,7 +222,7 @@ TEST(CachedAccessorOnGlobalObject) {
CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 789))
.FromJust());
- // Test non-global access in Crankshaft.
+ // Test non-global access in TurboFan.
CompileRun("%OptimizeFunctionOnNextCall(g);");
ExpectInt32("g()", 789);
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 030db759fe..9d9138670e 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -7,7 +7,7 @@
#include "test/cctest/test-api.h"
#include "include/v8-util.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/arguments.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
@@ -76,16 +76,17 @@ void EmptyInterceptorEnumerator(
void SimpleAccessorGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
Local<Object> self = Local<Object>::Cast(info.This());
- info.GetReturnValue().Set(self->Get(info.GetIsolate()->GetCurrentContext(),
- String::Concat(v8_str("accessor_"), name))
- .ToLocalChecked());
+ info.GetReturnValue().Set(
+ self->Get(info.GetIsolate()->GetCurrentContext(),
+ String::Concat(info.GetIsolate(), v8_str("accessor_"), name))
+ .ToLocalChecked());
}
void SimpleAccessorSetter(Local<String> name, Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
Local<Object> self = Local<Object>::Cast(info.This());
self->Set(info.GetIsolate()->GetCurrentContext(),
- String::Concat(v8_str("accessor_"), name), value)
+ String::Concat(info.GetIsolate(), v8_str("accessor_"), name), value)
.FromJust();
}
@@ -155,13 +156,14 @@ void GenericInterceptorGetter(Local<Name> generic_name,
if (generic_name->IsSymbol()) {
Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
if (name->IsUndefined()) return;
- str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
+ str = String::Concat(info.GetIsolate(), v8_str("_sym_"),
+ Local<String>::Cast(name));
} else {
Local<String> name = Local<String>::Cast(generic_name);
String::Utf8Value utf8(info.GetIsolate(), name);
char* name_str = *utf8;
if (*name_str == '_') return;
- str = String::Concat(v8_str("_str_"), name);
+ str = String::Concat(info.GetIsolate(), v8_str("_str_"), name);
}
Local<Object> self = Local<Object>::Cast(info.This());
@@ -175,13 +177,14 @@ void GenericInterceptorSetter(Local<Name> generic_name, Local<Value> value,
if (generic_name->IsSymbol()) {
Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
if (name->IsUndefined()) return;
- str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
+ str = String::Concat(info.GetIsolate(), v8_str("_sym_"),
+ Local<String>::Cast(name));
} else {
Local<String> name = Local<String>::Cast(generic_name);
String::Utf8Value utf8(info.GetIsolate(), name);
char* name_str = *utf8;
if (*name_str == '_') return;
- str = String::Concat(v8_str("_str_"), name);
+ str = String::Concat(info.GetIsolate(), v8_str("_str_"), name);
}
Local<Object> self = Local<Object>::Cast(info.This());
@@ -4304,8 +4307,7 @@ THREADED_TEST(Regress256330) {
CHECK_EQ(mask, status & mask);
}
-
-THREADED_TEST(CrankshaftInterceptorSetter) {
+THREADED_TEST(OptimizedInterceptorSetter) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
@@ -4334,8 +4336,7 @@ THREADED_TEST(CrankshaftInterceptorSetter) {
ExpectInt32("obj.accessor_age", 42);
}
-
-THREADED_TEST(CrankshaftInterceptorGetter) {
+THREADED_TEST(OptimizedInterceptorGetter) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
@@ -4361,8 +4362,7 @@ THREADED_TEST(CrankshaftInterceptorGetter) {
ExpectInt32("getAge()", 1);
}
-
-THREADED_TEST(CrankshaftInterceptorFieldRead) {
+THREADED_TEST(OptimizedInterceptorFieldRead) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
@@ -4385,8 +4385,7 @@ THREADED_TEST(CrankshaftInterceptorFieldRead) {
ExpectInt32("getAge();", 100);
}
-
-THREADED_TEST(CrankshaftInterceptorFieldWrite) {
+THREADED_TEST(OptimizedInterceptorFieldWrite) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
@@ -5026,7 +5025,7 @@ void ConcatNamedPropertyGetter(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(
// Return the property name concatenated with itself.
- String::Concat(name.As<String>(), name.As<String>()));
+ String::Concat(info.GetIsolate(), name.As<String>(), name.As<String>()));
}
void ConcatIndexedPropertyGetter(
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index a018e12853..bf5aba2df6 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -38,7 +38,7 @@
#endif
#include "include/v8-util.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/arguments.h"
#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
@@ -52,12 +52,14 @@
#include "src/lookup.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-promise-inl.h"
-#include "src/parsing/preparse-data.h"
#include "src/profiler/cpu-profiler.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
+#include "src/wasm/wasm-js.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -374,7 +376,7 @@ THREADED_TEST(HulIgennem) {
v8::Local<v8::Primitive> undef = v8::Undefined(isolate);
Local<String> undef_str = undef->ToString(env.local()).ToLocalChecked();
char* value = i::NewArray<char>(undef_str->Utf8Length(isolate) + 1);
- undef_str->WriteUtf8(value);
+ undef_str->WriteUtf8(isolate, value);
CHECK_EQ(0, strcmp(value, "undefined"));
i::DeleteArray(value);
}
@@ -911,7 +913,8 @@ TEST(ExternalStringWithDisposeHandling) {
THREADED_TEST(StringConcat) {
{
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
const char* one_byte_string_1 = "function a_times_t";
const char* two_byte_string_1 = "wo_plus_b(a, b) {return ";
const char* one_byte_extern_1 = "a * 2 + b;} a_times_two_plus_b(4, 8) + ";
@@ -928,19 +931,19 @@ THREADED_TEST(StringConcat) {
.ToLocalChecked();
i::DeleteArray(two_byte_source);
- Local<String> source = String::Concat(left, right);
+ Local<String> source = String::Concat(isolate, left, right);
right = String::NewExternalOneByte(
env->GetIsolate(),
new TestOneByteResource(i::StrDup(one_byte_extern_1)))
.ToLocalChecked();
- source = String::Concat(source, right);
+ source = String::Concat(isolate, source, right);
right = String::NewExternalTwoByte(
env->GetIsolate(),
new TestResource(AsciiToTwoByteString(two_byte_extern_1)))
.ToLocalChecked();
- source = String::Concat(source, right);
+ source = String::Concat(isolate, source, right);
right = v8_str(one_byte_string_2);
- source = String::Concat(source, right);
+ source = String::Concat(isolate, source, right);
two_byte_source = AsciiToTwoByteString(two_byte_string_2);
right = String::NewFromTwoByte(env->GetIsolate(), two_byte_source,
@@ -948,12 +951,12 @@ THREADED_TEST(StringConcat) {
.ToLocalChecked();
i::DeleteArray(two_byte_source);
- source = String::Concat(source, right);
+ source = String::Concat(isolate, source, right);
right = String::NewExternalTwoByte(
env->GetIsolate(),
new TestResource(AsciiToTwoByteString(two_byte_extern_2)))
.ToLocalChecked();
- source = String::Concat(source, right);
+ source = String::Concat(isolate, source, right);
Local<Script> script = v8_compile(source);
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
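
Most of the churn in test-api.cc tracks overloads that now take the v8::Isolate* explicitly (String::Concat, String::Write*, String::WriteUtf8, StringObject::New). The standalone embedder sketch below shows the new call shape; only the Concat/WriteUtf8 signatures are taken from this patch, while the setup boilerplate is ordinary V8 initialization and is assumed, not part of the change.

```cpp
// Hedged embedder sketch of the isolate-taking overloads these hunks adopt.
#include <cstdio>
#include <memory>

#include "include/libplatform/libplatform.h"
#include "include/v8.h"

int main() {
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  v8::Isolate::CreateParams params;
  params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate* isolate = v8::Isolate::New(params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);

    v8::Local<v8::String> left =
        v8::String::NewFromUtf8(isolate, "abc", v8::NewStringType::kNormal)
            .ToLocalChecked();
    v8::Local<v8::String> right =
        v8::String::NewFromUtf8(isolate, "de", v8::NewStringType::kNormal)
            .ToLocalChecked();

    // Both calls now take the isolate as their first argument.
    v8::Local<v8::String> joined = v8::String::Concat(isolate, left, right);
    char buffer[16];
    int written =
        joined->WriteUtf8(isolate, buffer, static_cast<int>(sizeof(buffer)));
    std::printf("wrote %d bytes: %s\n", written, buffer);
  }
  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete params.array_buffer_allocator;
  return 0;
}
```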
@@ -1736,7 +1739,8 @@ THREADED_TEST(StringObject) {
Local<v8::String> the_string = as_boxed->ValueOf();
CHECK(!the_string.IsEmpty());
ExpectObject("\"test\"", the_string);
- v8::Local<v8::Value> new_boxed_string = v8::StringObject::New(the_string);
+ v8::Local<v8::Value> new_boxed_string =
+ v8::StringObject::New(CcTest::isolate(), the_string);
CHECK(new_boxed_string->IsStringObject());
as_boxed = new_boxed_string.As<v8::StringObject>();
the_string = as_boxed->ValueOf();
@@ -2469,7 +2473,7 @@ THREADED_TEST(DescriptorInheritance2) {
for (int i = 0; i < kDataPropertiesNumber; i++) {
v8::Local<v8::Value> val = v8_num(i);
v8::Local<v8::String> val_str = val->ToString(env.local()).ToLocalChecked();
- v8::Local<v8::String> name = String::Concat(v8_str("p"), val_str);
+ v8::Local<v8::String> name = String::Concat(isolate, v8_str("p"), val_str);
templ->Set(name, val);
templ->Set(val_str, val);
@@ -2506,7 +2510,7 @@ THREADED_TEST(DescriptorInheritance2) {
for (int i = 0; i < kDataPropertiesNumber; i++) {
v8::Local<v8::Value> val = v8_num(i);
v8::Local<v8::String> val_str = val->ToString(env.local()).ToLocalChecked();
- v8::Local<v8::String> name = String::Concat(v8_str("p"), val_str);
+ v8::Local<v8::String> name = String::Concat(isolate, v8_str("p"), val_str);
CHECK_EQ(i, object->Get(env.local(), name)
.ToLocalChecked()
@@ -2525,16 +2529,18 @@ THREADED_TEST(DescriptorInheritance2) {
void SimpleAccessorGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
Local<Object> self = Local<Object>::Cast(info.This());
- info.GetReturnValue().Set(self->Get(info.GetIsolate()->GetCurrentContext(),
- String::Concat(v8_str("accessor_"), name))
- .ToLocalChecked());
+ info.GetReturnValue().Set(
+ self->Get(info.GetIsolate()->GetCurrentContext(),
+ String::Concat(info.GetIsolate(), v8_str("accessor_"), name))
+ .ToLocalChecked());
}
void SimpleAccessorSetter(Local<String> name, Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
Local<Object> self = Local<Object>::Cast(info.This());
CHECK(self->Set(info.GetIsolate()->GetCurrentContext(),
- String::Concat(v8_str("accessor_"), name), value)
+ String::Concat(info.GetIsolate(), v8_str("accessor_"), name),
+ value)
.FromJust());
}
@@ -7841,10 +7847,18 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
v8::WeakCallbackType::kParameter);
object_b.handle.SetWeak(&object_b, &SetFlag,
v8::WeakCallbackType::kParameter);
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ // MarkIndependent is marked deprecated but we still rely on it temporarily.
CHECK(!object_b.handle.IsIndependent());
object_a.handle.MarkIndependent();
object_b.handle.MarkIndependent();
CHECK(object_b.handle.IsIndependent());
+#if __clang__
+#pragma clang diagnostic pop
+#endif
if (global_gc) {
CcTest::CollectAllGarbage();
} else {
@@ -7996,9 +8010,17 @@ void v8::internal::heap::HeapTester::ResetWeakHandle(bool global_gc) {
object_b.handle.SetWeak(&object_b, &ResetUseValueAndSetFlag,
v8::WeakCallbackType::kParameter);
if (!global_gc) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ // MarkIndependent is marked deprecated but we still rely on it temporarily.
object_a.handle.MarkIndependent();
object_b.handle.MarkIndependent();
CHECK(object_b.handle.IsIndependent());
+#if __clang__
+#pragma clang diagnostic pop
+#endif
}
if (global_gc) {
CcTest::CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
@@ -8067,7 +8089,16 @@ THREADED_TEST(GCFromWeakCallbacks) {
object.flag = false;
object.handle.SetWeak(&object, gc_forcing_callback[inner_gc],
v8::WeakCallbackType::kParameter);
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ // MarkIndependent is marked deprecated but we still rely on it
+ // temporarily.
object.handle.MarkIndependent();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
invoke_gc[outer_gc]();
EmptyMessageQueues(isolate);
CHECK(object.flag);
@@ -8285,7 +8316,8 @@ int GetUtf8Length(v8::Isolate* isolate, Local<String> str) {
THREADED_TEST(StringWrite) {
LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
v8::Local<String> str = v8_str("abcde");
// abc<Icelandic eth><Unicode snowman>.
v8::Local<String> str2 = v8_str("abc\xC3\xB0\xE2\x98\x83");
@@ -8347,65 +8379,63 @@ THREADED_TEST(StringWrite) {
int charlen;
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen);
CHECK_EQ(9, len);
CHECK_EQ(5, charlen);
CHECK_EQ(0, strcmp(utf8buf, "abc\xC3\xB0\xE2\x98\x83"));
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, 8, &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, 8, &charlen);
CHECK_EQ(8, len);
CHECK_EQ(5, charlen);
CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\xE2\x98\x83\x01", 9));
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, 7, &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, 7, &charlen);
CHECK_EQ(5, len);
CHECK_EQ(4, charlen);
CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\x01", 5));
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, 6, &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, 6, &charlen);
CHECK_EQ(5, len);
CHECK_EQ(4, charlen);
CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\x01", 5));
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, 5, &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, 5, &charlen);
CHECK_EQ(5, len);
CHECK_EQ(4, charlen);
CHECK_EQ(0, strncmp(utf8buf, "abc\xC3\xB0\x01", 5));
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, 4, &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, 4, &charlen);
CHECK_EQ(3, len);
CHECK_EQ(3, charlen);
CHECK_EQ(0, strncmp(utf8buf, "abc\x01", 4));
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, 3, &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, 3, &charlen);
CHECK_EQ(3, len);
CHECK_EQ(3, charlen);
CHECK_EQ(0, strncmp(utf8buf, "abc\x01", 4));
memset(utf8buf, 0x1, 1000);
- len = str2->WriteUtf8(utf8buf, 2, &charlen);
+ len = str2->WriteUtf8(isolate, utf8buf, 2, &charlen);
CHECK_EQ(2, len);
CHECK_EQ(2, charlen);
CHECK_EQ(0, strncmp(utf8buf, "ab\x01", 3));
// allow orphan surrogates by default
memset(utf8buf, 0x1, 1000);
- len = orphans_str->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
+ len = orphans_str->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen);
CHECK_EQ(13, len);
CHECK_EQ(8, charlen);
CHECK_EQ(0, strcmp(utf8buf, "ab\xED\xA0\x80wx\xED\xB0\x80yz"));
// replace orphan surrogates with Unicode replacement character
memset(utf8buf, 0x1, 1000);
- len = orphans_str->WriteUtf8(utf8buf,
- sizeof(utf8buf),
- &charlen,
+ len = orphans_str->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen,
String::REPLACE_INVALID_UTF8);
CHECK_EQ(13, len);
CHECK_EQ(8, charlen);
@@ -8413,9 +8443,7 @@ THREADED_TEST(StringWrite) {
// replace single lead surrogate with Unicode replacement character
memset(utf8buf, 0x1, 1000);
- len = lead_str->WriteUtf8(utf8buf,
- sizeof(utf8buf),
- &charlen,
+ len = lead_str->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen,
String::REPLACE_INVALID_UTF8);
CHECK_EQ(4, len);
CHECK_EQ(1, charlen);
@@ -8423,9 +8451,7 @@ THREADED_TEST(StringWrite) {
// replace single trail surrogate with Unicode replacement character
memset(utf8buf, 0x1, 1000);
- len = trail_str->WriteUtf8(utf8buf,
- sizeof(utf8buf),
- &charlen,
+ len = trail_str->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen,
String::REPLACE_INVALID_UTF8);
CHECK_EQ(4, len);
CHECK_EQ(1, charlen);
@@ -8434,19 +8460,17 @@ THREADED_TEST(StringWrite) {
// do not replace / write anything if surrogate pair does not fit the buffer
// space
memset(utf8buf, 0x1, 1000);
- len = pair_str->WriteUtf8(utf8buf,
- 3,
- &charlen,
- String::REPLACE_INVALID_UTF8);
+ len = pair_str->WriteUtf8(isolate, utf8buf, 3, &charlen,
+ String::REPLACE_INVALID_UTF8);
CHECK_EQ(0, len);
CHECK_EQ(0, charlen);
memset(utf8buf, 0x1, sizeof(utf8buf));
- len = GetUtf8Length(context->GetIsolate(), left_tree);
+ len = GetUtf8Length(isolate, left_tree);
int utf8_expected =
(0x80 + (0x800 - 0x80) * 2 + (0xD800 - 0x800) * 3) / kStride;
CHECK_EQ(utf8_expected, len);
- len = left_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
+ len = left_tree->WriteUtf8(isolate, utf8buf, utf8_expected, &charlen);
CHECK_EQ(utf8_expected, len);
CHECK_EQ(0xD800 / kStride, charlen);
CHECK_EQ(0xED, static_cast<unsigned char>(utf8buf[utf8_expected - 3]));
@@ -8456,9 +8480,9 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(1, utf8buf[utf8_expected]);
memset(utf8buf, 0x1, sizeof(utf8buf));
- len = GetUtf8Length(context->GetIsolate(), right_tree);
+ len = GetUtf8Length(isolate, right_tree);
CHECK_EQ(utf8_expected, len);
- len = right_tree->WriteUtf8(utf8buf, utf8_expected, &charlen);
+ len = right_tree->WriteUtf8(isolate, utf8buf, utf8_expected, &charlen);
CHECK_EQ(utf8_expected, len);
CHECK_EQ(0xD800 / kStride, charlen);
CHECK_EQ(0xED, static_cast<unsigned char>(utf8buf[0]));
@@ -8468,9 +8492,9 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf));
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf));
CHECK_EQ(5, len);
- len = str->Write(wbuf);
+ len = str->Write(isolate, wbuf);
CHECK_EQ(5, len);
CHECK_EQ(0, strcmp("abcde", buf));
uint16_t answer1[] = {'a', 'b', 'c', 'd', 'e', '\0'};
@@ -8478,9 +8502,9 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 0, 4);
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 0, 4);
CHECK_EQ(4, len);
- len = str->Write(wbuf, 0, 4);
+ len = str->Write(isolate, wbuf, 0, 4);
CHECK_EQ(4, len);
CHECK_EQ(0, strncmp("abcd\x01", buf, 5));
uint16_t answer2[] = {'a', 'b', 'c', 'd', 0x101};
@@ -8488,9 +8512,9 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 0, 5);
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 0, 5);
CHECK_EQ(5, len);
- len = str->Write(wbuf, 0, 5);
+ len = str->Write(isolate, wbuf, 0, 5);
CHECK_EQ(5, len);
CHECK_EQ(0, strncmp("abcde\x01", buf, 6));
uint16_t answer3[] = {'a', 'b', 'c', 'd', 'e', 0x101};
@@ -8498,9 +8522,9 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 0, 6);
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 0, 6);
CHECK_EQ(5, len);
- len = str->Write(wbuf, 0, 6);
+ len = str->Write(isolate, wbuf, 0, 6);
CHECK_EQ(5, len);
CHECK_EQ(0, strcmp("abcde", buf));
uint16_t answer4[] = {'a', 'b', 'c', 'd', 'e', '\0'};
@@ -8508,9 +8532,9 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 4, -1);
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 4, -1);
CHECK_EQ(1, len);
- len = str->Write(wbuf, 4, -1);
+ len = str->Write(isolate, wbuf, 4, -1);
CHECK_EQ(1, len);
CHECK_EQ(0, strcmp("e", buf));
uint16_t answer5[] = {'e', '\0'};
@@ -8518,18 +8542,18 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 4, 6);
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 4, 6);
CHECK_EQ(1, len);
- len = str->Write(wbuf, 4, 6);
+ len = str->Write(isolate, wbuf, 4, 6);
CHECK_EQ(1, len);
CHECK_EQ(0, strcmp("e", buf));
CHECK_EQ(0, StrCmp16(answer5, wbuf));
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 4, 1);
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 4, 1);
CHECK_EQ(1, len);
- len = str->Write(wbuf, 4, 1);
+ len = str->Write(isolate, wbuf, 4, 1);
CHECK_EQ(1, len);
CHECK_EQ(0, strncmp("e\x01", buf, 2));
uint16_t answer6[] = {'e', 0x101};
@@ -8537,9 +8561,9 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
memset(wbuf, 0x1, sizeof(wbuf));
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf), 3, 1);
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 3, 1);
CHECK_EQ(1, len);
- len = str->Write(wbuf, 3, 1);
+ len = str->Write(isolate, wbuf, 3, 1);
CHECK_EQ(1, len);
CHECK_EQ(0, strncmp("d\x01", buf, 2));
uint16_t answer7[] = {'d', 0x101};
@@ -8547,7 +8571,7 @@ THREADED_TEST(StringWrite) {
memset(wbuf, 0x1, sizeof(wbuf));
wbuf[5] = 'X';
- len = str->Write(wbuf, 0, 6, String::NO_NULL_TERMINATION);
+ len = str->Write(isolate, wbuf, 0, 6, String::NO_NULL_TERMINATION);
CHECK_EQ(5, len);
CHECK_EQ('X', wbuf[5]);
uint16_t answer8a[] = {'a', 'b', 'c', 'd', 'e'};
@@ -8559,9 +8583,7 @@ THREADED_TEST(StringWrite) {
memset(buf, 0x1, sizeof(buf));
buf[5] = 'X';
- len = str->WriteOneByte(reinterpret_cast<uint8_t*>(buf),
- 0,
- 6,
+ len = str->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf), 0, 6,
String::NO_NULL_TERMINATION);
CHECK_EQ(5, len);
CHECK_EQ('X', buf[5]);
@@ -8572,7 +8594,7 @@ THREADED_TEST(StringWrite) {
memset(utf8buf, 0x1, sizeof(utf8buf));
utf8buf[8] = 'X';
- len = str2->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen,
+ len = str2->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen,
String::NO_NULL_TERMINATION);
CHECK_EQ(8, len);
CHECK_EQ('X', utf8buf[8]);
@@ -8584,8 +8606,8 @@ THREADED_TEST(StringWrite) {
memset(utf8buf, 0x1, sizeof(utf8buf));
utf8buf[5] = 'X';
- len = str->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen,
- String::NO_NULL_TERMINATION);
+ len = str->WriteUtf8(isolate, utf8buf, sizeof(utf8buf), &charlen,
+ String::NO_NULL_TERMINATION);
CHECK_EQ(5, len);
CHECK_EQ('X', utf8buf[5]); // Test that the sixth character is untouched.
CHECK_EQ(5, charlen);
@@ -8593,15 +8615,17 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(0, strcmp(utf8buf, "abcde"));
memset(buf, 0x1, sizeof(buf));
- len = str3->WriteOneByte(reinterpret_cast<uint8_t*>(buf));
+ len = str3->WriteOneByte(isolate, reinterpret_cast<uint8_t*>(buf));
CHECK_EQ(7, len);
CHECK_EQ(0, strcmp("abc", buf));
CHECK_EQ(0, buf[3]);
CHECK_EQ(0, strcmp("def", buf + 4));
- CHECK_EQ(0, str->WriteOneByte(nullptr, 0, 0, String::NO_NULL_TERMINATION));
- CHECK_EQ(0, str->WriteUtf8(nullptr, 0, 0, String::NO_NULL_TERMINATION));
- CHECK_EQ(0, str->Write(nullptr, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->WriteOneByte(isolate, nullptr, 0, 0,
+ String::NO_NULL_TERMINATION));
+ CHECK_EQ(0,
+ str->WriteUtf8(isolate, nullptr, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->Write(isolate, nullptr, 0, 0, String::NO_NULL_TERMINATION));
}
@@ -8627,14 +8651,14 @@ static void Utf16Helper(
}
void TestUtf8DecodingAgainstReference(
- const char* cases[],
+ v8::Isolate* isolate, const char* cases[],
const std::vector<std::vector<uint16_t>>& unicode_expected) {
for (size_t test_ix = 0; test_ix < unicode_expected.size(); ++test_ix) {
v8::Local<String> str = v8_str(cases[test_ix]);
CHECK_EQ(unicode_expected[test_ix].size(), str->Length());
std::unique_ptr<uint16_t[]> buffer(new uint16_t[str->Length()]);
- str->Write(buffer.get(), 0, -1, String::NO_NULL_TERMINATION);
+ str->Write(isolate, buffer.get(), 0, -1, String::NO_NULL_TERMINATION);
for (size_t i = 0; i < unicode_expected[test_ix].size(); ++i) {
CHECK_EQ(unicode_expected[test_ix][i], buffer[i]);
@@ -8684,7 +8708,8 @@ THREADED_TEST(OverlongSequencesAndSurrogates) {
{0x58, 0xFFFD, 0xFFFD, 0xFFFD, 0x59},
};
CHECK_EQ(unicode_expected.size(), arraysize(cases));
- TestUtf8DecodingAgainstReference(cases, unicode_expected);
+ TestUtf8DecodingAgainstReference(context->GetIsolate(), cases,
+ unicode_expected);
}
THREADED_TEST(Utf16) {
@@ -10592,7 +10617,7 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
v8::FunctionTemplate::New(isolate,
InstanceFunctionCallback));
- // The script to check how Crankshaft compiles missing global function
+ // The script to check how TurboFan compiles missing global function
// invocations. function g is not defined and should throw on call.
const char* script =
"function wrapper(call) {"
@@ -10639,8 +10664,8 @@ THREADED_TEST(ObjectGetOwnPropertyNames) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Object> value =
- v8::Local<v8::Object>::Cast(v8::StringObject::New(v8_str("test")));
+ v8::Local<v8::Object> value = v8::Local<v8::Object>::Cast(
+ v8::StringObject::New(CcTest::isolate(), v8_str("test")));
v8::Local<v8::Array> properties;
CHECK(value
@@ -15366,6 +15391,9 @@ THREADED_TEST(GetPropertyNames) {
v8::HandleScope scope(isolate);
v8::Local<v8::Value> result = CompileRun(
"var result = {0: 0, 1: 1, a: 2, b: 3};"
+ "result[2**32] = '4294967296';"
+ "result[2**32-1] = '4294967295';"
+ "result[2**32-2] = '4294967294';"
"result[Symbol('symbol')] = true;"
"result.__proto__ = {__proto__:null, 2: 4, 3: 5, c: 6, d: 7};"
"result;");
@@ -15376,8 +15404,10 @@ THREADED_TEST(GetPropertyNames) {
v8::Local<v8::Array> properties =
object->GetPropertyNames(context.local()).ToLocalChecked();
- const char* expected_properties1[] = {"0", "1", "a", "b", "2", "3", "c", "d"};
- CheckStringArray(isolate, properties, 8, expected_properties1);
+ const char* expected_properties1[] = {"0", "1", "4294967294", "a",
+ "b", "4294967296", "4294967295", "2",
+ "3", "c", "d"};
+ CheckStringArray(isolate, properties, 11, expected_properties1);
properties =
object
@@ -15385,7 +15415,7 @@ THREADED_TEST(GetPropertyNames) {
v8::KeyCollectionMode::kIncludePrototypes,
default_filter, v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- CheckStringArray(isolate, properties, 8, expected_properties1);
+ CheckStringArray(isolate, properties, 11, expected_properties1);
properties = object
->GetPropertyNames(context.local(),
@@ -15393,10 +15423,11 @@ THREADED_TEST(GetPropertyNames) {
include_symbols_filter,
v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- const char* expected_properties1_1[] = {"0", "1", "a", "b", nullptr,
- "2", "3", "c", "d"};
- CheckStringArray(isolate, properties, 9, expected_properties1_1);
- CheckIsSymbolAt(isolate, properties, 4, "symbol");
+ const char* expected_properties1_1[] = {
+ "0", "1", "4294967294", "a", "b", "4294967296",
+ "4294967295", nullptr, "2", "3", "c", "d"};
+ CheckStringArray(isolate, properties, 12, expected_properties1_1);
+ CheckIsSymbolAt(isolate, properties, 7, "symbol");
properties =
object
@@ -15404,8 +15435,9 @@ THREADED_TEST(GetPropertyNames) {
v8::KeyCollectionMode::kIncludePrototypes,
default_filter, v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties2[] = {"a", "b", "c", "d"};
- CheckStringArray(isolate, properties, 4, expected_properties2);
+ const char* expected_properties2[] = {"a", "b", "4294967296",
+ "4294967295", "c", "d"};
+ CheckStringArray(isolate, properties, 6, expected_properties2);
properties = object
->GetPropertyNames(context.local(),
@@ -15413,43 +15445,48 @@ THREADED_TEST(GetPropertyNames) {
include_symbols_filter,
v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties2_1[] = {"a", "b", nullptr, "c", "d"};
- CheckStringArray(isolate, properties, 5, expected_properties2_1);
- CheckIsSymbolAt(isolate, properties, 2, "symbol");
+ const char* expected_properties2_1[] = {
+ "a", "b", "4294967296", "4294967295", nullptr, "c", "d"};
+ CheckStringArray(isolate, properties, 7, expected_properties2_1);
+ CheckIsSymbolAt(isolate, properties, 4, "symbol");
properties =
object
->GetPropertyNames(context.local(), v8::KeyCollectionMode::kOwnOnly,
default_filter, v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- const char* expected_properties3[] = {"0", "1", "a", "b"};
- CheckStringArray(isolate, properties, 4, expected_properties3);
+ const char* expected_properties3[] = {
+ "0", "1", "4294967294", "a", "b", "4294967296", "4294967295",
+ };
+ CheckStringArray(isolate, properties, 7, expected_properties3);
properties = object
->GetPropertyNames(
context.local(), v8::KeyCollectionMode::kOwnOnly,
include_symbols_filter, v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- const char* expected_properties3_1[] = {"0", "1", "a", "b", nullptr};
- CheckStringArray(isolate, properties, 5, expected_properties3_1);
- CheckIsSymbolAt(isolate, properties, 4, "symbol");
+ const char* expected_properties3_1[] = {
+ "0", "1", "4294967294", "a", "b", "4294967296", "4294967295", nullptr};
+ CheckStringArray(isolate, properties, 8, expected_properties3_1);
+ CheckIsSymbolAt(isolate, properties, 7, "symbol");
properties =
object
->GetPropertyNames(context.local(), v8::KeyCollectionMode::kOwnOnly,
default_filter, v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties4[] = {"a", "b"};
- CheckStringArray(isolate, properties, 2, expected_properties4);
+ const char* expected_properties4[] = {"a", "b", "4294967296", "4294967295"};
+ CheckStringArray(isolate, properties, 4, expected_properties4);
properties = object
->GetPropertyNames(
context.local(), v8::KeyCollectionMode::kOwnOnly,
include_symbols_filter, v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties4_1[] = {"a", "b", nullptr};
- CheckStringArray(isolate, properties, 3, expected_properties4_1);
- CheckIsSymbolAt(isolate, properties, 2, "symbol");
+ const char* expected_properties4_1[] = {"a", "b", "4294967296", "4294967295",
+ nullptr};
+ CheckStringArray(isolate, properties, 5, expected_properties4_1);
+ CheckIsSymbolAt(isolate, properties, 4, "symbol");
}
THREADED_TEST(ProxyGetPropertyNames) {
@@ -15458,6 +15495,9 @@ THREADED_TEST(ProxyGetPropertyNames) {
v8::HandleScope scope(isolate);
v8::Local<v8::Value> result = CompileRun(
"var target = {0: 0, 1: 1, a: 2, b: 3};"
+ "target[2**32] = '4294967296';"
+ "target[2**32-1] = '4294967295';"
+ "target[2**32-2] = '4294967294';"
"target[Symbol('symbol')] = true;"
"target.__proto__ = {__proto__:null, 2: 4, 3: 5, c: 6, d: 7};"
"var result = new Proxy(target, {});"
@@ -15469,8 +15509,10 @@ THREADED_TEST(ProxyGetPropertyNames) {
v8::Local<v8::Array> properties =
object->GetPropertyNames(context.local()).ToLocalChecked();
- const char* expected_properties1[] = {"0", "1", "a", "b", "2", "3", "c", "d"};
- CheckStringArray(isolate, properties, 8, expected_properties1);
+ const char* expected_properties1[] = {"0", "1", "4294967294", "a",
+ "b", "4294967296", "4294967295", "2",
+ "3", "c", "d"};
+ CheckStringArray(isolate, properties, 11, expected_properties1);
properties =
object
@@ -15478,7 +15520,7 @@ THREADED_TEST(ProxyGetPropertyNames) {
v8::KeyCollectionMode::kIncludePrototypes,
default_filter, v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- CheckStringArray(isolate, properties, 8, expected_properties1);
+ CheckStringArray(isolate, properties, 11, expected_properties1);
properties = object
->GetPropertyNames(context.local(),
@@ -15486,10 +15528,11 @@ THREADED_TEST(ProxyGetPropertyNames) {
include_symbols_filter,
v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- const char* expected_properties1_1[] = {"0", "1", "a", "b", nullptr,
- "2", "3", "c", "d"};
- CheckStringArray(isolate, properties, 9, expected_properties1_1);
- CheckIsSymbolAt(isolate, properties, 4, "symbol");
+ const char* expected_properties1_1[] = {
+ "0", "1", "4294967294", "a", "b", "4294967296",
+ "4294967295", nullptr, "2", "3", "c", "d"};
+ CheckStringArray(isolate, properties, 12, expected_properties1_1);
+ CheckIsSymbolAt(isolate, properties, 7, "symbol");
properties =
object
@@ -15497,8 +15540,9 @@ THREADED_TEST(ProxyGetPropertyNames) {
v8::KeyCollectionMode::kIncludePrototypes,
default_filter, v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties2[] = {"a", "b", "c", "d"};
- CheckStringArray(isolate, properties, 4, expected_properties2);
+ const char* expected_properties2[] = {"a", "b", "4294967296",
+ "4294967295", "c", "d"};
+ CheckStringArray(isolate, properties, 6, expected_properties2);
properties = object
->GetPropertyNames(context.local(),
@@ -15506,43 +15550,47 @@ THREADED_TEST(ProxyGetPropertyNames) {
include_symbols_filter,
v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties2_1[] = {"a", "b", nullptr, "c", "d"};
- CheckStringArray(isolate, properties, 5, expected_properties2_1);
- CheckIsSymbolAt(isolate, properties, 2, "symbol");
+ const char* expected_properties2_1[] = {
+ "a", "b", "4294967296", "4294967295", nullptr, "c", "d"};
+ CheckStringArray(isolate, properties, 7, expected_properties2_1);
+ CheckIsSymbolAt(isolate, properties, 4, "symbol");
properties =
object
->GetPropertyNames(context.local(), v8::KeyCollectionMode::kOwnOnly,
default_filter, v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- const char* expected_properties3[] = {"0", "1", "a", "b"};
- CheckStringArray(isolate, properties, 4, expected_properties3);
+ const char* expected_properties3[] = {"0", "1", "4294967294", "a",
+ "b", "4294967296", "4294967295"};
+ CheckStringArray(isolate, properties, 7, expected_properties3);
properties = object
->GetPropertyNames(
context.local(), v8::KeyCollectionMode::kOwnOnly,
include_symbols_filter, v8::IndexFilter::kIncludeIndices)
.ToLocalChecked();
- const char* expected_properties3_1[] = {"0", "1", "a", "b", nullptr};
- CheckStringArray(isolate, properties, 5, expected_properties3_1);
- CheckIsSymbolAt(isolate, properties, 4, "symbol");
+ const char* expected_properties3_1[] = {
+ "0", "1", "4294967294", "a", "b", "4294967296", "4294967295", nullptr};
+ CheckStringArray(isolate, properties, 8, expected_properties3_1);
+ CheckIsSymbolAt(isolate, properties, 7, "symbol");
properties =
object
->GetPropertyNames(context.local(), v8::KeyCollectionMode::kOwnOnly,
default_filter, v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties4[] = {"a", "b"};
- CheckStringArray(isolate, properties, 2, expected_properties4);
+ const char* expected_properties4[] = {"a", "b", "4294967296", "4294967295"};
+ CheckStringArray(isolate, properties, 4, expected_properties4);
properties = object
->GetPropertyNames(
context.local(), v8::KeyCollectionMode::kOwnOnly,
include_symbols_filter, v8::IndexFilter::kSkipIndices)
.ToLocalChecked();
- const char* expected_properties4_1[] = {"a", "b", nullptr};
- CheckStringArray(isolate, properties, 3, expected_properties4_1);
- CheckIsSymbolAt(isolate, properties, 2, "symbol");
+ const char* expected_properties4_1[] = {"a", "b", "4294967296", "4294967295",
+ nullptr};
+ CheckStringArray(isolate, properties, 5, expected_properties4_1);
+ CheckIsSymbolAt(isolate, properties, 4, "symbol");
}
THREADED_TEST(AccessChecksReenabledCorrectly) {
@@ -15739,6 +15787,7 @@ class UC16VectorResource : public v8::String::ExternalStringResource {
static void MorphAString(i::String* string,
OneByteVectorResource* one_byte_resource,
UC16VectorResource* uc16_resource) {
+ i::Isolate* isolate = CcTest::i_isolate();
CHECK(i::StringShape(string).IsExternal());
i::ReadOnlyRoots roots(CcTest::heap());
if (string->IsOneByteRepresentation()) {
@@ -15748,14 +15797,16 @@ static void MorphAString(i::String* string,
string->set_map(roots.external_string_map());
i::ExternalTwoByteString* morphed =
i::ExternalTwoByteString::cast(string);
- morphed->set_resource(uc16_resource);
+ CcTest::heap()->UpdateExternalString(morphed, string->length(), 0);
+ morphed->SetResource(isolate, uc16_resource);
} else {
// Check old map is not internalized or long.
CHECK(string->map() == roots.external_string_map());
// Morph external string to be one-byte string.
string->set_map(roots.external_one_byte_string_map());
i::ExternalOneByteString* morphed = i::ExternalOneByteString::cast(string);
- morphed->set_resource(one_byte_resource);
+ CcTest::heap()->UpdateExternalString(morphed, string->length(), 0);
+ morphed->SetResource(isolate, one_byte_resource);
}
}
@@ -15771,6 +15822,7 @@ THREADED_TEST(MorphCompositeStringTest) {
LocalContext env;
i::Factory* factory = CcTest::i_isolate()->factory();
v8::Isolate* isolate = env->GetIsolate();
+ i::Isolate* i_isolate = CcTest::i_isolate();
v8::HandleScope scope(isolate);
OneByteVectorResource one_byte_resource(
i::Vector<const char>(c_string, i::StrLength(c_string)));
@@ -15805,7 +15857,7 @@ THREADED_TEST(MorphCompositeStringTest) {
v8_compile("cons")->Run(env.local()).ToLocalChecked().As<String>();
CHECK_EQ(128, cons->Utf8Length(isolate));
int nchars = -1;
- CHECK_EQ(129, cons->WriteUtf8(utf_buffer, -1, &nchars));
+ CHECK_EQ(129, cons->WriteUtf8(isolate, utf_buffer, -1, &nchars));
CHECK_EQ(128, nchars);
CHECK_EQ(0, strcmp(
utf_buffer,
@@ -15844,13 +15896,13 @@ THREADED_TEST(MorphCompositeStringTest) {
// This avoids the GC from trying to free a stack allocated resource.
if (ilhs->IsExternalOneByteString())
- i::ExternalOneByteString::cast(ilhs)->set_resource(nullptr);
+ i::ExternalOneByteString::cast(ilhs)->SetResource(i_isolate, nullptr);
else
- i::ExternalTwoByteString::cast(ilhs)->set_resource(nullptr);
+ i::ExternalTwoByteString::cast(ilhs)->SetResource(i_isolate, nullptr);
if (irhs->IsExternalOneByteString())
- i::ExternalOneByteString::cast(irhs)->set_resource(nullptr);
+ i::ExternalOneByteString::cast(irhs)->SetResource(i_isolate, nullptr);
else
- i::ExternalTwoByteString::cast(irhs)->set_resource(nullptr);
+ i::ExternalTwoByteString::cast(irhs)->SetResource(i_isolate, nullptr);
}
i::DeleteArray(two_byte_string);
}
@@ -17342,60 +17394,67 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(args.Length(), 1);
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ v8::Isolate* isolate = args.GetIsolate();
int testGroup = args[0]->Int32Value(context).FromJust();
if (testGroup == kOverviewTest) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kOverview);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bar", 2, 10, false, false,
- stackTrace->GetFrame(0));
- checkStackFrame(origin, "foo", 6, 3, false, true, stackTrace->GetFrame(1));
+ stackTrace->GetFrame(args.GetIsolate(), 0));
+ checkStackFrame(origin, "foo", 6, 3, false, true,
+ stackTrace->GetFrame(isolate, 1));
// This is the source string inside the eval which has the call to foo.
- checkStackFrame(nullptr, "", 1, 1, true, false, stackTrace->GetFrame(2));
+ checkStackFrame(nullptr, "", 1, 1, true, false,
+ stackTrace->GetFrame(isolate, 2));
// The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 8, 7, false, false, stackTrace->GetFrame(3));
+ checkStackFrame(origin, "", 8, 7, false, false,
+ stackTrace->GetFrame(isolate, 3));
} else if (testGroup == kDetailedTest) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bat", 4, 22, false, false,
- stackTrace->GetFrame(0));
+ stackTrace->GetFrame(isolate, 0));
checkStackFrame(origin, "baz", 8, 3, false, true,
- stackTrace->GetFrame(1));
+ stackTrace->GetFrame(isolate, 1));
bool is_eval = true;
// This is the source string inside the eval which has the call to baz.
- checkStackFrame(nullptr, "", 1, 1, is_eval, false, stackTrace->GetFrame(2));
+ checkStackFrame(nullptr, "", 1, 1, is_eval, false,
+ stackTrace->GetFrame(isolate, 2));
// The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 10, 1, false, false, stackTrace->GetFrame(3));
+ checkStackFrame(origin, "", 10, 1, false, false,
+ stackTrace->GetFrame(isolate, 3));
} else if (testGroup == kFunctionName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
checkStackFrame(nullptr, "function.name", 3, 1, true, false,
- stackTrace->GetFrame(0));
+ stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kDisplayName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
- stackTrace->GetFrame(0));
+ stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameAndDisplayName) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
- stackTrace->GetFrame(0));
+ stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kDisplayNameIsNotString) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
checkStackFrame(nullptr, "function.name", 3, 1, true, false,
- stackTrace->GetFrame(0));
+ stackTrace->GetFrame(isolate, 0));
} else if (testGroup == kFunctionNameIsNotString) {
v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 5, v8::StackTrace::kOverview);
CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "", 3, 1, true, false, stackTrace->GetFrame(0));
+ checkStackFrame(nullptr, "", 3, 1, true, false,
+ stackTrace->GetFrame(isolate, 0));
}
}
@@ -17498,9 +17557,9 @@ static void StackTraceForUncaughtExceptionListener(
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK_EQ(2, stack_trace->GetFrameCount());
checkStackFrame("origin", "foo", 2, 3, false, false,
- stack_trace->GetFrame(0));
+ stack_trace->GetFrame(message->GetIsolate(), 0));
checkStackFrame("origin", "bar", 5, 3, false, false,
- stack_trace->GetFrame(1));
+ stack_trace->GetFrame(message->GetIsolate(), 1));
}
@@ -17629,16 +17688,18 @@ TEST(ErrorLevelWarning) {
static void StackTraceFunctionNameListener(v8::Local<v8::Message> message,
v8::Local<Value>) {
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ v8::Isolate* isolate = message->GetIsolate();
CHECK_EQ(5, stack_trace->GetFrameCount());
checkStackFrame("origin", "foo:0", 4, 7, false, false,
- stack_trace->GetFrame(0));
+ stack_trace->GetFrame(isolate, 0));
checkStackFrame("origin", "foo:1", 5, 27, false, false,
- stack_trace->GetFrame(1));
+ stack_trace->GetFrame(isolate, 1));
checkStackFrame("origin", "foo", 5, 27, false, false,
- stack_trace->GetFrame(2));
+ stack_trace->GetFrame(isolate, 2));
checkStackFrame("origin", "foo", 5, 27, false, false,
- stack_trace->GetFrame(3));
- checkStackFrame("origin", "", 1, 14, false, false, stack_trace->GetFrame(4));
+ stack_trace->GetFrame(isolate, 3));
+ checkStackFrame("origin", "", 1, 14, false, false,
+ stack_trace->GetFrame(isolate, 4));
}
@@ -17684,7 +17745,8 @@ static void RethrowStackTraceHandler(v8::Local<v8::Message> message,
CHECK_EQ(3, frame_count);
int line_number[] = {1, 2, 5};
for (int i = 0; i < frame_count; i++) {
- CHECK_EQ(line_number[i], stack_trace->GetFrame(i)->GetLineNumber());
+ CHECK_EQ(line_number[i],
+ stack_trace->GetFrame(message->GetIsolate(), i)->GetLineNumber());
}
}
@@ -17728,7 +17790,8 @@ static void RethrowPrimitiveStackTraceHandler(v8::Local<v8::Message> message,
CHECK_EQ(2, frame_count);
int line_number[] = {3, 7};
for (int i = 0; i < frame_count; i++) {
- CHECK_EQ(line_number[i], stack_trace->GetFrame(i)->GetLineNumber());
+ CHECK_EQ(line_number[i],
+ stack_trace->GetFrame(message->GetIsolate(), i)->GetLineNumber());
}
}
@@ -17763,7 +17826,7 @@ static void RethrowExistingStackTraceHandler(v8::Local<v8::Message> message,
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK(!stack_trace.IsEmpty());
CHECK_EQ(1, stack_trace->GetFrameCount());
- CHECK_EQ(1, stack_trace->GetFrame(0)->GetLineNumber());
+ CHECK_EQ(1, stack_trace->GetFrame(message->GetIsolate(), 0)->GetLineNumber());
}
@@ -17790,7 +17853,7 @@ static void RethrowBogusErrorStackTraceHandler(v8::Local<v8::Message> message,
v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK(!stack_trace.IsEmpty());
CHECK_EQ(1, stack_trace->GetFrameCount());
- CHECK_EQ(2, stack_trace->GetFrame(0)->GetLineNumber());
+ CHECK_EQ(2, stack_trace->GetFrame(message->GetIsolate(), 0)->GetLineNumber());
}
@@ -17845,13 +17908,14 @@ void PromiseRejectCallback(v8::PromiseRejectMessage reject_message) {
if (!stack_trace.IsEmpty()) {
promise_reject_frame_count = stack_trace->GetFrameCount();
if (promise_reject_frame_count > 0) {
- CHECK(stack_trace->GetFrame(0)
+ CHECK(stack_trace->GetFrame(CcTest::isolate(), 0)
->GetScriptName()
->Equals(context, v8_str("pro"))
.FromJust());
promise_reject_line_number =
- stack_trace->GetFrame(0)->GetLineNumber();
- promise_reject_column_number = stack_trace->GetFrame(0)->GetColumn();
+ stack_trace->GetFrame(CcTest::isolate(), 0)->GetLineNumber();
+ promise_reject_column_number =
+ stack_trace->GetFrame(CcTest::isolate(), 0)->GetColumn();
} else {
promise_reject_line_number = -1;
promise_reject_column_number = -1;
@@ -18309,7 +18373,7 @@ void AnalyzeStackOfEvalWithSourceURL(
v8::Local<v8::String> url = v8_str("eval_url");
for (int i = 0; i < 3; i++) {
v8::Local<v8::String> name =
- stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
}
@@ -18355,7 +18419,8 @@ void AnalyzeScriptIdInStack(
args.GetIsolate(), 10, v8::StackTrace::kScriptId);
CHECK_EQ(2, stackTrace->GetFrameCount());
for (int i = 0; i < 2; i++) {
- scriptIdInStack[i] = stackTrace->GetFrame(i)->GetScriptId();
+ scriptIdInStack[i] =
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptId();
}
}
@@ -18391,7 +18456,7 @@ void AnalyzeStackOfInlineScriptWithSourceURL(
v8::Local<v8::String> url = v8_str("source_url");
for (int i = 0; i < 3; i++) {
v8::Local<v8::String> name =
- stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
}
@@ -18806,7 +18871,7 @@ void AnalyzeStackOfDynamicScriptWithSourceURL(
v8::Local<v8::String> url = v8_str("source_url");
for (int i = 0; i < 3; i++) {
v8::Local<v8::String> name =
- stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
}
@@ -19072,7 +19137,8 @@ TEST(GetHeapSpaceStatistics) {
// Force allocation in LO_SPACE so that every space has non-zero size.
v8::internal::Isolate* i_isolate =
reinterpret_cast<v8::internal::Isolate*>(isolate);
- (void)i_isolate->factory()->TryNewFixedArray(512 * 1024);
+ auto unused = i_isolate->factory()->TryNewFixedArray(512 * 1024);
+ USE(unused);
isolate->GetHeapStatistics(&heap_statistics);
@@ -19620,7 +19686,7 @@ THREADED_TEST(ScriptOrigin) {
v8::HandleScope scope(isolate);
Local<v8::PrimitiveArray> array(v8::PrimitiveArray::New(isolate, 1));
Local<v8::Symbol> symbol(v8::Symbol::New(isolate));
- array->Set(0, symbol);
+ array->Set(isolate, 0, symbol);
v8::ScriptOrigin origin = v8::ScriptOrigin(
v8_str("test"), v8::Integer::New(env->GetIsolate(), 1),
@@ -19648,7 +19714,7 @@ THREADED_TEST(ScriptOrigin) {
CHECK(script_origin_f.Options().IsSharedCrossOrigin());
CHECK(script_origin_f.Options().IsOpaque());
printf("is name = %d\n", script_origin_f.SourceMapUrl()->IsUndefined());
- CHECK(script_origin_f.HostDefinedOptions()->Get(0)->IsSymbol());
+ CHECK(script_origin_f.HostDefinedOptions()->Get(isolate, 0)->IsSymbol());
CHECK_EQ(0, strcmp("http://sourceMapUrl",
*v8::String::Utf8Value(env->GetIsolate(),
@@ -19666,7 +19732,7 @@ THREADED_TEST(ScriptOrigin) {
CHECK_EQ(0, strcmp("http://sourceMapUrl",
*v8::String::Utf8Value(env->GetIsolate(),
script_origin_g.SourceMapUrl())));
- CHECK(script_origin_g.HostDefinedOptions()->Get(0)->IsSymbol());
+ CHECK(script_origin_g.HostDefinedOptions()->Get(isolate, 0)->IsSymbol());
}
@@ -20470,7 +20536,8 @@ THREADED_TEST(TwoByteStringInOneByteCons) {
CHECK_EQ(static_cast<int32_t>('e'),
reresult->Int32Value(context.local()).FromJust());
// This avoids the GC from trying to free stack allocated resources.
- i::Handle<i::ExternalTwoByteString>::cast(flat_string)->set_resource(nullptr);
+ i::Handle<i::ExternalTwoByteString>::cast(flat_string)
+ ->SetResource(i_isolate, nullptr);
}
@@ -20506,11 +20573,11 @@ TEST(ContainsOnlyOneByte) {
Local<String> left = base;
Local<String> right = base;
for (int i = 0; i < 1000; i++) {
- left = String::Concat(base, left);
- right = String::Concat(right, base);
+ left = String::Concat(isolate, base, left);
+ right = String::Concat(isolate, right, base);
}
- Local<String> balanced = String::Concat(left, base);
- balanced = String::Concat(balanced, right);
+ Local<String> balanced = String::Concat(isolate, left, base);
+ balanced = String::Concat(isolate, balanced, right);
Local<String> cons_strings[] = {left, balanced, right};
Local<String> two_byte =
String::NewExternalTwoByte(
@@ -20522,9 +20589,9 @@ TEST(ContainsOnlyOneByte) {
string = cons_strings[i];
CHECK(string->IsOneByte() && string->ContainsOnlyOneByte());
// Test left and right concatentation.
- string = String::Concat(two_byte, cons_strings[i]);
+ string = String::Concat(isolate, two_byte, cons_strings[i]);
CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
- string = String::Concat(cons_strings[i], two_byte);
+ string = String::Concat(isolate, cons_strings[i], two_byte);
CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
}
// Set bits in different positions
@@ -21921,12 +21988,11 @@ THREADED_TEST(ReadOnlyIndexedProperties) {
static int CountLiveMapsInMapCache(i::Context* context) {
- i::FixedArray* map_cache = i::FixedArray::cast(context->map_cache());
+ i::WeakFixedArray* map_cache = i::WeakFixedArray::cast(context->map_cache());
int length = map_cache->length();
int count = 0;
for (int i = 0; i < length; i++) {
- i::Object* value = map_cache->get(i);
- if (value->IsWeakCell() && !i::WeakCell::cast(value)->cleared()) count++;
+ if (map_cache->Get(i)->IsWeakHeapObject()) count++;
}
return count;
}
@@ -22752,7 +22818,7 @@ void Recompile(Args... args) {
stub.GetCode();
}
-void RecompileICStubs(i::Isolate* isolate) {
+void RecompileICStubs() {
// BUG(5784): We had a list of IC stubs here to recompile. These are now
// builtins and we can't compile them again (easily). Bug 5784 tracks
// our progress in finding another way to do this.
@@ -22805,7 +22871,6 @@ void TestStubCache(bool primary) {
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
create_params.counter_lookup_callback = LookupCounter;
v8::Isolate* isolate = v8::Isolate::New(create_params);
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -22814,7 +22879,7 @@ void TestStubCache(bool primary) {
// Enforce recompilation of IC stubs that access megamorphic stub cache
// to respect enabled native code counters and stub cache test flags.
- RecompileICStubs(i_isolate);
+ RecompileICStubs();
int initial_probes = probes_counter;
int initial_misses = misses_counter;
@@ -23745,7 +23810,6 @@ TEST(AccessCheckInIC) {
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
create_params.counter_lookup_callback = LookupCounter;
v8::Isolate* isolate = v8::Isolate::New(create_params);
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -23754,7 +23818,7 @@ TEST(AccessCheckInIC) {
// Enforce recompilation of IC stubs that access megamorphic stub cache
// to respect enabled native code counters and stub cache test flags.
- RecompileICStubs(i_isolate);
+ RecompileICStubs();
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
@@ -25010,8 +25074,12 @@ TEST(ResolvedPromiseReFulfill) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Local<v8::String> value1 = v8::String::NewFromUtf8(isolate, "foo");
- v8::Local<v8::String> value2 = v8::String::NewFromUtf8(isolate, "bar");
+ v8::Local<v8::String> value1 =
+ v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ v8::Local<v8::String> value2 =
+ v8::String::NewFromUtf8(isolate, "bar", v8::NewStringType::kNormal)
+ .ToLocalChecked();
v8::Local<v8::Promise::Resolver> resolver =
v8::Promise::Resolver::New(context.local()).ToLocalChecked();
@@ -25037,8 +25105,12 @@ TEST(RejectedPromiseReFulfill) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Local<v8::String> value1 = v8::String::NewFromUtf8(isolate, "foo");
- v8::Local<v8::String> value2 = v8::String::NewFromUtf8(isolate, "bar");
+ v8::Local<v8::String> value1 =
+ v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ v8::Local<v8::String> value2 =
+ v8::String::NewFromUtf8(isolate, "bar", v8::NewStringType::kNormal)
+ .ToLocalChecked();
v8::Local<v8::Promise::Resolver> resolver =
v8::Promise::Resolver::New(context.local()).ToLocalChecked();
@@ -26325,14 +26397,16 @@ TEST(InvalidCodeCacheData) {
TEST(StringConcatOverflow) {
v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
RandomLengthOneByteResource* r =
new RandomLengthOneByteResource(i::String::kMaxLength);
v8::Local<v8::String> str =
- v8::String::NewExternalOneByte(CcTest::isolate(), r).ToLocalChecked();
+ v8::String::NewExternalOneByte(isolate, r).ToLocalChecked();
CHECK(!str.IsEmpty());
- v8::TryCatch try_catch(CcTest::isolate());
- v8::Local<v8::String> result = v8::String::Concat(str, str);
+ v8::TryCatch try_catch(isolate);
+ v8::Local<v8::String> result = v8::String::Concat(isolate, str, str);
+ v8::String::Concat(CcTest::isolate(), str, str);
CHECK(result.IsEmpty());
CHECK(!try_catch.HasCaught());
}
@@ -26350,7 +26424,7 @@ TEST(TurboAsmDisablesNeuter) {
" function load() { return MEM32[0] | 0; }"
" return { load: load };"
"}"
- "var buffer = new ArrayBuffer(1024);"
+ "var buffer = new ArrayBuffer(4096);"
"var module = Module(this, {}, buffer);"
"%OptimizeFunctionOnNextCall(module.load);"
"module.load();"
@@ -26366,7 +26440,7 @@ TEST(TurboAsmDisablesNeuter) {
" function store() { MEM32[0] = 0; }"
" return { store: store };"
"}"
- "var buffer = new ArrayBuffer(1024);"
+ "var buffer = new ArrayBuffer(4096);"
"var module = Module(this, {}, buffer);"
"%OptimizeFunctionOnNextCall(module.store);"
"module.store();"
@@ -27981,7 +28055,9 @@ v8::MaybeLocal<v8::Promise> HostImportModuleDynamicallyCallbackResolve(
String::Utf8Value referrer_utf8(
context->GetIsolate(), Local<String>::Cast(referrer->GetResourceName()));
CHECK_EQ(0, strcmp("www.google.com", *referrer_utf8));
- CHECK(referrer->GetHostDefinedOptions()->Get(0)->IsSymbol());
+ CHECK(referrer->GetHostDefinedOptions()
+ ->Get(context->GetIsolate(), 0)
+ ->IsSymbol());
CHECK(!specifier.IsEmpty());
String::Utf8Value specifier_utf8(context->GetIsolate(), specifier);
@@ -28151,41 +28227,41 @@ TEST(PrimitiveArray) {
CHECK_EQ(length, array->Length());
for (int i = 0; i < length; i++) {
- Local<v8::Primitive> item = array->Get(i);
+ Local<v8::Primitive> item = array->Get(isolate, i);
CHECK(item->IsUndefined());
}
Local<v8::Symbol> symbol(v8::Symbol::New(isolate));
- array->Set(0, symbol);
- CHECK(array->Get(0)->IsSymbol());
+ array->Set(isolate, 0, symbol);
+ CHECK(array->Get(isolate, 0)->IsSymbol());
Local<v8::String> string =
v8::String::NewFromUtf8(isolate, "test", v8::NewStringType::kInternalized)
.ToLocalChecked();
- array->Set(1, string);
- CHECK(array->Get(0)->IsSymbol());
- CHECK(array->Get(1)->IsString());
+ array->Set(isolate, 1, string);
+ CHECK(array->Get(isolate, 0)->IsSymbol());
+ CHECK(array->Get(isolate, 1)->IsString());
Local<v8::Number> num = v8::Number::New(env->GetIsolate(), 3.1415926);
- array->Set(2, num);
- CHECK(array->Get(0)->IsSymbol());
- CHECK(array->Get(1)->IsString());
- CHECK(array->Get(2)->IsNumber());
+ array->Set(isolate, 2, num);
+ CHECK(array->Get(isolate, 0)->IsSymbol());
+ CHECK(array->Get(isolate, 1)->IsString());
+ CHECK(array->Get(isolate, 2)->IsNumber());
v8::Local<v8::Boolean> f = v8::False(isolate);
- array->Set(3, f);
- CHECK(array->Get(0)->IsSymbol());
- CHECK(array->Get(1)->IsString());
- CHECK(array->Get(2)->IsNumber());
- CHECK(array->Get(3)->IsBoolean());
+ array->Set(isolate, 3, f);
+ CHECK(array->Get(isolate, 0)->IsSymbol());
+ CHECK(array->Get(isolate, 1)->IsString());
+ CHECK(array->Get(isolate, 2)->IsNumber());
+ CHECK(array->Get(isolate, 3)->IsBoolean());
v8::Local<v8::Primitive> n = v8::Null(isolate);
- array->Set(4, n);
- CHECK(array->Get(0)->IsSymbol());
- CHECK(array->Get(1)->IsString());
- CHECK(array->Get(2)->IsNumber());
- CHECK(array->Get(3)->IsBoolean());
- CHECK(array->Get(4)->IsNull());
+ array->Set(isolate, 4, n);
+ CHECK(array->Get(isolate, 0)->IsSymbol());
+ CHECK(array->Get(isolate, 1)->IsString());
+ CHECK(array->Get(isolate, 2)->IsNumber());
+ CHECK(array->Get(isolate, 3)->IsBoolean());
+ CHECK(array->Get(isolate, 4)->IsNull());
}
TEST(PersistentValueMap) {
@@ -28204,22 +28280,123 @@ TEST(PersistentValueMap) {
map.Set("key", value);
}
-TEST(WasmStreamingAbort) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::WasmModuleObjectBuilderStreaming streaming(isolate);
- streaming.Abort(v8::Object::New(isolate));
- CHECK_EQ(streaming.GetPromise()->State(), v8::Promise::kRejected);
+namespace {
+
+bool wasm_streaming_callback_got_called = false;
+bool wasm_streaming_data_got_collected = false;
+
+void WasmStreamingTestFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ CHECK(!wasm_streaming_data_got_collected);
+ wasm_streaming_data_got_collected = true;
+ i::JSObject** p = reinterpret_cast<i::JSObject**>(data.GetParameter());
+ i::GlobalHandles::Destroy(reinterpret_cast<i::Object**>(p));
+}
+
+void WasmStreamingCallbackTestCallbackIsCalled(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(!wasm_streaming_callback_got_called);
+ wasm_streaming_callback_got_called = true;
+
+ i::Handle<i::Object> global_handle =
+ reinterpret_cast<i::Isolate*>(args.GetIsolate())
+ ->global_handles()
+ ->Create(*v8::Utils::OpenHandle(*args.Data()));
+ i::GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ WasmStreamingTestFinalizer,
+ v8::WeakCallbackType::kParameter);
+}
+
+void WasmStreamingCallbackTestOnBytesReceived(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ std::shared_ptr<v8::WasmStreaming> streaming =
+ v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
+
+ // The first bytes of the WebAssembly magic word.
+ const uint8_t bytes[]{0x00, 0x61, 0x73};
+ streaming->OnBytesReceived(bytes, arraysize(bytes));
+}
+
+void WasmStreamingCallbackTestFinishWithSuccess(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ std::shared_ptr<v8::WasmStreaming> streaming =
+ v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
+ // The bytes of a minimal WebAssembly module.
+ const uint8_t bytes[]{0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00};
+ streaming->OnBytesReceived(bytes, arraysize(bytes));
+ streaming->Finish();
+}
+
+void WasmStreamingCallbackTestFinishWithFailure(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ std::shared_ptr<v8::WasmStreaming> streaming =
+ v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
+ streaming->Finish();
+}
+
+void WasmStreamingCallbackTestAbortWithReject(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ std::shared_ptr<v8::WasmStreaming> streaming =
+ v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
+ streaming->Abort(v8::Object::New(args.GetIsolate()));
+}
+
+void WasmStreamingCallbackTestAbortNoReject(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ std::shared_ptr<v8::WasmStreaming> streaming =
+ v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
+ streaming->Abort({});
}
-TEST(WasmStreamingAbortNoReject) {
+void TestWasmStreaming(v8::WasmStreamingCallback callback,
+ v8::Promise::PromiseState expected_state) {
+ CcTest::isolate()->SetWasmStreamingCallback(callback);
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::WasmModuleObjectBuilderStreaming streaming(isolate);
- streaming.Abort({});
- CHECK_EQ(streaming.GetPromise()->State(), v8::Promise::kPending);
+
+ // Call {WebAssembly.compileStreaming} with {null} as parameter. The parameter
+ // is only really processed by the embedder, so for this test the value is
+ // irrelevant.
+ v8::Local<v8::Promise> promise = v8::Local<v8::Promise>::Cast(
+ CompileRun("WebAssembly.compileStreaming(null)"));
+
+ EmptyMessageQueues(isolate);
+ CHECK_EQ(expected_state, promise->State());
+}
+
+} // namespace
+
+TEST(WasmStreamingCallback) {
+ TestWasmStreaming(WasmStreamingCallbackTestCallbackIsCalled,
+ v8::Promise::kPending);
+ CHECK(wasm_streaming_callback_got_called);
+ CcTest::CollectAllAvailableGarbage();
+ CHECK(wasm_streaming_data_got_collected);
+}
+
+TEST(WasmStreamingOnBytesReceived) {
+ TestWasmStreaming(WasmStreamingCallbackTestOnBytesReceived,
+ v8::Promise::kPending);
+}
+
+TEST(WasmStreamingFinishWithSuccess) {
+ TestWasmStreaming(WasmStreamingCallbackTestFinishWithSuccess,
+ v8::Promise::kFulfilled);
+}
+
+TEST(WasmStreamingFinishWithFailure) {
+ TestWasmStreaming(WasmStreamingCallbackTestFinishWithFailure,
+ v8::Promise::kRejected);
+}
+
+TEST(WasmStreamingAbortWithReject) {
+ TestWasmStreaming(WasmStreamingCallbackTestAbortWithReject,
+ v8::Promise::kRejected);
+}
+
+TEST(WasmStreamingAbortWithoutReject) {
+ TestWasmStreaming(WasmStreamingCallbackTestAbortNoReject,
+ v8::Promise::kPending);
}
enum class AtomicsWaitCallbackAction {
@@ -28545,3 +28722,42 @@ TEST(BigIntAPI) {
CHECK_EQ(word_count, 2);
}
}
+
+namespace {
+
+bool wasm_threads_enabled_value = false;
+
+bool MockWasmThreadsEnabledCallback(Local<Context>) {
+ return wasm_threads_enabled_value;
+}
+
+} // namespace
+
+TEST(TestSetWasmThreadsEnabledCallback) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<Context> context = Context::New(CcTest::isolate());
+ i::Handle<i::Context> i_context = v8::Utils::OpenHandle(*context);
+
+ // {Isolate::AreWasmThreadsEnabled} calls the callback set by the embedder if
+ // such a callback exists. Otherwise it returns
+ // {FLAG_experimental_wasm_threads}. First we test that the flag is returned
+ // correctly if no callback is set. Then we test that the flag is ignored if
+ // the callback is set.
+
+ i::FLAG_experimental_wasm_threads = false;
+ CHECK(!i_isolate->AreWasmThreadsEnabled(i_context));
+
+ i::FLAG_experimental_wasm_threads = true;
+ CHECK(i_isolate->AreWasmThreadsEnabled(i_context));
+
+ isolate->SetWasmThreadsEnabledCallback(MockWasmThreadsEnabledCallback);
+ wasm_threads_enabled_value = false;
+ CHECK(!i_isolate->AreWasmThreadsEnabled(i_context));
+
+ wasm_threads_enabled_value = true;
+ i::FLAG_experimental_wasm_threads = false;
+ CHECK(i_isolate->AreWasmThreadsEnabled(i_context));
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 6ed918a533..7e1bb402fc 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1082,14 +1082,14 @@ TEST(13) {
__ vmov(d21, Double(16.0));
__ mov(r1, Operand(372106121));
__ mov(r2, Operand(1079146608));
- __ vmov(d22, VmovIndexLo, r1);
- __ vmov(d22, VmovIndexHi, r2);
+ __ vmov(NeonS32, d22, 0, r1);
+ __ vmov(NeonS32, d22, 1, r2);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, i))));
__ vstm(ia_w, r4, d20, d22);
// Move d22 into low and high.
- __ vmov(r4, VmovIndexLo, d22);
+ __ vmov(NeonS32, r4, d22, 0);
__ str(r4, MemOperand(r0, offsetof(T, low)));
- __ vmov(r4, VmovIndexHi, d22);
+ __ vmov(NeonS32, r4, d22, 1);
__ str(r4, MemOperand(r0, offsetof(T, high)));
__ ldm(ia_w, sp, r4.bit() | pc.bit());
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index c9a13c0b1c..c0f8e171c7 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -45,6 +45,7 @@
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
@@ -129,13 +130,11 @@ static void InitializeVM() {
new Decoder<DispatchingDecoderVisitor>(); \
Simulator simulator(decoder); \
PrintDisassembler* pdis = nullptr; \
- RegisterDump core;
-
-/* if (Cctest::trace_sim()) { \
- pdis = new PrintDisassembler(stdout); \
- decoder.PrependVisitor(pdis); \
- } \
- */
+ RegisterDump core; \
+ if (i::FLAG_trace_sim) { \
+ pdis = new PrintDisassembler(stdout); \
+ decoder->PrependVisitor(pdis); \
+ }
// Reset the assembler and simulator, so that instructions can be generated,
// but don't actually emit any code. This can be used by tests that need to
@@ -198,7 +197,6 @@ static void InitializeVM() {
#define RUN() \
MakeAssemblerBufferExecutable(buf, allocated); \
- Assembler::FlushICache(buf, masm.SizeOfGeneratedCode()); \
{ \
void (*test_function)(void); \
memcpy(&test_function, &buf, sizeof(buf)); \
@@ -1714,7 +1712,7 @@ TEST(adr_far) {
INIT_V8();
int max_range = 1 << (Instruction::ImmPCRelRangeBitwidth - 1);
- SETUP_SIZE(max_range + 1000 * kInstructionSize);
+ SETUP_SIZE(max_range + 1000 * kInstrSize);
Label done, fail;
Label test_near, near_forward, near_backward;
@@ -1744,7 +1742,7 @@ TEST(adr_far) {
__ Orr(x0, x0, 1 << 3);
__ B(&done);
- for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@@ -1906,7 +1904,7 @@ TEST(branch_to_reg) {
RUN();
- CHECK_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
+ CHECK_EQUAL_64(core.xreg(3) + kInstrSize, x0);
CHECK_EQUAL_64(42, x1);
CHECK_EQUAL_64(84, x2);
@@ -2048,7 +2046,7 @@ TEST(far_branch_backward) {
std::max(Instruction::ImmBranchRange(CompareBranchType),
Instruction::ImmBranchRange(CondBranchType)));
- SETUP_SIZE(max_range + 1000 * kInstructionSize);
+ SETUP_SIZE(max_range + 1000 * kInstrSize);
START();
@@ -2074,7 +2072,7 @@ TEST(far_branch_backward) {
// Generate enough code to overflow the immediate range of the three types of
// branches below.
- for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@@ -2095,7 +2093,7 @@ TEST(far_branch_backward) {
// For each out-of-range branch instructions, at least two instructions should
// have been generated.
- CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
+ CHECK_GE(7 * kInstrSize, __ SizeOfCodeGeneratedSince(&test_tbz));
__ Bind(&fail);
__ Mov(x1, 0);
@@ -2122,7 +2120,7 @@ TEST(far_branch_simple_veneer) {
std::max(Instruction::ImmBranchRange(CompareBranchType),
Instruction::ImmBranchRange(CondBranchType)));
- SETUP_SIZE(max_range + 1000 * kInstructionSize);
+ SETUP_SIZE(max_range + 1000 * kInstrSize);
START();
@@ -2144,7 +2142,7 @@ TEST(far_branch_simple_veneer) {
// Generate enough code to overflow the immediate range of the three types of
// branches below.
- for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@@ -2198,7 +2196,7 @@ TEST(far_branch_veneer_link_chain) {
std::max(Instruction::ImmBranchRange(CompareBranchType),
Instruction::ImmBranchRange(CondBranchType)));
- SETUP_SIZE(max_range + 1000 * kInstructionSize);
+ SETUP_SIZE(max_range + 1000 * kInstrSize);
START();
@@ -2239,7 +2237,7 @@ TEST(far_branch_veneer_link_chain) {
// Generate enough code to overflow the immediate range of the three types of
// branches below.
- for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ for (int i = 0; i < max_range / kInstrSize + 1; ++i) {
if (i % 100 == 0) {
// If we do land in this code, we do not want to execute so many nops
// before reaching the end of test (especially if tracing is activated).
@@ -2288,7 +2286,7 @@ TEST(far_branch_veneer_broken_link_chain) {
int max_range = Instruction::ImmBranchRange(TestBranchType);
int inter_range = max_range / 2 + max_range / 10;
- SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
+ SETUP_SIZE(3 * inter_range + 1000 * kInstrSize);
START();
@@ -2305,7 +2303,7 @@ TEST(far_branch_veneer_broken_link_chain) {
__ Mov(x0, 1);
__ B(&far_target);
- for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ for (int i = 0; i < inter_range / kInstrSize; ++i) {
if (i % 100 == 0) {
// Do not allow generating veneers. They should not be needed.
__ b(&fail);
@@ -2319,7 +2317,7 @@ TEST(far_branch_veneer_broken_link_chain) {
__ Mov(x0, 2);
__ Tbz(x10, 7, &far_target);
- for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ for (int i = 0; i < inter_range / kInstrSize; ++i) {
if (i % 100 == 0) {
// Do not allow generating veneers. They should not be needed.
__ b(&fail);
@@ -2334,7 +2332,7 @@ TEST(far_branch_veneer_broken_link_chain) {
__ Mov(x0, 3);
__ Tbz(x10, 7, &far_target);
- for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ for (int i = 0; i < inter_range / kInstrSize; ++i) {
if (i % 100 == 0) {
// Allow generating veneers.
__ B(&fail);
@@ -6742,65 +6740,72 @@ TEST(ldr_literal) {
#ifdef DEBUG
// These tests rely on functions available in debug mode.
-enum LiteralPoolEmitOption { NoJumpRequired, JumpRequired };
+enum LiteralPoolEmitOutcome { EmitExpected, NoEmitExpected };
-static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
- bool expect_dump) {
- CHECK_GT(range_, 0);
- SETUP_SIZE(range_ + 1024);
+static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
+ size_t prepadding = 0) {
+ SETUP_SIZE(static_cast<int>(range + 1024));
- Label label_1, label_2;
-
- size_t range = static_cast<size_t>(range_);
size_t code_size = 0;
- size_t pool_guard_size;
-
- if (option == NoJumpRequired) {
- // Space for an explicit branch.
- pool_guard_size = kInstructionSize;
- } else {
- pool_guard_size = 0;
- }
+ const size_t pool_entries = 2;
+ const size_t kEntrySize = 8;
START();
// Force a pool dump so the pool starts off empty.
__ CheckConstPool(true, true);
CHECK_CONSTANT_POOL_SIZE(0);
+ // Emit prepadding to influence alignment of the pool; we don't count this
+ // into code size.
+ for (size_t i = 0; i < prepadding; ++i) __ Nop();
+
LoadLiteral(&masm, x0, 0x1234567890ABCDEFUL);
LoadLiteral(&masm, x1, 0xABCDEF1234567890UL);
- CHECK_CONSTANT_POOL_SIZE(16);
-
- code_size += 2 * kInstructionSize;
+ code_size += 2 * kInstrSize;
+ CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
// Check that the requested range (allowing space for a branch over the pool)
// can be handled by this test.
- CHECK_LE(code_size + pool_guard_size, range);
+ CHECK_LE(code_size, range);
+
+ auto PoolSizeAt = [pool_entries](int pc_offset) {
+ // To determine padding, consider the size of the prologue of the pool,
+ // and the jump around the pool, which we always need.
+ size_t prologue_size = 2 * kInstrSize + kInstrSize;
+ size_t pc = pc_offset + prologue_size;
+ const size_t padding = IsAligned(pc, 8) ? 0 : 4;
+ return prologue_size + pool_entries * kEntrySize + padding;
+ };
- // Emit NOPs up to 'range', leaving space for the pool guard.
- while ((code_size + pool_guard_size + kInstructionSize) < range) {
+ int pc_offset_before_emission = -1;
+ // Emit NOPs up to 'range'.
+ while (code_size < range) {
+ pc_offset_before_emission = __ pc_offset() + kInstrSize;
__ Nop();
- code_size += kInstructionSize;
- }
-
- // Emit the guard sequence before the literal pool.
- if (option == NoJumpRequired) {
- __ B(&label_1);
- code_size += kInstructionSize;
+ code_size += kInstrSize;
}
+ CHECK_EQ(code_size, range);
- // The next instruction will trigger pool emission when expect_dump is true.
- CHECK_EQ(code_size, range - kInstructionSize);
- CHECK_CONSTANT_POOL_SIZE(16);
-
- // Possibly generate a literal pool.
- __ Nop();
-
- __ Bind(&label_1);
- if (expect_dump) {
+ if (outcome == EmitExpected) {
CHECK_CONSTANT_POOL_SIZE(0);
+ // Check that the size of the emitted constant pool is as expected.
+ size_t pool_size = PoolSizeAt(pc_offset_before_emission);
+ CHECK_EQ(pc_offset_before_emission + pool_size, __ pc_offset());
+ byte* pool_start = buf + pc_offset_before_emission;
+ Instruction* branch = reinterpret_cast<Instruction*>(pool_start);
+ CHECK(branch->IsImmBranch());
+ CHECK_EQ(pool_size, branch->ImmPCOffset());
+ Instruction* marker =
+ reinterpret_cast<Instruction*>(pool_start + kInstrSize);
+ CHECK(marker->IsLdrLiteralX());
+ const size_t padding =
+ IsAligned(pc_offset_before_emission + kInstrSize, kEntrySize) ? 0 : 1;
+ CHECK_EQ(pool_entries * 2 + 1 + padding, marker->ImmLLiteral());
+
} else {
- CHECK_CONSTANT_POOL_SIZE(16);
+ CHECK_EQ(outcome, NoEmitExpected);
+ CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
+ CHECK_EQ(pc_offset_before_emission, __ pc_offset());
}
// Force a pool flush to check that a second pool functions correctly.
@@ -6810,7 +6815,7 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
// These loads should be after the pool (and will require a new one).
LoadLiteral(&masm, x4, 0x34567890ABCDEF12UL);
LoadLiteral(&masm, x5, 0xABCDEF0123456789UL);
- CHECK_CONSTANT_POOL_SIZE(16);
+ CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
END();
RUN();
@@ -6824,35 +6829,32 @@ static void LdrLiteralRangeHelper(int range_, LiteralPoolEmitOption option,
TEARDOWN();
}
-TEST(ldr_literal_range_1) {
+TEST(ldr_literal_range_max_dist_emission_1) {
INIT_V8();
LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
- NoJumpRequired, true);
+ EmitExpected);
}
-
-TEST(ldr_literal_range_2) {
+TEST(ldr_literal_range_max_dist_emission_2) {
INIT_V8();
- LdrLiteralRangeHelper(
- MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
- kInstructionSize,
- NoJumpRequired, false);
+ LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
+ EmitExpected, 1);
}
-
-TEST(ldr_literal_range_3) {
+TEST(ldr_literal_range_max_dist_no_emission_1) {
INIT_V8();
- LdrLiteralRangeHelper(MacroAssembler::GetCheckConstPoolIntervalForTesting(),
- JumpRequired, false);
+ LdrLiteralRangeHelper(
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
+ NoEmitExpected);
}
-
-TEST(ldr_literal_range_4) {
+TEST(ldr_literal_range_max_dist_no_emission_2) {
INIT_V8();
LdrLiteralRangeHelper(
- MacroAssembler::GetCheckConstPoolIntervalForTesting() - kInstructionSize,
- JumpRequired, false);
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
+ NoEmitExpected, 1);
}
+
#endif
TEST(add_sub_imm) {
@@ -15251,7 +15253,7 @@ TEST(pool_size) {
}
__ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
- for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
+ for (unsigned i = 0; i < veneer_pool_size / kInstrSize; ++i) {
__ nop();
}
@@ -15289,7 +15291,7 @@ TEST(jump_tables_forward) {
const int kNumCases = 512;
INIT_V8();
- SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
+ SETUP_SIZE(kNumCases * 5 * kInstrSize + 8192);
START();
int32_t values[kNumCases];
@@ -15353,7 +15355,7 @@ TEST(jump_tables_backward) {
const int kNumCases = 512;
INIT_V8();
- SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
+ SETUP_SIZE(kNumCases * 5 * kInstrSize + 8192);
START();
int32_t values[kNumCases];
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 4538e8ac78..4b625c2b53 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -3164,16 +3164,13 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases + 7);
- PredictableCodeSizeScope predictable(
- &assm, (kNumCases + 7) * Assembler::kInstrSize);
- Label here;
+ PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
- __ bal(&here);
+ __ nal();
__ nop();
- __ bind(&here);
__ sll(at, a0, 2);
__ addu(at, at, ra);
- __ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ lw(at, MemOperand(at, 5 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3243,16 +3240,13 @@ TEST(jump_tables2) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
- PredictableCodeSizeScope predictable(
- &assm, (kNumCases + 7) * Assembler::kInstrSize);
- Label here;
+ PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
- __ bal(&here);
+ __ nal();
__ nop();
- __ bind(&here);
__ sll(at, a0, 2);
__ addu(at, at, ra);
- __ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ lw(at, MemOperand(at, 5 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3319,16 +3313,13 @@ TEST(jump_tables3) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
- PredictableCodeSizeScope predictable(
- &assm, (kNumCases + 7) * Assembler::kInstrSize);
- Label here;
+ PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
- __ bal(&here);
+ __ nal();
__ nop();
- __ bind(&here);
__ sll(at, a0, 2);
__ addu(at, at, ra);
- __ lw(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ lw(at, MemOperand(at, 5 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -4811,8 +4802,8 @@ uint32_t run_jic(int16_t offset) {
__ beq(v0, t1, &stop_execution);
__ nop();
- __ bal(&get_program_counter); // t0 <- program counter
- __ nop();
+ __ nal(); // t0 <- program counter
+ __ mov(t0, ra);
__ jic(t0, offset);
__ addiu(v0, v0, 0x100);
@@ -4823,11 +4814,6 @@ uint32_t run_jic(int16_t offset) {
__ jr(ra);
__ nop();
- __ bind(&get_program_counter);
- __ mov(t0, ra);
- __ jr(ra);
- __ nop();
-
__ bind(&stop_execution);
__ pop(ra);
__ jr(ra);
@@ -5158,8 +5144,8 @@ uint32_t run_jialc(int16_t offset) {
// Block 3 (Main)
__ bind(&main_block);
- __ bal(&get_program_counter); // t0 <- program counter
- __ nop();
+ __ nal(); // t0 <- program counter
+ __ mov(t0, ra);
__ jialc(t0, offset);
__ addiu(v0, v0, 0x4);
__ pop(ra);
@@ -5178,11 +5164,6 @@ uint32_t run_jialc(int16_t offset) {
__ jr(ra);
__ nop();
- __ bind(&get_program_counter);
- __ mov(t0, ra);
- __ jr(ra);
- __ nop();
-
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -5557,7 +5538,7 @@ TEST(Trampoline) {
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
Label done;
- size_t nr_calls = kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
+ size_t nr_calls = kMaxBranchOffset / (2 * kInstrSize) + 2;
for (size_t i = 0; i < nr_calls; ++i) {
__ BranchShort(&done, eq, a0, Operand(a1));
@@ -5715,8 +5696,7 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
Label code_start;
__ bind(&code_start);
__ Subu(v0, zero_reg, imm);
- CHECK_EQ(assm.SizeOfCodeGeneratedSince(&code_start),
- num_instr * Assembler::kInstrSize);
+ CHECK_EQ(assm.SizeOfCodeGeneratedSince(&code_start), num_instr * kInstrSize);
__ jr(ra);
__ nop();
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index bd96375c48..785ffa2fa3 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -3293,15 +3293,13 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
- PredictableCodeSizeScope predictable(
- &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
- Label here;
+ PredictableCodeSizeScope predictable(&assm,
+ (kNumCases * 2 + 6) * kInstrSize);
- __ bal(&here);
+ __ nal();
__ dsll(at, a0, 3); // In delay slot.
- __ bind(&here);
__ daddu(at, at, ra);
- __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
+ __ Ld(at, MemOperand(at, 4 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3373,15 +3371,13 @@ TEST(jump_tables2) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
- PredictableCodeSizeScope predictable(
- &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
- Label here;
+ PredictableCodeSizeScope predictable(&assm,
+ (kNumCases * 2 + 6) * kInstrSize);
- __ bal(&here);
+ __ nal();
__ dsll(at, a0, 3); // In delay slot.
- __ bind(&here);
__ daddu(at, at, ra);
- __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
+ __ Ld(at, MemOperand(at, 4 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3453,15 +3449,13 @@ TEST(jump_tables3) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
- PredictableCodeSizeScope predictable(
- &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
- Label here;
+ PredictableCodeSizeScope predictable(&assm,
+ (kNumCases * 2 + 6) * kInstrSize);
- __ bal(&here);
+ __ nal();
__ dsll(at, a0, 3); // In delay slot.
- __ bind(&here);
__ daddu(at, at, ra);
- __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
+ __ Ld(at, MemOperand(at, 4 * kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -5437,8 +5431,8 @@ uint64_t run_jic(int16_t offset) {
__ beq(v0, t1, &stop_execution);
__ nop();
- __ bal(&get_program_counter); // t0 <- program counter
- __ nop();
+ __ nal(); // t0 <- program counter
+ __ mov(t0, ra);
__ jic(t0, offset);
__ addiu(v0, v0, 0x100);
@@ -5449,11 +5443,6 @@ uint64_t run_jic(int16_t offset) {
__ jr(ra);
__ nop();
- __ bind(&get_program_counter);
- __ mov(t0, ra);
- __ jr(ra);
- __ nop();
-
__ bind(&stop_execution);
__ pop(ra);
__ jr(ra);
@@ -5784,8 +5773,8 @@ uint64_t run_jialc(int16_t offset) {
// Block 3 (Main)
__ bind(&main_block);
- __ bal(&get_program_counter); // t0 <- program counter
- __ nop();
+ __ nal(); // t0 <- program counter
+ __ mov(t0, ra);
__ jialc(t0, offset);
__ addiu(v0, v0, 0x4);
__ pop(ra);
@@ -5804,11 +5793,6 @@ uint64_t run_jialc(int16_t offset) {
__ jr(ra);
__ nop();
- __ bind(&get_program_counter);
- __ mov(t0, ra);
- __ jr(ra);
- __ nop();
-
CodeDesc desc;
assm.GetCode(isolate, &desc);
@@ -6263,7 +6247,7 @@ TEST(Trampoline) {
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
Label done;
- size_t nr_calls = kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
+ size_t nr_calls = kMaxBranchOffset / (2 * kInstrSize) + 2;
for (size_t i = 0; i < nr_calls; ++i) {
__ BranchShort(&done, eq, a0, Operand(a1));
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index eb172ee2aa..7ecef4429c 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -39,6 +39,7 @@
#include "src/ostreams.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index ba6af617bb..d7cdb39933 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -4,7 +4,7 @@
#include <cmath>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-string-gen.h"
@@ -13,9 +13,12 @@
#include "src/code-stub-assembler.h"
#include "src/compiler/node.h"
#include "src/debug/debug.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/promise-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -2377,7 +2380,8 @@ TEST(CreatePromiseResolvingFunctions) {
std::tie(resolve, reject) = m.CreatePromiseResolvingFunctions(
promise, m.BooleanConstant(false), native_context);
Node* const kSize = m.IntPtrConstant(2);
- Node* const arr = m.AllocateFixedArray(PACKED_ELEMENTS, kSize);
+ TNode<FixedArray> const arr =
+ m.Cast(m.AllocateFixedArray(PACKED_ELEMENTS, kSize));
m.StoreFixedArrayElement(arr, 0, resolve);
m.StoreFixedArrayElement(arr, 1, reject);
m.Return(arr);
@@ -3451,6 +3455,54 @@ TEST(IsDoubleElementsKind) {
0);
}
+TEST(TestCallBuiltinInlineTrampoline) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ const int kContextOffset = 2;
+ Node* str = m.Parameter(0);
+ Node* context = m.Parameter(kNumParams + kContextOffset);
+
+ Node* index = m.SmiConstant(2);
+
+ m.Return(m.CallStub(Builtins::CallableFor(isolate, Builtins::kStringRepeat),
+ context, str, index));
+ AssemblerOptions options = AssemblerOptions::Default(isolate);
+ options.inline_offheap_trampolines = true;
+ options.use_pc_relative_calls_and_jumps = false;
+ options.isolate_independent_code = false;
+ FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+ MaybeHandle<Object> result = ft.Call(MakeString("abcdef"));
+ CHECK(String::Equals(isolate, MakeString("abcdefabcdef"),
+ Handle<String>::cast(result.ToHandleChecked())));
+}
+
+TEST(TestCallBuiltinIndirectLoad) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester asm_tester(isolate, kNumParams);
+ CodeStubAssembler m(asm_tester.state());
+
+ const int kContextOffset = 2;
+ Node* str = m.Parameter(0);
+ Node* context = m.Parameter(kNumParams + kContextOffset);
+
+ Node* index = m.SmiConstant(2);
+
+ m.Return(m.CallStub(Builtins::CallableFor(isolate, Builtins::kStringRepeat),
+ context, str, index));
+ AssemblerOptions options = AssemblerOptions::Default(isolate);
+ options.inline_offheap_trampolines = false;
+ options.use_pc_relative_calls_and_jumps = false;
+ options.isolate_independent_code = true;
+ FunctionTester ft(asm_tester.GenerateCode(options), kNumParams);
+ MaybeHandle<Object> result = ft.Call(MakeString("abcdef"));
+ CHECK(String::Equals(isolate, MakeString("abcdefabcdef"),
+ Handle<String>::cast(result.ToHandleChecked())));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index 8247afeb49..a5746a7f8e 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -37,6 +37,7 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
@@ -116,7 +117,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index 43bea1d910..cb20931a5d 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -37,6 +37,7 @@
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
@@ -128,7 +129,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index 77a2254245..54f53e57c3 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -38,6 +38,7 @@
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index f57459ead0..ed1798160b 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -39,6 +39,7 @@
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
@@ -129,7 +130,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index 6605f82422..3518e722c7 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -39,6 +39,7 @@
#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
@@ -126,7 +127,6 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
MakeAssemblerBufferExecutable(buffer, allocated);
- Assembler::FlushICache(buffer, allocated);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index 327d45d16c..c55b2e2b1e 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -37,6 +37,7 @@
#include "src/register-configuration.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-code-stubs.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 34868cd4c4..63904e086f 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -30,7 +30,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/compiler.h"
#include "src/disasm.h"
#include "src/heap/factory.h"
@@ -66,8 +66,9 @@ static Handle<JSFunction> Compile(const char* source) {
CStrVector(source)).ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- source_code, Compiler::ScriptDetails(), v8::ScriptOriginOptions(),
- nullptr, nullptr, v8::ScriptCompiler::kNoCompileOptions,
+ isolate, source_code, Compiler::ScriptDetails(),
+ v8::ScriptOriginOptions(), nullptr, nullptr,
+ v8::ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -658,7 +659,7 @@ TEST(CompileFunctionInContextScriptOrigin) {
v8::Exception::GetStackTrace(try_catch.Exception());
CHECK(!stack.IsEmpty());
CHECK_GT(stack->GetFrameCount(), 0);
- v8::Local<v8::StackFrame> frame = stack->GetFrame(0);
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(CcTest::isolate(), 0);
CHECK_EQ(23, frame->GetLineNumber());
CHECK_EQ(42 + strlen("throw "), static_cast<unsigned>(frame->GetColumn()));
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index f74bdf1ede..75af3f6d98 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -33,7 +33,7 @@
#include "src/v8.h"
#include "include/v8-profiler.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/platform/platform.h"
#include "src/deoptimizer.h"
#include "src/libplatform/default-platform.h"
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 7430fbf06b..f678b8ca6f 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/compilation-cache.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug.h"
@@ -191,10 +191,6 @@ class DebugEventCounter : public v8::debug::DebugDelegate {
public:
void BreakProgramRequested(v8::Local<v8::Context>,
const std::vector<v8::debug::BreakpointId>&) {
- v8::internal::Debug* debug = CcTest::i_isolate()->debug();
- // When hitting a debug event listener there must be a break set.
- CHECK_NE(debug->break_id(), 0);
-
break_point_hit_count++;
// Perform a full deoptimization when the specified number of
// breaks have been hit.
@@ -217,10 +213,6 @@ class DebugEventBreakPointCollectGarbage : public v8::debug::DebugDelegate {
public:
void BreakProgramRequested(v8::Local<v8::Context>,
const std::vector<v8::debug::BreakpointId>&) {
- v8::internal::Debug* debug = CcTest::i_isolate()->debug();
- // When hitting a debug event listener there must be a break set.
- CHECK_NE(debug->break_id(), 0);
-
// Perform a garbage collection when break point is hit and continue. Based
// on the number of break points hit either scavenge or mark compact
// collector is used.
@@ -241,10 +233,6 @@ class DebugEventBreak : public v8::debug::DebugDelegate {
public:
void BreakProgramRequested(v8::Local<v8::Context>,
const std::vector<v8::debug::BreakpointId>&) {
- v8::internal::Debug* debug = CcTest::i_isolate()->debug();
- // When hitting a debug event listener there must be a break set.
- CHECK_NE(debug->break_id(), 0);
-
// Count the number of breaks.
break_point_hit_count++;
@@ -271,9 +259,6 @@ class DebugEventBreakMax : public v8::debug::DebugDelegate {
const std::vector<v8::debug::BreakpointId>&) {
v8::Isolate* v8_isolate = CcTest::isolate();
v8::internal::Isolate* isolate = CcTest::i_isolate();
- v8::internal::Debug* debug = isolate->debug();
- // When hitting a debug event listener there must be a break set.
- CHECK_NE(debug->break_id(), 0);
if (break_point_hit_count < max_break_point_hit_count) {
// Count the number of breaks.
break_point_hit_count++;
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index f0d62caff8..0d86f135ea 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/debug/debug.h"
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 9f921558dd..1101ec06eb 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -28,7 +28,6 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/api.h"
#include "src/builtins/builtins-constructor.h"
#include "src/debug/debug.h"
#include "src/execution.h"
@@ -195,7 +194,7 @@ class ObjectHashTableTest: public ObjectHashTable {
}
int lookup(int key) {
- Handle<Object> key_obj(Smi::FromInt(key), GetIsolate());
+ Handle<Object> key_obj(Smi::FromInt(key), CcTest::i_isolate());
return Smi::ToInt(Lookup(key_obj));
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index d1cebe8138..877ae6665f 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -58,7 +58,7 @@ bool DisassembleAndCompare(byte* begin, UseRegex use_regex,
std::vector<std::string> expected_disassembly = {expected_strings...};
size_t n_expected = expected_disassembly.size();
- byte* end = begin + (n_expected * Assembler::kInstrSize);
+ byte* end = begin + (n_expected * kInstrSize);
std::vector<std::string> disassembly;
for (byte* pc = begin; pc < end;) {
@@ -483,6 +483,9 @@ TEST(Type3) {
COMPARE(rbit(r1, r2), "e6ff1f32 rbit r1, r2");
COMPARE(rbit(r10, ip), "e6ffaf3c rbit r10, ip");
+
+ COMPARE(rev(r1, r2), "e6bf1f32 rev r1, r2");
+ COMPARE(rev(r10, ip), "e6bfaf3c rev r10, ip");
}
COMPARE(usat(r0, 1, Operand(r1)),
@@ -672,14 +675,14 @@ TEST(Vfp) {
COMPARE(vmov(s3, Float32(13.0f)),
"eef21a0a vmov.f32 s3, #13");
- COMPARE(vmov(d0, VmovIndexLo, r0),
+ COMPARE(vmov(NeonS32, d0, 0, r0),
"ee000b10 vmov.32 d0[0], r0");
- COMPARE(vmov(d0, VmovIndexHi, r0),
+ COMPARE(vmov(NeonS32, d0, 1, r0),
"ee200b10 vmov.32 d0[1], r0");
- COMPARE(vmov(r2, VmovIndexLo, d15),
+ COMPARE(vmov(NeonS32, r2, d15, 0),
"ee1f2b10 vmov.32 r2, d15[0]");
- COMPARE(vmov(r3, VmovIndexHi, d14),
+ COMPARE(vmov(NeonS32, r3, d14, 1),
"ee3e3b10 vmov.32 r3, d14[1]");
COMPARE(vldr(s0, r0, 0),
@@ -833,9 +836,9 @@ TEST(Vfp) {
COMPARE(vmov(d30, Double(16.0)),
"eef3eb00 vmov.f64 d30, #16");
- COMPARE(vmov(d31, VmovIndexLo, r7),
+ COMPARE(vmov(NeonS32, d31, 0, r7),
"ee0f7b90 vmov.32 d31[0], r7");
- COMPARE(vmov(d31, VmovIndexHi, r7),
+ COMPARE(vmov(NeonS32, d31, 1, r7),
"ee2f7b90 vmov.32 d31[1], r7");
COMPARE(vldr(d25, r0, 0),
@@ -1513,7 +1516,7 @@ static void TestLoadLiteral(byte* buffer, Assembler* assm, bool* failure,
char expected_string[80];
snprintf(expected_string, sizeof(expected_string), expected_string_template,
abs(offset), offset,
- progcounter + Instruction::kPCReadOffset + offset);
+ progcounter + Instruction::kPcLoadDelta + offset);
if (!DisassembleAndCompare(progcounter, kRawString, expected_string)) {
*failure = true;
}
@@ -1612,6 +1615,9 @@ TEST(LoadStoreExclusive) {
COMPARE(strexh(r0, r1, r2), "e1e20f91 strexh r0, r1, [r2]");
COMPARE(ldrex(r0, r1), "e1910f9f ldrex r0, [r1]");
COMPARE(strex(r0, r1, r2), "e1820f91 strex r0, r1, [r2]");
+ COMPARE(ldrexd(r0, r1, r2), "e1b20f9f ldrexd r0, [r2]");
+ COMPARE(strexd(r0, r2, r3, r4),
+ "e1a40f92 strexd r0, r2, [r4]");
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 275feaa88e..1cc14271a6 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -815,7 +815,7 @@ TEST_(adr) {
TEST_(branch) {
SET_UP_ASM();
- #define INST_OFF(x) ((x) >> kInstructionSizeLog2)
+#define INST_OFF(x) ((x) >> kInstrSizeLog2)
COMPARE_PREFIX(b(INST_OFF(0x4)), "b #+0x4");
COMPARE_PREFIX(b(INST_OFF(-0x4)), "b #-0x4");
COMPARE_PREFIX(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
@@ -840,6 +840,7 @@ TEST_(branch) {
COMPARE_PREFIX(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
COMPARE_PREFIX(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
COMPARE_PREFIX(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
+#undef INST_OFF
COMPARE(br(x0), "br x0");
COMPARE(blr(x1), "blr x1");
COMPARE(ret(x2), "ret x2");
@@ -1881,7 +1882,11 @@ TEST_(debug) {
byte* buf = static_cast<byte*>(malloc(INSTR_SIZE));
uint32_t encoding = 0;
AssemblerOptions options;
+#ifdef USE_SIMULATOR
options.enable_simulator_code = (i == 1);
+#else
+ CHECK(!options.enable_simulator_code);
+#endif
Assembler* assm = new Assembler(options, buf, INSTR_SIZE);
Decoder<DispatchingDecoderVisitor>* decoder =
new Decoder<DispatchingDecoderVisitor>();
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 84764621c9..a24de5656d 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -91,6 +91,8 @@ TEST(DisasmIa320) {
__ add(edi, Operand(ebp, ecx, times_4, -3999));
__ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
+ __ bswap(eax);
+
__ nop();
__ add(ebx, Immediate(12));
__ nop();
@@ -391,6 +393,7 @@ TEST(DisasmIa320) {
__ shufps(xmm0, xmm0, 0x0);
__ cvtsd2ss(xmm0, xmm1);
__ cvtsd2ss(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movq(xmm0, Operand(edx, 4));
// logic operation
__ andps(xmm0, xmm1);
@@ -871,6 +874,8 @@ TEST(DisasmIa320) {
__ cmpxchg_b(Operand(esp, 12), eax);
__ cmpxchg_w(Operand(ebx, ecx, times_4, 10000), eax);
__ cmpxchg(Operand(ebx, ecx, times_4, 10000), eax);
+ __ cmpxchg(Operand(ebx, ecx, times_4, 10000), eax);
+ __ cmpxchg8b(Operand(ebx, ecx, times_8, 10000));
}
// lock prefix.
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 5811371999..c42606485c 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -89,6 +89,8 @@ TEST(DisasmX64) {
__ addq(rdi, Operand(rbp, rcx, times_4, -3999));
__ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
+ __ bswapl(rax);
+ __ bswapq(rdi);
__ bsrl(rax, r15);
__ bsrl(r9, Operand(rcx, times_8, 91919));
@@ -384,6 +386,8 @@ TEST(DisasmX64) {
__ cvttss2si(rdx, xmm1);
__ cvtsd2ss(xmm0, xmm1);
__ cvtsd2ss(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ cvttps2dq(xmm0, xmm1);
+ __ cvttps2dq(xmm0, Operand(rbx, rcx, times_4, 10000));
__ movaps(xmm0, xmm1);
__ movdqa(xmm0, Operand(rsp, 12));
__ movdqa(Operand(rsp, 12), xmm0);
@@ -524,6 +528,8 @@ TEST(DisasmX64) {
{
if (CpuFeatures::IsSupported(SSSE3)) {
CpuFeatureScope scope(&assm, SSSE3);
+ __ palignr(xmm5, xmm1, 5);
+ __ palignr(xmm5, Operand(rdx, 4), 5);
SSSE3_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
}
}
@@ -539,6 +545,8 @@ TEST(DisasmX64) {
__ pextrd(r12, xmm0, 1);
__ pinsrd(xmm9, r9, 0);
__ pinsrd(xmm5, Operand(rax, 4), 1);
+ __ pblendw(xmm5, xmm1, 1);
+ __ pblendw(xmm9, Operand(rax, 4), 1);
__ cmpps(xmm5, xmm1, 1);
__ cmpps(xmm5, Operand(rbx, rcx, times_4, 10000), 1);
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index 5237a54173..59252f2ef8 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -15,6 +15,7 @@
#include "src/heap/factory.h"
#include "src/ic/stub-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 44a81dd294..b809854270 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -5,7 +5,7 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/debug/debug.h"
#include "src/execution.h"
#include "src/global-handles.h"
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 95b671b43a..4e37103558 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -609,7 +609,6 @@ static void TestGeneralizeField(int detach_property_at_index,
bool expected_deprecation,
bool expected_field_type_dependency) {
Isolate* isolate = CcTest::i_isolate();
- JSHeapBroker broker(isolate);
Handle<FieldType> any_type = FieldType::Any(isolate);
CHECK(detach_property_at_index >= -1 &&
@@ -656,6 +655,7 @@ static void TestGeneralizeField(int detach_property_at_index,
// Create new maps by generalizing representation of propX field.
CanonicalHandleScope canonical(isolate);
+ JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
dependencies.DependOnFieldType(MapRef(&broker, map), property_index);
@@ -989,7 +989,6 @@ TEST(GeneralizeFieldWithAccessorProperties) {
static void TestReconfigureDataFieldAttribute_GeneralizeField(
const CRFTData& from, const CRFTData& to, const CRFTData& expected) {
Isolate* isolate = CcTest::i_isolate();
- JSHeapBroker broker(isolate);
Expectations expectations(isolate);
@@ -1028,6 +1027,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeField(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
+ JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
dependencies.DependOnFieldType(MapRef(&broker, map), kSplitProp);
@@ -1073,7 +1073,6 @@ static void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
const CRFTData& from, const CRFTData& to, const CRFTData& expected,
bool expected_field_type_dependency = true) {
Isolate* isolate = CcTest::i_isolate();
- JSHeapBroker broker(isolate);
Expectations expectations(isolate);
@@ -1112,6 +1111,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
+ JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
dependencies.DependOnFieldType(MapRef(&broker, map), kSplitProp);
@@ -1753,7 +1753,6 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
static void TestReconfigureElementsKind_GeneralizeField(
const CRFTData& from, const CRFTData& to, const CRFTData& expected) {
Isolate* isolate = CcTest::i_isolate();
- JSHeapBroker broker(isolate);
Expectations expectations(isolate, PACKED_SMI_ELEMENTS);
@@ -1793,6 +1792,7 @@ static void TestReconfigureElementsKind_GeneralizeField(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
+ JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
dependencies.DependOnFieldType(MapRef(&broker, map), kDiffProp);
@@ -1848,7 +1848,6 @@ static void TestReconfigureElementsKind_GeneralizeField(
static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
const CRFTData& from, const CRFTData& to, const CRFTData& expected) {
Isolate* isolate = CcTest::i_isolate();
- JSHeapBroker broker(isolate);
Expectations expectations(isolate, PACKED_SMI_ELEMENTS);
@@ -1888,6 +1887,7 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
Zone zone(isolate->allocator(), ZONE_NAME);
CanonicalHandleScope canonical(isolate);
+ JSHeapBroker broker(isolate, &zone);
CompilationDependencies dependencies(isolate, &zone);
dependencies.DependOnFieldType(MapRef(&broker, map), kDiffProp);
@@ -2636,8 +2636,7 @@ struct SameMapChecker {
// Checks that both |map1| and |map2| should stays non-deprecated, this is
// the case when property kind is change.
struct PropertyKindReconfigurationChecker {
- void Check(Isolate* isolate, Expectations& expectations, Handle<Map> map1,
- Handle<Map> map2) {
+ void Check(Expectations& expectations, Handle<Map> map1, Handle<Map> map2) {
CHECK(!map1->is_deprecated());
CHECK(!map2->is_deprecated());
CHECK_NE(*map1, *map2);
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 160f68813c..61f3ef0eeb 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
#include "src/string-search.h"
diff --git a/deps/v8/test/cctest/test-fuzz-arm64.cc b/deps/v8/test/cctest/test-fuzz-arm64.cc
index 059eda46fd..4345bb5f44 100644
--- a/deps/v8/test/cctest/test-fuzz-arm64.cc
+++ b/deps/v8/test/cctest/test-fuzz-arm64.cc
@@ -41,7 +41,7 @@ TEST(FUZZ_decoder) {
seed48(seed);
Decoder<DispatchingDecoderVisitor> decoder;
- Instruction buffer[kInstructionSize];
+ Instruction buffer[kInstrSize];
for (int i = 0; i < instruction_count; i++) {
uint32_t instr = static_cast<uint32_t>(mrand48());
@@ -61,7 +61,7 @@ TEST(FUZZ_disasm) {
Decoder<DispatchingDecoderVisitor> decoder;
DisassemblingDecoder disasm;
- Instruction buffer[kInstructionSize];
+ Instruction buffer[kInstrSize];
decoder.AppendVisitor(&disasm);
for (int i = 0; i < instruction_count; i++) {
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 89b5ed4c83..ea76faa857 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/global-handles.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
diff --git a/deps/v8/test/cctest/test-global-object.cc b/deps/v8/test/cctest/test-global-object.cc
index 24dd270b73..5c154565d9 100644
--- a/deps/v8/test/cctest/test-global-object.cc
+++ b/deps/v8/test/cctest/test-global-object.cc
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/api.h"
#include "src/objects-inl.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-hashcode.cc b/deps/v8/test/cctest/test-hashcode.cc
index a433c77ac7..2059d53885 100644
--- a/deps/v8/test/cctest/test-hashcode.cc
+++ b/deps/v8/test/cctest/test-hashcode.cc
@@ -6,7 +6,6 @@
#include <sstream>
#include <utility>
-#include "src/api.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/v8.h"
@@ -26,14 +25,14 @@ int AddToSetAndGetHash(Isolate* isolate, Handle<JSObject> obj,
return Smi::ToInt(obj->GetHash());
}
-void CheckFastObject(Isolate* isolate, Handle<JSObject> obj, int hash) {
+void CheckFastObject(Handle<JSObject> obj, int hash) {
CHECK(obj->HasFastProperties());
CHECK(obj->raw_properties_or_hash()->IsPropertyArray());
CHECK_EQ(Smi::FromInt(hash), obj->GetHash());
CHECK_EQ(hash, obj->property_array()->Hash());
}
-void CheckDictionaryObject(Isolate* isolate, Handle<JSObject> obj, int hash) {
+void CheckDictionaryObject(Handle<JSObject> obj, int hash) {
CHECK(!obj->HasFastProperties());
CHECK(obj->raw_properties_or_hash()->IsDictionary());
CHECK_EQ(Smi::FromInt(hash), obj->GetHash());
@@ -83,7 +82,7 @@ TEST(AddHashCodeToFastObjectWithPropertiesArray) {
CHECK(obj->HasFastProperties());
int hash = AddToSetAndGetHash(isolate, obj, true);
- CheckFastObject(isolate, obj, hash);
+ CheckFastObject(obj, hash);
}
TEST(AddHashCodeToSlowObject) {
@@ -99,7 +98,7 @@ TEST(AddHashCodeToSlowObject) {
CHECK(obj->raw_properties_or_hash()->IsDictionary());
int hash = AddToSetAndGetHash(isolate, obj, false);
- CheckDictionaryObject(isolate, obj, hash);
+ CheckDictionaryObject(obj, hash);
}
TEST(TransitionFastWithInObjectToFastWithPropertyArray) {
@@ -121,7 +120,7 @@ TEST(TransitionFastWithInObjectToFastWithPropertyArray) {
int length = obj->property_array()->length();
CompileRun("x.e = 5;");
CHECK(obj->property_array()->length() > length);
- CheckFastObject(isolate, obj, hash);
+ CheckFastObject(obj, hash);
}
TEST(TransitionFastWithPropertyArray) {
@@ -143,7 +142,7 @@ TEST(TransitionFastWithPropertyArray) {
int length = obj->property_array()->length();
CompileRun("x.f = 2; x.g = 5; x.h = 2");
CHECK(obj->property_array()->length() > length);
- CheckFastObject(isolate, obj, hash);
+ CheckFastObject(obj, hash);
}
TEST(TransitionFastWithPropertyArrayToSlow) {
@@ -165,7 +164,7 @@ TEST(TransitionFastWithPropertyArrayToSlow) {
JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
- CheckDictionaryObject(isolate, obj, hash);
+ CheckDictionaryObject(obj, hash);
}
TEST(TransitionSlowToSlow) {
@@ -187,7 +186,7 @@ TEST(TransitionSlowToSlow) {
int length = obj->property_dictionary()->length();
CompileRun("for(var i = 0; i < 10; i++) { x['f'+i] = i };");
CHECK(obj->property_dictionary()->length() > length);
- CheckDictionaryObject(isolate, obj, hash);
+ CheckDictionaryObject(obj, hash);
}
TEST(TransitionSlowToFastWithoutProperties) {
@@ -225,7 +224,7 @@ TEST(TransitionSlowToFastWithPropertyArray) {
CHECK_EQ(hash, obj->property_dictionary()->Hash());
JSObject::MigrateSlowToFast(obj, 0, "cctest/test-hashcode");
- CheckFastObject(isolate, obj, hash);
+ CheckFastObject(obj, hash);
}
} // namespace internal
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 18203c7725..5d8094d635 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -34,7 +34,7 @@
#include "src/v8.h"
#include "include/v8-profiler.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/base/hashmap.h"
#include "src/collector.h"
@@ -49,7 +49,9 @@ using i::AllocationTraceNode;
using i::AllocationTraceTree;
using i::AllocationTracker;
using i::ArrayVector;
+using i::SourceLocation;
using i::Vector;
+using v8::base::Optional;
namespace {
@@ -151,6 +153,23 @@ static const v8::HeapGraphNode* GetRootChild(const v8::HeapSnapshot* snapshot,
return GetChildByName(snapshot->GetRoot(), name);
}
+static Optional<SourceLocation> GetLocation(const v8::HeapSnapshot* s,
+ const v8::HeapGraphNode* node) {
+ const i::HeapSnapshot* snapshot = reinterpret_cast<const i::HeapSnapshot*>(s);
+ const std::vector<SourceLocation>& locations = snapshot->locations();
+ const int index =
+ const_cast<i::HeapEntry*>(reinterpret_cast<const i::HeapEntry*>(node))
+ ->index();
+
+ for (const auto& loc : locations) {
+ if (loc.entry_index == index) {
+ return Optional<SourceLocation>(loc);
+ }
+ }
+
+ return Optional<SourceLocation>();
+}
+
static const v8::HeapGraphNode* GetProperty(v8::Isolate* isolate,
const v8::HeapGraphNode* node,
v8::HeapGraphEdge::Type type,
@@ -258,6 +277,49 @@ TEST(HeapSnapshot) {
CHECK(det.has_C2);
}
+TEST(HeapSnapshotLocations) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ CompileRun(
+ "function X(a) { return function() { return a; } }\n"
+ "function* getid() { yield 1; }\n"
+ "class A {}\n"
+ "var x = X(1);\n"
+ "var g = getid();\n"
+ "var o = new A();");
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* x =
+ GetProperty(env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "x");
+ CHECK(x);
+
+ Optional<SourceLocation> x_loc = GetLocation(snapshot, x);
+ CHECK(x_loc);
+ CHECK_EQ(0, x_loc->line);
+ CHECK_EQ(31, x_loc->col);
+
+ const v8::HeapGraphNode* g =
+ GetProperty(env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "g");
+ CHECK(x);
+
+ Optional<SourceLocation> g_loc = GetLocation(snapshot, g);
+ CHECK(g_loc);
+ CHECK_EQ(1, g_loc->line);
+ CHECK_EQ(15, g_loc->col);
+
+ const v8::HeapGraphNode* o =
+ GetProperty(env->GetIsolate(), global, v8::HeapGraphEdge::kProperty, "o");
+ CHECK(x);
+
+ Optional<SourceLocation> o_loc = GetLocation(snapshot, o);
+ CHECK(o_loc);
+ CHECK_EQ(2, o_loc->line);
+ CHECK_EQ(0, o_loc->col);
+}
TEST(HeapSnapshotObjectSizes) {
LocalContext env;
@@ -1045,6 +1107,7 @@ TEST(HeapSnapshotJSONSerialization) {
CHECK(parsed_snapshot->Has(env.local(), v8_str("snapshot")).FromJust());
CHECK(parsed_snapshot->Has(env.local(), v8_str("nodes")).FromJust());
CHECK(parsed_snapshot->Has(env.local(), v8_str("edges")).FromJust());
+ CHECK(parsed_snapshot->Has(env.local(), v8_str("locations")).FromJust());
CHECK(parsed_snapshot->Has(env.local(), v8_str("strings")).FromJust());
// Get node and edge "member" offsets.
@@ -1869,6 +1932,63 @@ static int StringCmp(const char* ref, i::String* act) {
return result;
}
+TEST(GetConstructor) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ CompileRun(
+ "function Constructor1() {};\n"
+ "var obj1 = new Constructor1();\n"
+ "var Constructor2 = function() {};\n"
+ "var obj2 = new Constructor2();\n"
+ "var obj3 = {};\n"
+ "obj3.__proto__ = { constructor: function Constructor3() {} };\n"
+ "var obj4 = {};\n"
+ "// Slow properties\n"
+ "for (var i=0; i<2000; ++i) obj4[\"p\" + i] = i;\n"
+ "obj4.__proto__ = { constructor: function Constructor4() {} };\n"
+ "var obj5 = {};\n"
+ "var obj6 = {};\n"
+ "obj6.constructor = 6;");
+ v8::Local<v8::Object> js_global =
+ env->Global()->GetPrototype().As<v8::Object>();
+ v8::Local<v8::Object> obj1 = js_global->Get(env.local(), v8_str("obj1"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj1 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj1));
+ CHECK(i::V8HeapExplorer::GetConstructor(*js_obj1));
+ v8::Local<v8::Object> obj2 = js_global->Get(env.local(), v8_str("obj2"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj2 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj2));
+ CHECK(i::V8HeapExplorer::GetConstructor(*js_obj2));
+ v8::Local<v8::Object> obj3 = js_global->Get(env.local(), v8_str("obj3"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj3 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj3));
+ CHECK(i::V8HeapExplorer::GetConstructor(*js_obj3));
+ v8::Local<v8::Object> obj4 = js_global->Get(env.local(), v8_str("obj4"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj4 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj4));
+ CHECK(i::V8HeapExplorer::GetConstructor(*js_obj4));
+ v8::Local<v8::Object> obj5 = js_global->Get(env.local(), v8_str("obj5"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj5 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj5));
+ CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj5));
+ v8::Local<v8::Object> obj6 = js_global->Get(env.local(), v8_str("obj6"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj6 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj6));
+ CHECK(!i::V8HeapExplorer::GetConstructor(*js_obj6));
+}
TEST(GetConstructorName) {
LocalContext env;
@@ -2134,6 +2254,35 @@ TEST(AccessorInfo) {
CHECK(setter);
}
+TEST(JSGeneratorObject) {
+ v8::Isolate* isolate = CcTest::isolate();
+ LocalContext env;
+ v8::HandleScope scope(isolate);
+ v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
+
+ CompileRun(
+ "function* foo() { yield 1; }\n"
+ "g = foo();\n");
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* g =
+ GetProperty(isolate, global, v8::HeapGraphEdge::kProperty, "g");
+ CHECK(g);
+ const v8::HeapGraphNode* function = GetProperty(
+ env->GetIsolate(), g, v8::HeapGraphEdge::kInternal, "function");
+ CHECK(function);
+ const v8::HeapGraphNode* context = GetProperty(
+ env->GetIsolate(), g, v8::HeapGraphEdge::kInternal, "context");
+ CHECK(context);
+ const v8::HeapGraphNode* receiver = GetProperty(
+ env->GetIsolate(), g, v8::HeapGraphEdge::kInternal, "receiver");
+ CHECK(receiver);
+ const v8::HeapGraphNode* parameters_and_registers =
+ GetProperty(env->GetIsolate(), g, v8::HeapGraphEdge::kInternal,
+ "parameters_and_registers");
+ CHECK(parameters_and_registers);
+}
bool HasWeakEdge(const v8::HeapGraphNode* node) {
for (int i = 0; i < node->GetChildrenCount(); ++i) {
@@ -3436,12 +3585,18 @@ TEST(SamplingHeapProfilerRateAgnosticEstimates) {
// what we expect in this test.
v8::internal::FLAG_always_opt = false;
+ // Disable compilation cache to force compilation in both cases
+ v8::internal::FLAG_compilation_cache = false;
+
// Suppress randomness to avoid flakiness in tests.
v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
// stress_incremental_marking adds randomness to the test.
v8::internal::FLAG_stress_incremental_marking = false;
+ // warmup compilation
+ CompileRun(simple_sampling_heap_profiler_script);
+
int count_1024 = 0;
{
heap_profiler->StartSamplingHeapProfiler(1024);
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 9ebe67cab4..0e850b1682 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -6,7 +6,7 @@
#include <sstream>
#include <utility>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/v8.h"
@@ -1148,7 +1148,7 @@ TEST(SubclassTypedArrayBuiltin) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
-#define TYPED_ARRAY_TEST(Type, type, TYPE, elementType, size) \
+#define TYPED_ARRAY_TEST(Type, type, TYPE, elementType) \
TestSubclassBuiltin("A" #Type, JS_TYPED_ARRAY_TYPE, #Type "Array", "42");
TYPED_ARRAYS(TYPED_ARRAY_TEST)
diff --git a/deps/v8/test/cctest/test-isolate-independent-builtins.cc b/deps/v8/test/cctest/test-isolate-independent-builtins.cc
index c878484976..4b4babdb37 100644
--- a/deps/v8/test/cctest/test-isolate-independent-builtins.cc
+++ b/deps/v8/test/cctest/test-isolate-independent-builtins.cc
@@ -11,6 +11,7 @@
#include "src/simulator.h"
#include "src/snapshot/macros.h"
#include "src/snapshot/snapshot.h"
+#include "test/common/assembler-tester.h"
// To generate the binary files for the test function, enable this section and
// run GenerateTestFunctionData once on each arch.
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index 05044a4aa5..e2fec0a3ae 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/debug/liveedit.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
@@ -248,14 +248,16 @@ TEST(LiveEditPatchFunctions) {
PatchFunctions(context, "function foo() { return 1; }",
"function foo() { return 42; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
42);
// It is expected, we do not reevaluate top level function.
PatchFunctions(context, "var a = 1; function foo() { return a; }",
"var a = 3; function foo() { return a; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
1);
// Throw exception since var b is not defined in original source.
@@ -270,14 +272,16 @@ TEST(LiveEditPatchFunctions) {
PatchFunctions(context, "var a = 1; function foo() { return a; }",
"var b = 4; function foo() { var b = 5; return b; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
5);
PatchFunctions(context, "var a = 1; function foo() { return a; }",
"var b = 4; function foo() { var a = 6; return a; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
6);
@@ -292,7 +296,8 @@ TEST(LiveEditPatchFunctions) {
PatchFunctions(context, "var a = 1; function foo() { return a; }",
"var b = 1; var a = 2; function foo() { return a; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
1);
@@ -307,14 +312,16 @@ TEST(LiveEditPatchFunctions) {
PatchFunctions(context, "function foo() { var a = 1; return a; }",
"function foo() { var b = 1; return b; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
1);
PatchFunctions(context, "var a = 3; function foo() { var a = 1; return a; }",
"function foo() { var b = 1; return a; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
3);
@@ -330,14 +337,16 @@ TEST(LiveEditPatchFunctions) {
PatchFunctions(context, "function fooArgs(a1, b1) { return a1 + b1; }",
"function fooArgs(a2, b2, c2) { return a2 + b2 + c2; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "fooArgs(1,2,3)")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
6);
PatchFunctions(context, "function fooArgs(a1, b1) { return a1 + b1; }",
"function fooArgs(a1, b1, c1) { return a1 + b1 + c1; }");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "fooArgs(1,2,3)")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
6);
@@ -347,7 +356,8 @@ TEST(LiveEditPatchFunctions) {
"%OptimizeFunctionOnNextCall(foo); foo(1,2);",
"function foo(a, b) { return a * b; };");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo(5,7)")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
35);
i::FLAG_allow_natives_syntax = false;
@@ -359,7 +369,8 @@ TEST(LiveEditPatchFunctions) {
"function foo(a,b) { function op(a,b) { return a * b } return op(a,b); "
"}");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo(8,9)")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
72);
@@ -368,7 +379,8 @@ TEST(LiveEditPatchFunctions) {
"class Foo { constructor(a,b) { this.data = a + b; } };",
"class Foo { constructor(a,b) { this.data = a * b; } };");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "new Foo(4,5).data")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
20);
// Change inner functions.
@@ -379,7 +391,8 @@ TEST(LiveEditPatchFunctions) {
"function f(evt) { function f2() { return 1; } return f2() + f3(); "
"function f3() { return 2; } } function f4() {}");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "f()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
3);
// Change usage of outer scope.
@@ -424,21 +437,24 @@ TEST(LiveEditPatchFunctions) {
// TODO(kozyatinskiy): should work when we remove (.
PatchFunctions(context, "f = () => 2", "f = a => a");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "f(3)")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
2);
// Replace function with not a function.
PatchFunctions(context, "f = () => 2", "f = a == 2");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "f(3)")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
2);
// TODO(kozyatinskiy): should work when we put function into (...).
PatchFunctions(context, "f = a => 2", "f = (a => 5)()");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "f()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
2);
@@ -457,11 +473,13 @@ TEST(LiveEditPatchFunctions) {
"f()\n");
// TODO(kozyatinskiy): ditto.
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "f2()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
5);
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "f()")
- ->ToInt32(env->GetIsolate())
+ ->ToInt32(context)
+ .ToLocalChecked()
->Value(),
3);
}
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index ebfbe88de2..dec279e781 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -31,7 +31,6 @@
#include "src/v8.h"
-#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 8a09a17684..ea51a168d7 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -30,7 +30,7 @@
#include <stdlib.h>
#include "include/v8-profiler.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/code-stubs.h"
#include "src/disassembler.h"
#include "src/isolate.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 1dfa22b4cc..9ac73af3e5 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -27,27 +27,16 @@
//
// Tests of logging functions from log.h
-#ifdef __linux__
-#include <pthread.h>
-#include <signal.h>
-#include <unistd.h>
-#include <cmath>
-#endif // __linux__
-
#include <unordered_set>
#include <vector>
-// The C++ style guide recommends using <re2> instead of <regex>. However, the
-// former isn't available in V8.
-#include <regex> // NOLINT(build/c++11)
-#include "src/api.h"
+#include "src/api-inl.h"
+#include "src/builtins/builtins.h"
#include "src/log-utils.h"
#include "src/log.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/snapshot/natives.h"
-#include "src/utils.h"
#include "src/v8.h"
-#include "src/v8threads.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
@@ -68,35 +57,14 @@ namespace {
i::FLAG_logfile = i::Log::kLogToTemporaryFile; \
i::FLAG_logfile_per_isolate = false
-static const char* StrNStr(const char* s1, const char* s2, size_t n) {
- CHECK_EQ(s1[n], '\0');
- return strstr(s1, s2);
-}
-
-// Look for a log line which starts with {prefix} and ends with {suffix}.
-static const char* FindLogLine(const char* start, const char* end,
- const char* prefix,
- const char* suffix = nullptr) {
- CHECK_LT(start, end);
- CHECK_EQ(end[0], '\0');
- size_t prefixLength = strlen(prefix);
- // Loop through the input until we find /{prefix}[^\n]+{suffix}/.
- while (start < end) {
- const char* prefixResult = strstr(start, prefix);
- if (!prefixResult) return NULL;
- if (suffix == nullptr) return prefixResult;
- const char* suffixResult =
- StrNStr(prefixResult, suffix, (end - prefixResult));
- if (!suffixResult) return NULL;
- // Check that there are no newlines in between the {prefix} and the {suffix}
- // results.
- const char* newlineResult =
- StrNStr(prefixResult, "\n", (end - prefixResult));
- if (!newlineResult) return prefixResult;
- if (newlineResult > suffixResult) return prefixResult;
- start = prefixResult + prefixLength;
+static std::vector<std::string> Split(const std::string& s, char delimiter) {
+ std::vector<std::string> result;
+ std::string line;
+ std::istringstream stream(s);
+ while (std::getline(stream, line, delimiter)) {
+ result.push_back(line);
}
- return NULL;
+ return result;
}
class ScopedLoggerInitializer {
@@ -119,7 +87,6 @@ class ScopedLoggerInitializer {
if (temp_file_ != nullptr) fclose(temp_file_);
i::FLAG_prof = saved_prof_;
i::FLAG_log = saved_log_;
- log_.Dispose();
}
v8::Local<v8::Context>& env() { return env_; }
@@ -130,116 +97,81 @@ class ScopedLoggerInitializer {
Logger* logger() { return logger_; }
- void PrintLog(int requested_nof_lines = 0, const char* start = nullptr) {
- if (requested_nof_lines <= 0) {
- printf("%s", log_.start());
- return;
- }
- // Try to print the last {requested_nof_lines} of the log.
- if (start == nullptr) start = log_.start();
- const char* current = log_.end();
- int nof_lines = requested_nof_lines;
- while (current > start && nof_lines > 0) {
- current--;
- if (*current == '\n') nof_lines--;
- }
- printf(
- "======================================================\n"
- "Last %i log lines:\n"
- "======================================================\n"
- "...\n%s\n"
- "======================================================\n",
- requested_nof_lines, current);
- }
-
v8::Local<v8::String> GetLogString() {
- return v8::String::NewFromUtf8(isolate_, log_.start(),
- v8::NewStringType::kNormal, log_.length())
+ int length = static_cast<int>(raw_log_.size());
+ return v8::String::NewFromUtf8(isolate_, raw_log_.c_str(),
+ v8::NewStringType::kNormal, length)
.ToLocalChecked();
}
void StopLogging() {
bool exists = false;
- log_ = i::ReadFile(StopLoggingGetTempFile(), &exists, true);
+ raw_log_ = i::ReadFile(StopLoggingGetTempFile(), &exists, true);
+ log_ = Split(raw_log_, '\n');
CHECK(exists);
}
- const char* GetEndPosition() { return log_.start() + log_.length(); }
+ // Searches |log_| for a line which contains all the strings in |search_terms|
+ // as substrings, starting from the index |start|, and returns the index of
+ // the found line. Returns std::string::npos if no line is found.
+ size_t IndexOfLine(const std::vector<std::string>& search_terms,
+ size_t start = 0) {
+ for (size_t i = start; i < log_.size(); ++i) {
+ const std::string& line = log_.at(i);
+ bool all_terms_found = true;
+ for (const std::string& term : search_terms) {
+ all_terms_found &= line.find(term) != std::string::npos;
+ }
+ if (all_terms_found) return i;
+ }
+ return std::string::npos;
+ }
- const char* FindLine(const char* prefix, const char* suffix = nullptr,
- const char* start = nullptr) {
- // Make sure that StopLogging() has been called before.
- CHECK(log_.size());
- if (start == nullptr) start = log_.start();
- const char* end = GetEndPosition();
- return FindLogLine(start, end, prefix, suffix);
+ bool ContainsLine(const std::vector<std::string>& search_terms,
+ size_t start = 0) {
+ return IndexOfLine(search_terms, start) != std::string::npos;
}
- // Find all log lines specified by the {prefix, suffix} pairs and ensure they
- // occurr in the specified order.
- void FindLogLines(const char* pairs[][2], size_t limit,
- const char* start = nullptr) {
- const char* prefix = pairs[0][0];
- const char* suffix = pairs[0][1];
- const char* last_position = FindLine(prefix, suffix, start);
- if (last_position == nullptr) {
- PrintLog(100, start);
- V8_Fatal(__FILE__, __LINE__, "Could not find log line: %s ... %s", prefix,
- suffix);
+ // Calls IndexOfLine for each set of substring terms in
+ // |all_line_search_terms|, in order. Returns true if they're all found.
+ bool ContainsLinesInOrder(
+ const std::vector<std::vector<std::string>>& all_line_search_terms,
+ size_t start = 0) {
+ CHECK_GT(log_.size(), 0);
+ for (auto& search_terms : all_line_search_terms) {
+ start = IndexOfLine(search_terms, start);
+ if (start == std::string::npos) return false;
+ ++start; // Skip the found line.
}
- CHECK(last_position);
- for (size_t i = 1; i < limit; i++) {
- prefix = pairs[i][0];
- suffix = pairs[i][1];
- const char* position = FindLine(prefix, suffix, start);
- if (position == nullptr) {
- PrintLog(100, start);
- V8_Fatal(__FILE__, __LINE__, "Could not find log line: %s ... %s",
- prefix, suffix);
- }
- // Check that all string positions are in order.
- if (position <= last_position) {
- PrintLog(100, start);
- V8_Fatal(__FILE__, __LINE__,
- "Log statements not in expected order (prev=%p, current=%p): "
- "%s ... %s",
- reinterpret_cast<const void*>(last_position),
- reinterpret_cast<const void*>(position), prefix, suffix);
- }
- last_position = position;
+ return true;
+ }
+
+ std::unordered_set<uintptr_t> ExtractAllAddresses(std::string search_term,
+ size_t address_column) {
+ CHECK_GT(log_.size(), 0);
+ std::unordered_set<uintptr_t> result;
+ size_t start = 0;
+ while (true) {
+ start = IndexOfLine({search_term}, start);
+ if (start == std::string::npos) break;
+ std::vector<std::string> columns = Split(log_.at(start), ',');
+ CHECK_LT(address_column, columns.size());
+ uintptr_t address =
+ strtoll(columns.at(address_column).c_str(), nullptr, 16);
+ CHECK_GT(address, 0);
+ result.insert(address);
+ ++start; // Skip the found line.
}
+ return result;
}
+ void LogCodeObjects() { logger_->LogCodeObjects(); }
void LogCompiledFunctions() { logger_->LogCompiledFunctions(); }
void StringEvent(const char* name, const char* value) {
logger_->StringEvent(name, value);
}
- void ExtractAllAddresses(std::unordered_set<uintptr_t>* map,
- const char* prefix, int field_index) {
- // Make sure that StopLogging() has been called before.
- CHECK(log_.size());
- const char* current = log_.start();
- while (current != nullptr) {
- current = FindLine(prefix, nullptr, current);
- if (current == nullptr) return;
- // Find token number {index}.
- const char* previous;
- for (int i = 0; i <= field_index; i++) {
- previous = current;
- current = strchr(current + 1, ',');
- if (current == nullptr) break;
- // Skip the comma.
- current++;
- }
- if (current == nullptr) break;
- uintptr_t address = strtoll(previous, nullptr, 16);
- CHECK_LT(0, address);
- map->insert(address);
- }
- }
-
private:
FILE* StopLoggingGetTempFile() {
temp_file_ = logger_->TearDown();
@@ -257,7 +189,9 @@ class ScopedLoggerInitializer {
v8::HandleScope scope_;
v8::Local<v8::Context> env_;
Logger* logger_;
- i::Vector<const char> log_;
+
+ std::string raw_log_;
+ std::vector<std::string> log_;
DISALLOW_COPY_AND_ASSIGN(ScopedLoggerInitializer);
};
@@ -268,15 +202,20 @@ class TestCodeEventHandler : public v8::CodeEventHandler {
: v8::CodeEventHandler(isolate), isolate_(isolate) {}
size_t CountLines(std::string prefix, std::string suffix = std::string()) {
- if (!log_.length()) return 0;
-
- std::regex expression("(^|\\n)" + prefix + ".*" + suffix + "(?=\\n|$)");
-
- size_t match_count(std::distance(
- std::sregex_iterator(log_.begin(), log_.end(), expression),
- std::sregex_iterator()));
+ if (event_log_.empty()) return 0;
+
+ size_t match = 0;
+ for (const std::string& line : event_log_) {
+ size_t prefix_pos = line.find(prefix);
+ if (prefix_pos == std::string::npos) continue;
+ size_t suffix_pos = line.rfind(suffix);
+ if (suffix_pos == std::string::npos) continue;
+ if (suffix_pos != line.length() - suffix.length()) continue;
+ if (prefix_pos >= suffix_pos) continue;
+ match++;
+ }
- return match_count;
+ return match;
}
void Handle(v8::CodeEvent* code_event) override {
@@ -284,8 +223,7 @@ class TestCodeEventHandler : public v8::CodeEventHandler {
log_line += v8::CodeEvent::GetCodeEventTypeName(code_event->GetCodeType());
log_line += " ";
log_line += FormatName(code_event);
- log_line += "\n";
- log_ += log_line;
+ event_log_.push_back(log_line);
}
private:
@@ -294,7 +232,7 @@ class TestCodeEventHandler : public v8::CodeEventHandler {
if (name.empty()) {
v8::Local<v8::String> functionName = code_event->GetFunctionName();
std::string buffer(functionName->Utf8Length(isolate_) + 1, 0);
- functionName->WriteUtf8(&buffer[0],
+ functionName->WriteUtf8(isolate_, &buffer[0],
functionName->Utf8Length(isolate_) + 1);
// Sanitize name, removing unwanted \0 resulted from WriteUtf8
name = std::string(buffer.c_str());
@@ -303,195 +241,12 @@ class TestCodeEventHandler : public v8::CodeEventHandler {
return name;
}
- std::string log_;
+ std::vector<std::string> event_log_;
v8::Isolate* isolate_;
};
} // namespace
-TEST(FindLogLine) {
- const char* string =
- "prefix1, stuff, suffix1\n"
- "prefix2, stuff\n, suffix2\n"
- "prefix3suffix3\n"
- "prefix4 suffix4";
- const char* end = string + strlen(string);
- // Make sure the vector contains the terminating \0 character.
- CHECK(FindLogLine(string, end, "prefix1, stuff, suffix1"));
- CHECK(FindLogLine(string, end, "prefix1, stuff"));
- CHECK(FindLogLine(string, end, "prefix1"));
- CHECK(FindLogLine(string, end, "prefix1", "suffix1"));
- CHECK(FindLogLine(string, end, "prefix1", "suffix1"));
- CHECK(!FindLogLine(string, end, "prefix2", "suffix2"));
- CHECK(!FindLogLine(string, end, "prefix1", "suffix2"));
- CHECK(!FindLogLine(string, end, "prefix1", "suffix3"));
- CHECK(FindLogLine(string, end, "prefix3", "suffix3"));
- CHECK(FindLogLine(string, end, "prefix4", "suffix4"));
- CHECK(!FindLogLine(string, end, "prefix4", "suffix4XXXXXXXXXXXX"));
- CHECK(
- !FindLogLine(string, end, "prefix4XXXXXXXXXXXXXXXXXXXXXXxxx", "suffix4"));
- CHECK(!FindLogLine(string, end, "suffix", "suffix5XXXXXXXXXXXXXXXXXXXX"));
-}
-
-// BUG(913). Need to implement support for profiling multiple VM threads.
-#if 0
-
-namespace {
-
-class LoopingThread : public v8::internal::Thread {
- public:
- explicit LoopingThread(v8::internal::Isolate* isolate)
- : v8::internal::Thread(isolate),
- semaphore_(new v8::internal::Semaphore(0)),
- run_(true) {
- }
-
- virtual ~LoopingThread() { delete semaphore_; }
-
- void Run() {
- self_ = pthread_self();
- RunLoop();
- }
-
- void SendSigProf() { pthread_kill(self_, SIGPROF); }
-
- void Stop() { run_ = false; }
-
- bool WaitForRunning() { return semaphore_->Wait(1000000); }
-
- protected:
- bool IsRunning() { return run_; }
-
- virtual void RunLoop() = 0;
-
- void SetV8ThreadId() {
- v8_thread_id_ = v8::V8::GetCurrentThreadId();
- }
-
- void SignalRunning() { semaphore_->Signal(); }
-
- private:
- v8::internal::Semaphore* semaphore_;
- bool run_;
- pthread_t self_;
- int v8_thread_id_;
-};
-
-
-class LoopingJsThread : public LoopingThread {
- public:
- explicit LoopingJsThread(v8::internal::Isolate* isolate)
- : LoopingThread(isolate) { }
- void RunLoop() {
- v8::Locker locker;
- CHECK_NOT_NULL(CcTest::i_isolate());
- CHECK_GT(CcTest::i_isolate()->thread_manager()->CurrentId(), 0);
- SetV8ThreadId();
- while (IsRunning()) {
- v8::HandleScope scope;
- v8::Persistent<v8::Context> context = v8::Context::New();
- CHECK(!context.IsEmpty());
- {
- v8::Context::Scope context_scope(context);
- SignalRunning();
- CompileRun(
- "var j; for (var i=0; i<10000; ++i) { j = Math.sin(i); }");
- }
- context.Dispose();
- i::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
- }
- }
-};
-
-
-class LoopingNonJsThread : public LoopingThread {
- public:
- explicit LoopingNonJsThread(v8::internal::Isolate* isolate)
- : LoopingThread(isolate) { }
- void RunLoop() {
- v8::Locker locker;
- v8::Unlocker unlocker;
- // Now thread has V8's id, but will not run VM code.
- CHECK_NOT_NULL(CcTest::i_isolate());
- CHECK_GT(CcTest::i_isolate()->thread_manager()->CurrentId(), 0);
- double i = 10;
- SignalRunning();
- while (IsRunning()) {
- i = std::sin(i);
- i::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(1));
- }
- }
-};
-
-
-class TestSampler : public v8::internal::Sampler {
- public:
- explicit TestSampler(v8::internal::Isolate* isolate)
- : Sampler(isolate, 0, true, true),
- semaphore_(new v8::internal::Semaphore(0)),
- was_sample_stack_called_(false) {
- }
-
- ~TestSampler() { delete semaphore_; }
-
- void SampleStack(v8::internal::TickSample*) {
- was_sample_stack_called_ = true;
- }
-
- void Tick(v8::internal::TickSample*) { semaphore_->Signal(); }
-
- bool WaitForTick() { return semaphore_->Wait(1000000); }
-
- void Reset() { was_sample_stack_called_ = false; }
-
- bool WasSampleStackCalled() { return was_sample_stack_called_; }
-
- private:
- v8::internal::Semaphore* semaphore_;
- bool was_sample_stack_called_;
-};
-
-
-} // namespace
-
-TEST(ProfMultipleThreads) {
- TestSampler* sampler = nullptr;
- {
- v8::Locker locker;
- sampler = new TestSampler(CcTest::i_isolate());
- sampler->Start();
- CHECK(sampler->IsActive());
- }
-
- LoopingJsThread jsThread(CcTest::i_isolate());
- jsThread.Start();
- LoopingNonJsThread nonJsThread(CcTest::i_isolate());
- nonJsThread.Start();
-
- CHECK(!sampler->WasSampleStackCalled());
- jsThread.WaitForRunning();
- jsThread.SendSigProf();
- CHECK(sampler->WaitForTick());
- CHECK(sampler->WasSampleStackCalled());
- sampler->Reset();
- CHECK(!sampler->WasSampleStackCalled());
- nonJsThread.WaitForRunning();
- nonJsThread.SendSigProf();
- CHECK(!sampler->WaitForTick());
- CHECK(!sampler->WasSampleStackCalled());
- sampler->Stop();
-
- jsThread.Stop();
- nonJsThread.Stop();
- jsThread.Join();
- nonJsThread.Join();
-
- delete sampler;
-}
-
-#endif // __linux__
-
-
// Test for issue http://crbug.com/23768 in Chromium.
// Heap can contain scripts with already disposed external sources.
// We need to verify that LogCompiledFunctions doesn't crash on them.
@@ -535,17 +290,15 @@ TEST(Issue23768) {
CcTest::i_isolate());
  // This situation can happen if the source was an external string disposed
// by its owner.
- i_source->set_resource(nullptr);
+ i_source->SetResource(CcTest::i_isolate(), nullptr);
// Must not crash.
CcTest::i_isolate()->logger()->LogCompiledFunctions();
}
-
static void ObjMethod1(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
-
TEST(LogCallbacks) {
SETUP_FLAGS();
v8::Isolate::CreateParams create_params;
@@ -572,21 +325,20 @@ TEST(LogCallbacks) {
CompileRun("Obj.prototype.method1.toString();");
logger.LogCompiledFunctions();
-
logger.StopLogging();
Address ObjMethod1_entry = reinterpret_cast<Address>(ObjMethod1);
#if USES_FUNCTION_DESCRIPTORS
ObjMethod1_entry = *FUNCTION_ENTRYPOINT_ADDRESS(ObjMethod1_entry);
#endif
- i::EmbeddedVector<char, 100> ref_data;
- i::SNPrintF(ref_data, ",0x%" V8PRIxPTR ",1,method1", ObjMethod1_entry);
- CHECK(logger.FindLine("code-creation,Callback,-2,", ref_data.start()));
+ i::EmbeddedVector<char, 100> suffix_buffer;
+ i::SNPrintF(suffix_buffer, ",0x%" V8PRIxPTR ",1,method1", ObjMethod1_entry);
+ CHECK(logger.ContainsLine(
+ {"code-creation,Callback,-2,", std::string(suffix_buffer.start())}));
}
isolate->Dispose();
}
-
static void Prop1Getter(v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info) {
}
@@ -600,7 +352,6 @@ static void Prop2Getter(v8::Local<v8::String> property,
const v8::PropertyCallbackInfo<v8::Value>& info) {
}
-
TEST(LogAccessorCallbacks) {
SETUP_FLAGS();
v8::Isolate::CreateParams create_params;
@@ -627,8 +378,8 @@ TEST(LogAccessorCallbacks) {
EmbeddedVector<char, 100> prop1_getter_record;
i::SNPrintF(prop1_getter_record, ",0x%" V8PRIxPTR ",1,get prop1",
Prop1Getter_entry);
- CHECK(logger.FindLine("code-creation,Callback,-2,",
- prop1_getter_record.start()));
+ CHECK(logger.ContainsLine({"code-creation,Callback,-2,",
+ std::string(prop1_getter_record.start())}));
Address Prop1Setter_entry = reinterpret_cast<Address>(Prop1Setter);
#if USES_FUNCTION_DESCRIPTORS
@@ -637,8 +388,8 @@ TEST(LogAccessorCallbacks) {
EmbeddedVector<char, 100> prop1_setter_record;
i::SNPrintF(prop1_setter_record, ",0x%" V8PRIxPTR ",1,set prop1",
Prop1Setter_entry);
- CHECK(logger.FindLine("code-creation,Callback,-2,",
- prop1_setter_record.start()));
+ CHECK(logger.ContainsLine({"code-creation,Callback,-2,",
+ std::string(prop1_setter_record.start())}));
Address Prop2Getter_entry = reinterpret_cast<Address>(Prop2Getter);
#if USES_FUNCTION_DESCRIPTORS
@@ -647,8 +398,8 @@ TEST(LogAccessorCallbacks) {
EmbeddedVector<char, 100> prop2_getter_record;
i::SNPrintF(prop2_getter_record, ",0x%" V8PRIxPTR ",1,get prop2",
Prop2Getter_entry);
- CHECK(logger.FindLine("code-creation,Callback,-2,",
- prop2_getter_record.start()));
+ CHECK(logger.ContainsLine({"code-creation,Callback,-2,",
+ std::string(prop2_getter_record.start())}));
}
isolate->Dispose();
}
@@ -716,7 +467,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
v8::Local<v8::String> s = result->ToString(logger.env()).ToLocalChecked();
i::ScopedVector<char> data(s->Utf8Length(isolate) + 1);
CHECK(data.start());
- s->WriteUtf8(data.start());
+ s->WriteUtf8(isolate, data.start());
FATAL("%s\n", data.start());
}
}
@@ -733,11 +484,12 @@ TEST(LogVersion) {
ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
logger.StopLogging();
- i::EmbeddedVector<char, 100> ref_data;
- i::SNPrintF(ref_data, "%d,%d,%d,%d,%d", i::Version::GetMajor(),
+ i::EmbeddedVector<char, 100> line_buffer;
+ i::SNPrintF(line_buffer, "%d,%d,%d,%d,%d", i::Version::GetMajor(),
i::Version::GetMinor(), i::Version::GetBuild(),
i::Version::GetPatch(), i::Version::IsCandidate());
- CHECK(logger.FindLine("v8-version,", ref_data.start()));
+ CHECK(
+ logger.ContainsLine({"v8-version,", std::string(line_buffer.start())}));
}
isolate->Dispose();
}
@@ -810,7 +562,6 @@ TEST(LogAll) {
{
ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
- // Function that will
const char* source_text =
"function testAddFn(a,b) { return a + b };"
"let result;"
@@ -822,16 +573,16 @@ TEST(LogAll) {
logger.StopLogging();
  // We should find at least one code-creation event for testAddFn();
- CHECK(logger.FindLine("api,v8::Context::New"));
- CHECK(logger.FindLine("timer-event-start", "V8.CompileCode"));
- CHECK(logger.FindLine("timer-event-end", "V8.CompileCode"));
- CHECK(logger.FindLine("code-creation,Script", ":1:1"));
- CHECK(logger.FindLine("api,v8::Script::Run"));
- CHECK(logger.FindLine("code-creation,LazyCompile,", "testAddFn"));
+ CHECK(logger.ContainsLine({"api,v8::Context::New"}));
+ CHECK(logger.ContainsLine({"timer-event-start", "V8.CompileCode"}));
+ CHECK(logger.ContainsLine({"timer-event-end", "V8.CompileCode"}));
+ CHECK(logger.ContainsLine({"code-creation,Script", ":1:1"}));
+ CHECK(logger.ContainsLine({"api,v8::Script::Run"}));
+ CHECK(logger.ContainsLine({"code-creation,LazyCompile,", "testAddFn"}));
if (i::FLAG_opt && !i::FLAG_always_opt) {
- CHECK(logger.FindLine("code-deopt,", "soft"));
- CHECK(logger.FindLine("timer-event-start", "V8.DeoptimizeCode"));
- CHECK(logger.FindLine("timer-event-end", "V8.DeoptimizeCode"));
+ CHECK(logger.ContainsLine({"code-deopt,", "soft"}));
+ CHECK(logger.ContainsLine({"timer-event-start", "V8.DeoptimizeCode"}));
+ CHECK(logger.ContainsLine({"timer-event-end", "V8.DeoptimizeCode"}));
}
}
isolate->Dispose();
@@ -854,8 +605,8 @@ TEST(LogInterpretedFramesNativeStack) {
logger.StopLogging();
- CHECK(logger.FindLine("InterpretedFunction",
- "testLogInterpretedFramesNativeStack"));
+ CHECK(logger.ContainsLine(
+ {"InterpretedFunction", "testLogInterpretedFramesNativeStack"}));
}
isolate->Dispose();
}
@@ -978,9 +729,9 @@ TEST(TraceMaps) {
logger.StopLogging();
// Mostly superficial checks.
- CHECK(logger.FindLine("map,InitialMap", ",0x"));
- CHECK(logger.FindLine("map,Transition", ",0x"));
- CHECK(logger.FindLine("map-details", ",0x"));
+ CHECK(logger.ContainsLine({"map,InitialMap", ",0x"}));
+ CHECK(logger.ContainsLine({"map,Transition", ",0x"}));
+ CHECK(logger.ContainsLine({"map-details", ",0x"}));
}
i::FLAG_trace_maps = false;
isolate->Dispose();
@@ -996,9 +747,8 @@ TEST(LogMaps) {
{
ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
logger.StopLogging();
- // Extract all the map-detail entry addresses from the log.
- std::unordered_set<uintptr_t> map_addresses;
- logger.ExtractAllAddresses(&map_addresses, "map-details", 2);
+ std::unordered_set<uintptr_t> map_addresses =
+ logger.ExtractAllAddresses("map-details", 2);
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
i::HeapIterator iterator(heap);
i::DisallowHeapAllocation no_gc;
@@ -1011,15 +761,12 @@ TEST(LogMaps) {
if (!obj->IsMap()) continue;
uintptr_t address = reinterpret_cast<uintptr_t>(obj);
if (map_addresses.find(address) != map_addresses.end()) continue;
- logger.PrintLog(200);
i::Map::cast(obj)->Print();
V8_Fatal(__FILE__, __LINE__,
"Map (%p, #%zu) was not logged during startup with --trace-maps!"
- "\n# Expected Log Line: map_details, ... %p"
- "\n# Use logger::PrintLog() for more details.",
+ "\n# Expected Log Line: map_details, ... %p",
reinterpret_cast<void*>(obj), i, reinterpret_cast<void*>(obj));
}
- logger.PrintLog(200);
}
i::FLAG_log_function_events = false;
isolate->Dispose();
@@ -1045,14 +792,12 @@ TEST(ConsoleTimeEvents) {
logger.StopLogging();
- const char* pairs[][2] = {{"timer-event-start,default,", nullptr},
- {"timer-event-end,default,", nullptr},
- {"timer-event,default,", nullptr},
- {"timer-event-start,timerEvent1,", nullptr},
- {"timer-event-end,timerEvent1,", nullptr},
- {"timer-event,timerEvent2,", nullptr},
- {"timer-event,timerEvent3,", nullptr}};
- logger.FindLogLines(pairs, arraysize(pairs));
+ std::vector<std::vector<std::string>> lines = {
+ {"timer-event-start,default,"}, {"timer-event-end,default,"},
+ {"timer-event,default,"}, {"timer-event-start,timerEvent1,"},
+ {"timer-event-end,timerEvent1,"}, {"timer-event,timerEvent2,"},
+ {"timer-event,timerEvent3,"}};
+ CHECK(logger.ContainsLinesInOrder(lines));
}
isolate->Dispose();
@@ -1096,13 +841,13 @@ TEST(LogFunctionEvents) {
logger.StopLogging();
// Ignore all the log entries that happened before warmup
- const char* start =
- logger.FindLine("function,first-execution", "warmUpEndMarkerFunction");
- CHECK_NOT_NULL(start);
- const char* pairs[][2] = {
+ size_t start = logger.IndexOfLine(
+ {"function,first-execution", "warmUpEndMarkerFunction"});
+ CHECK(start != std::string::npos);
+ std::vector<std::vector<std::string>> lines = {
// Create a new script
- {"script,create", nullptr},
- {"script-details", nullptr},
+ {"script,create"},
+ {"script-details"},
// Step 1: parsing top-level script, preparsing functions
{"function,preparse-", ",lazyNotExecutedFunction"},
// Missing name for preparsing lazyInnerFunction
@@ -1113,11 +858,11 @@ TEST(LogFunctionEvents) {
// Missing name for inner preparsing of Foo.foo
// {"function,preparse-", nullptr},
// Missing name for top-level script.
- {"function,parse-script,", nullptr},
+ {"function,parse-script,"},
// Step 2: compiling top-level script and eager functions
// - Compiling script without name.
- {"function,compile,", nullptr},
+ {"function,compile,"},
{"function,compile,", ",eagerFunction"},
// Step 3: start executing script
@@ -1141,8 +886,38 @@ TEST(LogFunctionEvents) {
{"function,compile-lazy,", ",Foo.foo"},
{"function,first-execution,", ",Foo.foo"},
};
- logger.FindLogLines(pairs, arraysize(pairs), start);
+ CHECK(logger.ContainsLinesInOrder(lines, start));
}
i::FLAG_log_function_events = false;
isolate->Dispose();
}
+
+TEST(BuiltinsNotLoggedAsLazyCompile) {
+ SETUP_FLAGS();
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+
+ logger.LogCodeObjects();
+ logger.LogCompiledFunctions();
+ logger.StopLogging();
+
+ i::Handle<i::Code> builtin = logger.i_isolate()->builtins()->builtin_handle(
+ i::Builtins::kBooleanConstructor);
+ i::EmbeddedVector<char, 100> buffer;
+
+ // Should only be logged as "Builtin" with a name, never as "LazyCompile".
+ i::SNPrintF(buffer, ",0x%" V8PRIxPTR ",%d,BooleanConstructor",
+ builtin->InstructionStart(), builtin->InstructionSize());
+ CHECK(logger.ContainsLine(
+ {"code-creation,Builtin,3,", std::string(buffer.start())}));
+
+ i::SNPrintF(buffer, ",0x%" V8PRIxPTR ",%d,", builtin->InstructionStart(),
+ builtin->InstructionSize());
+ CHECK(!logger.ContainsLine(
+ {"code-creation,LazyCompile,3,", std::string(buffer.start())}));
+ }
+ isolate->Dispose();
+}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index d442903ea0..3f115af416 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -33,6 +33,7 @@
#include "src/simulator.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index accee77f34..18404d6629 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -28,7 +28,7 @@
#include <stdlib.h>
#include <iostream> // NOLINT(readability/streams)
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/macro-assembler.h"
#include "src/mips/macro-assembler-mips.h"
@@ -236,7 +236,7 @@ TEST(jump_tables5) {
{
__ BlockTrampolinePoolFor(kNumCases + 6 + 1);
PredictableCodeSizeScope predictable(
- masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
+ masm, kNumCases * kPointerSize + ((6 + 1) * kInstrSize));
__ addiupc(at, 6 + 1);
__ Lsa(at, at, a0, 2);
@@ -294,7 +294,6 @@ TEST(jump_tables6) {
const int kSwitchTableCases = 40;
- const int kInstrSize = Assembler::kInstrSize;
const int kMaxBranchOffset = Assembler::kMaxBranchOffset;
const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize;
const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize;
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index f982e869fe..9a6e319363 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -309,7 +309,7 @@ TEST(jump_tables5) {
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6 + 1);
PredictableCodeSizeScope predictable(
- masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
+ masm, kNumCases * kPointerSize + ((6 + 1) * kInstrSize));
__ addiupc(at, 6 + 1);
__ Dlsa(at, at, a0, 3);
@@ -368,7 +368,6 @@ TEST(jump_tables6) {
const int kSwitchTableCases = 40;
- const int kInstrSize = Assembler::kInstrSize;
const int kMaxBranchOffset = Assembler::kMaxBranchOffset;
const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize;
const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize;
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 54924def87..49d57aed21 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -35,6 +35,7 @@
#include "src/objects-inl.h"
#include "src/simulator.h"
#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 4a2dccae6d..3f65691141 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index b48cc493c7..72e3711405 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -33,7 +33,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/compiler.h"
@@ -92,16 +92,16 @@ TEST(ScanKeywords) {
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
auto stream = i::ScannerStream::ForTesting(keyword, length);
- i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(&unicode_cache, stream.get(), false);
+ scanner.Initialize();
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
// Removing characters will make keyword matching fail.
{
auto stream = i::ScannerStream::ForTesting(keyword, length - 1);
- i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(&unicode_cache, stream.get(), false);
+ scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -111,8 +111,8 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
auto stream = i::ScannerStream::ForTesting(buffer, length + 1);
- i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(&unicode_cache, stream.get(), false);
+ scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -121,8 +121,8 @@ TEST(ScanKeywords) {
i::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
auto stream = i::ScannerStream::ForTesting(buffer, length);
- i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(&unicode_cache, stream.get(), false);
+ scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -188,8 +188,8 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(i_isolate->unicode_cache());
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(i_isolate->unicode_cache(), stream.get(), false);
+ scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
i_isolate->ast_string_constants(),
@@ -207,8 +207,8 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::Scanner scanner(i_isolate->unicode_cache());
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(i_isolate->unicode_cache(), stream.get(), false);
+ scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
i_isolate->ast_string_constants(),
@@ -232,8 +232,8 @@ TEST(ScanHtmlComments) {
// Disallow HTML comments.
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get(), true);
+ i::Scanner scanner(&unicode_cache, stream.get(), true);
+ scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::ILLEGAL, scanner.Next());
}
@@ -241,8 +241,8 @@ TEST(ScanHtmlComments) {
// Skip HTML comments:
{
auto stream = i::ScannerStream::ForTesting(src);
- i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(&unicode_cache, stream.get(), false);
+ scanner.Initialize();
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -280,8 +280,8 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = i_isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(i_isolate->unicode_cache());
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(i_isolate->unicode_cache(), stream.get(), false);
+ scanner.Initialize();
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
@@ -313,8 +313,8 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::Scanner scanner(isolate->unicode_cache());
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(isolate->unicode_cache(), stream.get(), false);
+ scanner.Initialize();
// Preparser defaults to disallowing natives syntax.
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -348,8 +348,8 @@ TEST(RegressChromium62639) {
// failed in debug mode, and sometimes crashed in release mode.
auto stream = i::ScannerStream::ForTesting(program);
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), stream.get(), false);
+ scanner.Initialize();
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->ast_string_constants(),
@@ -381,8 +381,8 @@ TEST(PreParseOverflow) {
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
- i::Scanner scanner(isolate->unicode_cache());
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(isolate->unicode_cache(), stream.get(), false);
+ scanner.Initialize();
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -396,13 +396,12 @@ TEST(PreParseOverflow) {
CHECK_EQ(i::PreParser::kPreParseStackOverflow, result);
}
-
void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream, false);
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), stream, false);
+ scanner.Initialize();
int i = 0;
do {
@@ -479,8 +478,8 @@ TEST(StreamScanner) {
void TestScanRegExp(const char* re_source, const char* expected) {
auto stream = i::ScannerStream::ForTesting(re_source);
i::HandleScope scope(CcTest::i_isolate());
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get(), false);
+ i::Scanner scanner(CcTest::i_isolate()->unicode_cache(), stream.get(), false);
+ scanner.Initialize();
i::Token::Value start = scanner.peek();
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
@@ -1172,9 +1171,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
// Preparse the data.
i::PendingCompilationErrorHandler pending_error_handler;
if (test_preparser) {
- i::Scanner scanner(isolate->unicode_cache());
std::unique_ptr<i::Utf16CharacterStream> stream(
i::ScannerStream::For(isolate, source));
+ i::Scanner scanner(isolate->unicode_cache(), stream.get(), is_module);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->ast_string_constants(),
@@ -1184,7 +1183,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
isolate->counters()->runtime_call_stats(),
isolate->logger(), -1, is_module);
SetParserFlags(&preparser, flags);
- scanner.Initialize(stream.get(), is_module);
+ scanner.Initialize();
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index b53bf148e6..c4ad1babc5 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -28,7 +28,7 @@
// Tests of profiles generator and utilities.
#include "include/v8-profiler.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index bcabebf639..c65714a930 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -32,7 +32,7 @@
#include "include/v8.h"
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/ast.h"
#include "src/char-predicates-inl.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index e6220be889..f99b9df399 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -74,16 +74,23 @@ TEST(TestAllocationSiteMaps) {
#undef CHECK_IN_RO_SPACE
namespace {
-bool IsInitiallyMutable(Heap* heap, Object* object) {
+bool IsInitiallyMutable(Factory* factory, Address object_address) {
// Entries in this list are in STRONG_MUTABLE_ROOT_LIST, but may initially point
// to objects that are in RO_SPACE.
-#define INITIALLY_READ_ONLY_ROOT_LIST(V) \
- V(materialized_objects) \
- V(retaining_path_targets) \
- V(retained_maps)
+#define INITIALLY_READ_ONLY_ROOT_LIST(V) \
+ V(builtins_constants_table) \
+ V(detached_contexts) \
+ V(feedback_vectors_for_profiling_tools) \
+ V(materialized_objects) \
+ V(microtask_queue) \
+ V(noscript_shared_function_infos) \
+ V(retained_maps) \
+ V(retaining_path_targets) \
+ V(serialized_global_proxy_sizes) \
+ V(serialized_objects)
#define TEST_CAN_BE_READ_ONLY(name) \
- if (heap->name() == object) return false;
+ if (factory->name().address() == object_address) return false;
INITIALLY_READ_ONLY_ROOT_LIST(TEST_CAN_BE_READ_ONLY)
#undef TEST_CAN_BE_READ_ONLY
#undef INITIALLY_READ_ONLY_ROOT_LIST
@@ -91,15 +98,21 @@ bool IsInitiallyMutable(Heap* heap, Object* object) {
}
} // namespace
-#define CHECK_NOT_IN_RO_SPACE(name) \
- Object* name = heap->name(); \
- if (name->IsHeapObject() && IsInitiallyMutable(heap, name)) \
- CHECK_NE(RO_SPACE, GetSpaceFromObject(name));
+// The CHECK_EQ line is there just to ensure that the root is publicly
+// accessible from Heap, but ultimately the factory is used as it provides
+// handles that have the address in the root table.
+#define CHECK_NOT_IN_RO_SPACE(name) \
+ Handle<Object> name = factory->name(); \
+ CHECK_EQ(*name, heap->name()); \
+ if (name->IsHeapObject() && IsInitiallyMutable(factory, name.address())) \
+ CHECK_NE(RO_SPACE, \
+ GetSpaceFromObject(reinterpret_cast<HeapObject*>(*name)));
// The following tests check that all the roots accessible via public Heap
// accessors are not in RO_SPACE with the exception of the objects listed in
// INITIALLY_READ_ONLY_ROOT_LIST.
TEST(TestHeapRootsNotReadOnly) {
+ Factory* factory = CcTest::i_isolate()->factory();
Heap* heap = CcTest::i_isolate()->heap();
#define TEST_ROOT(type, name, camel_name) CHECK_NOT_IN_RO_SPACE(name)
@@ -108,6 +121,7 @@ TEST(TestHeapRootsNotReadOnly) {
}
TEST(TestAccessorInfosNotReadOnly) {
+ Factory* factory = CcTest::i_isolate()->factory();
Heap* heap = CcTest::i_isolate()->heap();
#define TEST_ROOT(name, AccessorName) CHECK_NOT_IN_RO_SPACE(name##_accessor)
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 8a2fe60b11..d3fd665a66 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -31,7 +31,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/bootstrapper.h"
#include "src/compilation-cache.h"
@@ -40,6 +40,8 @@
#include "src/heap/spaces.h"
#include "src/macro-assembler-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/builtin-serializer.h"
@@ -372,7 +374,6 @@ UNINITIALIZED_TEST(StartupSerializerRootMapDependencies) {
// - NullValue
// - Internalized one byte string
// - Map for Internalized one byte string
- // - WeakCell
// - TheHoleValue
// - HeapNumber
// HeapNumber objects require kDoubleUnaligned on 32-bit
@@ -381,7 +382,6 @@ UNINITIALIZED_TEST(StartupSerializerRootMapDependencies) {
v8::internal::Handle<Map> map(
ReadOnlyRoots(internal_isolate).one_byte_internalized_string_map(),
internal_isolate);
- Map::WeakCellForMap(internal_isolate, map);
// Need to avoid DCHECKs inside SnapshotCreator.
snapshot_creator.SetDefaultContext(v8::Context::New(isolate));
}
@@ -1365,9 +1365,9 @@ static Handle<SharedFunctionInfo> CompileScript(
Isolate* isolate, Handle<String> source, Handle<String> name,
ScriptData* cached_data, v8::ScriptCompiler::CompileOptions options) {
return Compiler::GetSharedFunctionInfoForScript(
- source, Compiler::ScriptDetails(name), v8::ScriptOriginOptions(),
- nullptr, cached_data, options, ScriptCompiler::kNoCacheNoReason,
- NOT_NATIVES_CODE)
+ isolate, source, Compiler::ScriptDetails(name),
+ v8::ScriptOriginOptions(), nullptr, cached_data, options,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
}
@@ -1376,9 +1376,9 @@ static Handle<SharedFunctionInfo> CompileScriptAndProduceCache(
ScriptData** script_data, v8::ScriptCompiler::CompileOptions options) {
Handle<SharedFunctionInfo> sfi =
Compiler::GetSharedFunctionInfoForScript(
- source, Compiler::ScriptDetails(name), v8::ScriptOriginOptions(),
- nullptr, nullptr, options, ScriptCompiler::kNoCacheNoReason,
- NOT_NATIVES_CODE)
+ isolate, source, Compiler::ScriptDetails(name),
+ v8::ScriptOriginOptions(), nullptr, nullptr, options,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
std::unique_ptr<ScriptCompiler::CachedData> cached_data(
ScriptCompiler::CreateCodeCache(ToApiHandle<UnboundScript>(sfi)));
@@ -1440,6 +1440,13 @@ void TestCodeSerializerOnePlusOneImpl() {
TEST(CodeSerializerOnePlusOne) { TestCodeSerializerOnePlusOneImpl(); }
+TEST(CodeSerializerOnePlusOneWithDebugger) {
+ v8::HandleScope scope(CcTest::isolate());
+ static v8::debug::DebugDelegate dummy_delegate;
+ v8::debug::SetDebugDelegate(CcTest::isolate(), &dummy_delegate);
+ TestCodeSerializerOnePlusOneImpl();
+}
+
TEST(CodeSerializerOnePlusOne1) {
FLAG_serialization_chunk_size = 1;
TestCodeSerializerOnePlusOneImpl();
@@ -1903,9 +1910,9 @@ TEST(CodeSerializerExternalString) {
  // This prevents the GC from trying to free stack-allocated resources.
i::Handle<i::ExternalOneByteString>::cast(one_byte_string)
- ->set_resource(nullptr);
+ ->SetResource(isolate, nullptr);
i::Handle<i::ExternalTwoByteString>::cast(two_byte_string)
- ->set_resource(nullptr);
+ ->SetResource(isolate, nullptr);
delete cache;
}
@@ -1963,7 +1970,8 @@ TEST(CodeSerializerLargeExternalString) {
CHECK_EQ(42.0, copy_result->Number());
  // This prevents the GC from trying to free stack-allocated resources.
- i::Handle<i::ExternalOneByteString>::cast(name)->set_resource(nullptr);
+ i::Handle<i::ExternalOneByteString>::cast(name)->SetResource(isolate,
+ nullptr);
delete cache;
string.Dispose();
}
@@ -2014,7 +2022,8 @@ TEST(CodeSerializerExternalScriptName) {
CHECK_EQ(10.0, copy_result->Number());
  // This prevents the GC from trying to free stack-allocated resources.
- i::Handle<i::ExternalOneByteString>::cast(name)->set_resource(nullptr);
+ i::Handle<i::ExternalOneByteString>::cast(name)->SetResource(isolate,
+ nullptr);
delete cache;
}
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index d5ef8af652..8aa621b1c1 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -34,7 +34,7 @@
#include "src/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/heap/factory.h"
#include "src/messages.h"
#include "src/objects-inl.h"
@@ -951,7 +951,8 @@ TEST(Utf8Conversion) {
for (int j = 0; j < 11; j++)
buffer[j] = kNoChar;
int chars_written;
- int written = mixed->WriteUtf8(buffer, i, &chars_written);
+ int written =
+ mixed->WriteUtf8(CcTest::isolate(), buffer, i, &chars_written);
CHECK_EQ(lengths[i], written);
CHECK_EQ(char_lengths[i], chars_written);
// Check that the contents are correct
@@ -1095,7 +1096,7 @@ TEST(JSONStringifySliceMadeExternal) {
int length = underlying->Length();
uc16* two_byte = NewArray<uc16>(length + 1);
- underlying->Write(two_byte);
+ underlying->Write(CcTest::isolate(), two_byte);
Resource* resource = new Resource(two_byte, length);
CHECK(underlying->MakeExternal(resource));
CHECK(v8::Utils::OpenHandle(*slice)->IsSlicedString());
@@ -1245,7 +1246,8 @@ TEST(SliceFromExternal) {
CHECK(SlicedString::cast(*slice)->parent()->IsExternalString());
CHECK(slice->IsFlat());
  // This prevents the GC from trying to free stack-allocated resources.
- i::Handle<i::ExternalOneByteString>::cast(string)->set_resource(nullptr);
+ i::Handle<i::ExternalOneByteString>::cast(string)->SetResource(
+ CcTest::i_isolate(), nullptr);
}
@@ -1526,8 +1528,9 @@ TEST(FormatMessage) {
Handle<String> arg1 = isolate->factory()->NewStringFromAsciiChecked("arg1");
Handle<String> arg2 = isolate->factory()->NewStringFromAsciiChecked("arg2");
Handle<String> result =
- MessageTemplate::FormatMessage(MessageTemplate::kPropertyNotFunction,
- arg0, arg1, arg2).ToHandleChecked();
+ MessageTemplate::FormatMessage(
+ isolate, MessageTemplate::kPropertyNotFunction, arg0, arg1, arg2)
+ .ToHandleChecked();
Handle<String> expected = isolate->factory()->NewStringFromAsciiChecked(
"'arg0' returned for property 'arg1' of object 'arg2' is not a function");
CHECK(String::Equals(isolate, result, expected));
@@ -1661,6 +1664,35 @@ TEST(HashArrayIndexStrings) {
isolate->factory()->one_string()->Hash());
}
+TEST(StringEquals) {
+ v8::V8::Initialize();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ auto foo_str =
+ v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ auto bar_str =
+ v8::String::NewFromUtf8(isolate, "bar", v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ auto foo_str2 =
+ v8::String::NewFromUtf8(isolate, "foo", v8::NewStringType::kNormal)
+ .ToLocalChecked();
+
+ uint16_t* two_byte_source = AsciiToTwoByteString("foo");
+ auto foo_two_byte_str =
+ v8::String::NewFromTwoByte(isolate, two_byte_source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ i::DeleteArray(two_byte_source);
+
+ CHECK(foo_str->StringEquals(foo_str));
+ CHECK(!foo_str->StringEquals(bar_str));
+ CHECK(foo_str->StringEquals(foo_str2));
+ CHECK(foo_str->StringEquals(foo_two_byte_str));
+ CHECK(!bar_str->StringEquals(foo_str2));
+}
+
} // namespace test_strings
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index ddfc262807..902295447b 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 47545af37f..10b837aaed 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -289,6 +289,7 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
LocalContext env;
v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
@@ -307,7 +308,7 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue());
+ CHECK(result->BooleanValue(context).ToChecked());
}
{
@@ -317,7 +318,7 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(!result->BooleanValue());
+ CHECK(!result->BooleanValue(context).ToChecked());
}
{
@@ -327,7 +328,7 @@ TEST(BuiltinsIsTraceCategoryEnabled) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue());
+ CHECK(result->BooleanValue(context).ToChecked());
}
}
@@ -337,6 +338,7 @@ TEST(BuiltinsTrace) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
LocalContext env;
v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
@@ -360,7 +362,7 @@ TEST(BuiltinsTrace) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(!result->BooleanValue());
+ CHECK(!result->BooleanValue(context).ToChecked());
CHECK_EQ(0, GET_TRACE_OBJECTS_LIST->size());
}
@@ -368,7 +370,6 @@ TEST(BuiltinsTrace) {
{
v8::Local<v8::String> category = v8_str("v8-cat");
v8::Local<v8::String> name = v8_str("name");
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::Local<v8::Object> data = v8::Object::New(isolate);
data->Set(context, v8_str("foo"), v8_str("bar")).FromJust();
v8::Local<v8::Value> argv[] = {
@@ -380,7 +381,7 @@ TEST(BuiltinsTrace) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue());
+ CHECK(result->BooleanValue(context).ToChecked());
CHECK_EQ(1, GET_TRACE_OBJECTS_LIST->size());
CHECK_EQ(123, GET_TRACE_OBJECT(0)->id);
@@ -393,7 +394,6 @@ TEST(BuiltinsTrace) {
{
v8::Local<v8::String> category = v8_str("v8-cat\u20ac");
v8::Local<v8::String> name = v8_str("name\u20ac");
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::Local<v8::Object> data = v8::Object::New(isolate);
data->Set(context, v8_str("foo"), v8_str("bar")).FromJust();
v8::Local<v8::Value> argv[] = {
@@ -405,7 +405,7 @@ TEST(BuiltinsTrace) {
.ToLocalChecked()
.As<v8::Boolean>();
- CHECK(result->BooleanValue());
+ CHECK(result->BooleanValue(context).ToChecked());
CHECK_EQ(2, GET_TRACE_OBJECTS_LIST->size());
CHECK_EQ(123, GET_TRACE_OBJECT(1)->id);
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index bab6e0f2f6..b574fdd94a 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -7,7 +7,6 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/api.h"
#include "src/heap/heap.h"
#include "src/objects-inl.h"
#include "src/objects.h"
@@ -92,7 +91,7 @@ void TestSpeciesProtector(char* code,
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
std::string typed_array_constructors[] = {
-#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) #Type "Array",
+#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype) #Type "Array",
TYPED_ARRAYS(TYPED_ARRAY_CTOR)
#undef TYPED_ARRAY_CTOR
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 831593f17b..c8d5e37fa2 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -33,12 +33,14 @@ struct Tests {
Isolate* isolate;
HandleScope scope;
+ CanonicalHandleScope canonical;
Zone zone;
Types T;
Tests()
: isolate(CcTest::InitIsolateOnce()),
scope(isolate),
+ canonical(isolate),
zone(isolate->allocator(), ZONE_NAME),
T(&zone, isolate, isolate->random_number_generator()) {}
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 2d63a87fc2..421407180c 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -8,7 +8,7 @@
#include "src/v8.h"
#include "src/accessors.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/field-type.h"
@@ -124,7 +124,7 @@ static Handle<DescriptorArray> CreateDescriptorArray(Isolate* isolate,
d = Descriptor::AccessorConstant(name, info, NONE);
} else {
- d = Descriptor::DataField(name, next_field_offset, NONE,
+ d = Descriptor::DataField(isolate, name, next_field_offset, NONE,
representations[kind]);
}
descriptors->Append(&d);
@@ -658,7 +658,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
d = Descriptor::AccessorConstant(name, info, NONE);
} else {
- d = Descriptor::DataField(name, next_field_offset, NONE,
+ d = Descriptor::DataField(isolate, name, next_field_offset, NONE,
representations[kind]);
}
PropertyDetails details = d.GetDetails();
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index c1c15873e9..1f5c7c6a70 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -31,6 +31,7 @@
#include "src/v8.h"
+#include "src/api-inl.h"
#include "src/base/platform/platform.h"
#include "src/collector.h"
#include "src/conversions.h"
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 11a683f85e..439fe043b8 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -4,7 +4,6 @@
#include <cmath>
-#include "src/api.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-string-gen.h"
@@ -224,6 +223,42 @@ TEST(TestLocalConstBindings) {
ft.Call();
}
+TEST(TestForLoop) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ {
+ m.TestForLoop();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestTypeswitch) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ {
+ m.TestTypeswitch();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestGenericOverload) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeAssemblerTester asm_tester(isolate, 0);
+ TestBuiltinsFromDSLAssembler m(asm_tester.state());
+ {
+ m.TestGenericOverload();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/cctest/types-fuzz.h
index db264db42c..b6b5bf2dc5 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/cctest/types-fuzz.h
@@ -40,7 +40,7 @@ namespace compiler {
class Types {
public:
Types(Zone* zone, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
- : zone_(zone), js_heap_broker_(isolate), rng_(rng) {
+ : zone_(zone), js_heap_broker_(isolate, zone), rng_(rng) {
#define DECLARE_TYPE(name, value) \
name = Type::name(); \
types.push_back(name);
@@ -209,7 +209,7 @@ class Types {
}
Zone* zone() { return zone_; }
- const JSHeapBroker* js_heap_broker() const { return &js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() { return &js_heap_broker_; }
private:
Zone* zone_;
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index 23a0c3369b..e56060bdd9 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -30,7 +30,7 @@ class CWasmEntryArgTester {
public:
CWasmEntryArgTester(std::initializer_list<uint8_t> wasm_function_bytes,
std::function<ReturnType(Args...)> expected_fn)
- : runner_(kExecuteTurbofan),
+ : runner_(ExecutionTier::kOptimized),
isolate_(runner_.main_isolate()),
expected_fn_(expected_fn),
sig_(runner_.template CreateSig<ReturnType, Args...>()) {
@@ -93,7 +93,7 @@ class CWasmEntryArgTester {
std::function<ReturnType(Args...)> expected_fn_;
FunctionSig* sig_;
Handle<JSFunction> c_wasm_entry_fn_;
- wasm::WasmCode* wasm_code_;
+ WasmCode* wasm_code_;
};
} // namespace
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
new file mode 100644
index 0000000000..53ee5eedd1
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -0,0 +1,199 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler-inl.h"
+#include "src/macro-assembler-inl.h"
+#include "src/simulator.h"
+#include "src/utils.h"
+#include "src/wasm/jump-table-assembler.h"
+#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if 0
+#define TRACE(...) PrintF(__VA_ARGS__)
+#else
+#define TRACE(...)
+#endif
+
+#define __ masm.
+
+// TODO(v8:7424,v8:8018): Extend this test to all architectures.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_ARM64
+
+namespace {
+
+static volatile int global_stop_bit = 0;
+
+Address GenerateJumpTableThunk(Address jump_target) {
+ size_t allocated;
+ byte* buffer;
+#if V8_TARGET_ARCH_ARM64
+ // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
+ // that the jump table only supports {near_call} distances.
+ const uintptr_t kThunkAddrMask = (1 << WhichPowerOf2(kMaxWasmCodeMemory)) - 1;
+ const int kArbitrarilyChosenRetryCount = 10; // Retry to avoid flakes.
+ for (int retry = 0; retry < kArbitrarilyChosenRetryCount; ++retry) {
+ Address random_addr = reinterpret_cast<Address>(GetRandomMmapAddr());
+ void* address = reinterpret_cast<void*>((jump_target & ~kThunkAddrMask) |
+ (random_addr & kThunkAddrMask));
+ buffer = AllocateAssemblerBuffer(
+ &allocated, AssemblerBase::kMinimalBufferSize, address);
+ Address bufferptr = reinterpret_cast<uintptr_t>(buffer);
+ if ((bufferptr & ~kThunkAddrMask) == (jump_target & ~kThunkAddrMask)) break;
+ }
+#else
+ buffer = AllocateAssemblerBuffer(
+ &allocated, AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr());
+#endif
+ MacroAssembler masm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+
+ Label exit;
+ Register scratch = kReturnRegister0;
+ Address stop_bit_address = reinterpret_cast<Address>(&global_stop_bit);
+#if V8_TARGET_ARCH_X64
+ __ Move(scratch, stop_bit_address, RelocInfo::NONE);
+ __ testl(MemOperand(scratch, 0), Immediate(1));
+ __ j(not_zero, &exit);
+ __ Jump(jump_target, RelocInfo::NONE);
+#elif V8_TARGET_ARCH_IA32
+ __ Move(scratch, Immediate(stop_bit_address, RelocInfo::NONE));
+ __ test(MemOperand(scratch, 0), Immediate(1));
+ __ j(not_zero, &exit);
+ __ jmp(jump_target, RelocInfo::NONE);
+#elif V8_TARGET_ARCH_ARM
+ __ mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ ldr(scratch, MemOperand(scratch, 0));
+ __ tst(scratch, Operand(1));
+ __ b(ne, &exit);
+ __ Jump(jump_target, RelocInfo::NONE);
+#elif V8_TARGET_ARCH_ARM64
+ __ Mov(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ Ldr(scratch, MemOperand(scratch, 0));
+ __ Tbnz(scratch, 0, &exit);
+ __ Mov(scratch, Immediate(jump_target, RelocInfo::NONE));
+ __ Br(scratch);
+#else
+#error Unsupported architecture
+#endif
+ __ bind(&exit);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ return reinterpret_cast<Address>(buffer);
+}
+
+class JumpTableRunner : public v8::base::Thread {
+ public:
+ JumpTableRunner(Address slot_address, int runner_id)
+ : Thread(Options("JumpTableRunner")),
+ slot_address_(slot_address),
+ runner_id_(runner_id) {}
+
+ void Run() override {
+ TRACE("Runner #%d is starting ...\n", runner_id_);
+ GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
+ TRACE("Runner #%d is stopping ...\n", runner_id_);
+ USE(runner_id_);
+ }
+
+ private:
+ Address slot_address_;
+ int runner_id_;
+};
+
+class JumpTablePatcher : public v8::base::Thread {
+ public:
+ JumpTablePatcher(Address slot_start, uint32_t slot_index, Address thunk1,
+ Address thunk2)
+ : Thread(Options("JumpTablePatcher")),
+ slot_start_(slot_start),
+ slot_index_(slot_index),
+ thunks_{thunk1, thunk2} {}
+
+ void Run() override {
+ TRACE("Patcher is starting ...\n");
+ constexpr int kNumberOfPatchIterations = 64;
+ for (int i = 0; i < kNumberOfPatchIterations; ++i) {
+ TRACE(" patch slot " V8PRIxPTR_FMT " to thunk #%d\n",
+ slot_start_ + JumpTableAssembler::SlotIndexToOffset(slot_index_),
+ i % 2);
+ JumpTableAssembler::PatchJumpTableSlot(
+ slot_start_, slot_index_, thunks_[i % 2], WasmCode::kFlushICache);
+ }
+ TRACE("Patcher is stopping ...\n");
+ }
+
+ private:
+ Address slot_start_;
+ uint32_t slot_index_;
+ Address thunks_[2];
+};
+
+} // namespace
+
+// This test is intended to stress concurrent patching of jump-table slots. It
+// uses the following setup:
+// 1) Picks a particular slot of the jump-table. Slots are iterated over to
+// ensure multiple entries (at different offset alignments) are tested.
+// 2) Starts multiple runners that spin through the above slot. The runners
+// use thunk code that will jump to the same jump-table slot repeatedly
+// until the {global_stop_bit} indicates a test-end condition.
+// 3) Starts a patcher that repeatedly patches the jump-table slot back and
+// forth between two thunks. If there is a race, then chances are high that
+// one of the runners is currently executing the jump-table slot.
+TEST(JumpTablePatchingStress) {
+ constexpr int kJumpTableSlotCount = 128;
+ constexpr int kNumberOfRunnerThreads = 5;
+
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(
+ &allocated,
+ JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount));
+
+ // Iterate through jump-table slots to hammer at different alignments within
+ // the jump-table, thereby increasing stress for variable-length ISAs.
+ Address slot_start = reinterpret_cast<Address>(buffer);
+ for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
+ TRACE("Hammering on jump table slot #%d ...\n", slot);
+ uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot);
+ Address thunk1 = GenerateJumpTableThunk(slot_start + slot_offset);
+ Address thunk2 = GenerateJumpTableThunk(slot_start + slot_offset);
+ TRACE(" generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
+ TRACE(" generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
+ JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
+ WasmCode::kFlushICache);
+
+ // Start multiple runner threads and a patcher thread that hammer on the
+ // same jump-table slot concurrently.
+ std::list<JumpTableRunner> runners;
+ for (int runner = 0; runner < kNumberOfRunnerThreads; ++runner) {
+ runners.emplace_back(slot_start + slot_offset, runner);
+ }
+ JumpTablePatcher patcher(slot_start, slot, thunk1, thunk2);
+ global_stop_bit = 0; // Signal runners to keep going.
+ for (auto& runner : runners) runner.Start();
+ patcher.Start();
+ patcher.Join();
+ global_stop_bit = -1; // Signal runners to stop.
+ for (auto& runner : runners) runner.Join();
+ }
+}
+
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM ||
+ // V8_TARGET_ARCH_ARM64
+
+#undef __
+#undef TRACE
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 157dd519b1..be45f5bc17 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -22,7 +22,7 @@ namespace wasm {
namespace test_run_wasm_64 {
WASM_EXEC_TEST(I64Const) {
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
const int64_t kExpectedValue = 0x1122334455667788LL;
// return(kExpectedValue)
BUILD(r, WASM_I64V_9(kExpectedValue));
@@ -32,7 +32,7 @@ WASM_EXEC_TEST(I64Const) {
WASM_EXEC_TEST(I64Const_many) {
int cntr = 0;
FOR_INT32_INPUTS(i) {
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
const int64_t kExpectedValue = (static_cast<int64_t>(*i) << 32) | cntr;
// return(kExpectedValue)
BUILD(r, WASM_I64V(kExpectedValue));
@@ -42,7 +42,7 @@ WASM_EXEC_TEST(I64Const_many) {
}
WASM_EXEC_TEST(Return_I64) {
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_RETURN1(WASM_GET_LOCAL(0)));
@@ -50,7 +50,7 @@ WASM_EXEC_TEST(Return_I64) {
}
WASM_EXEC_TEST(I64Add) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i + *j, r.Call(*i, *j)); }
@@ -63,7 +63,7 @@ WASM_EXEC_TEST(I64Add) {
const int64_t kHasBit33On = 0x100000000;
WASM_EXEC_TEST(Regress5800_Add) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_ADD(
WASM_I64V(0), WASM_I64V(kHasBit33On)))),
WASM_RETURN1(WASM_I32V(0))),
@@ -72,7 +72,7 @@ WASM_EXEC_TEST(Regress5800_Add) {
}
WASM_EXEC_TEST(I64Sub) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i - *j, r.Call(*i, *j)); }
@@ -80,7 +80,7 @@ WASM_EXEC_TEST(I64Sub) {
}
WASM_EXEC_TEST(Regress5800_Sub) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_I64_EQZ(WASM_I64_SUB(
WASM_I64V(0), WASM_I64V(kHasBit33On)))),
WASM_RETURN1(WASM_I32V(0))),
@@ -89,7 +89,7 @@ WASM_EXEC_TEST(Regress5800_Sub) {
}
WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
@@ -100,7 +100,7 @@ WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
@@ -111,7 +111,7 @@ WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
@@ -122,7 +122,7 @@ WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
@@ -134,7 +134,7 @@ WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_UINT64_INPUTS(i) {
@@ -146,7 +146,7 @@ WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I32_CONVERT_I64(
WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_INT64_INPUTS(i) {
@@ -158,7 +158,7 @@ WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
}
WASM_EXEC_TEST(I64DivS) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
@@ -174,7 +174,7 @@ WASM_EXEC_TEST(I64DivS) {
}
WASM_EXEC_TEST(I64DivS_Trap) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(0, r.Call(int64_t{0}, int64_t{100}));
CHECK_TRAP64(r.Call(int64_t{100}, int64_t{0}));
@@ -185,7 +185,7 @@ WASM_EXEC_TEST(I64DivS_Trap) {
WASM_EXEC_TEST(I64DivS_Byzero_Const) {
for (int8_t denom = -2; denom < 8; denom++) {
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
for (int64_t val = -7; val < 8; val++) {
if (denom == 0) {
@@ -198,7 +198,7 @@ WASM_EXEC_TEST(I64DivS_Byzero_Const) {
}
WASM_EXEC_TEST(I64DivU) {
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -212,7 +212,7 @@ WASM_EXEC_TEST(I64DivU) {
}
WASM_EXEC_TEST(I64DivU_Trap) {
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(0, r.Call(uint64_t{0}, uint64_t{100}));
CHECK_TRAP64(r.Call(uint64_t{100}, uint64_t{0}));
@@ -222,7 +222,7 @@ WASM_EXEC_TEST(I64DivU_Trap) {
WASM_EXEC_TEST(I64DivU_Byzero_Const) {
for (uint64_t denom = 0xFFFFFFFFFFFFFFFE; denom < 8; denom++) {
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
for (uint64_t val = 0xFFFFFFFFFFFFFFF0; val < 8; val++) {
@@ -236,7 +236,7 @@ WASM_EXEC_TEST(I64DivU_Byzero_Const) {
}
WASM_EXEC_TEST(I64RemS) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
@@ -250,7 +250,7 @@ WASM_EXEC_TEST(I64RemS) {
}
WASM_EXEC_TEST(I64RemS_Trap) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(33, r.Call(int64_t{133}, int64_t{100}));
CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), int64_t{-1}));
@@ -260,7 +260,7 @@ WASM_EXEC_TEST(I64RemS_Trap) {
}
WASM_EXEC_TEST(I64RemU) {
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -274,7 +274,7 @@ WASM_EXEC_TEST(I64RemU) {
}
WASM_EXEC_TEST(I64RemU_Trap) {
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(17, r.Call(uint64_t{217}, uint64_t{100}));
CHECK_TRAP64(r.Call(uint64_t{100}, uint64_t{0}));
@@ -283,7 +283,7 @@ WASM_EXEC_TEST(I64RemU_Trap) {
}
WASM_EXEC_TEST(I64And) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((*i) & (*j), r.Call(*i, *j)); }
@@ -291,7 +291,7 @@ WASM_EXEC_TEST(I64And) {
}
WASM_EXEC_TEST(I64Ior) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((*i) | (*j), r.Call(*i, *j)); }
@@ -299,7 +299,7 @@ WASM_EXEC_TEST(I64Ior) {
}
WASM_EXEC_TEST(I64Xor) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((*i) ^ (*j), r.Call(*i, *j)); }
@@ -308,7 +308,7 @@ WASM_EXEC_TEST(I64Xor) {
WASM_EXEC_TEST(I64Shl) {
{
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -319,22 +319,22 @@ WASM_EXEC_TEST(I64Shl) {
}
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 0, r.Call(*i)); }
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 32, r.Call(*i)); }
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 20, r.Call(*i)); }
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 40, r.Call(*i)); }
}
@@ -342,7 +342,7 @@ WASM_EXEC_TEST(I64Shl) {
WASM_EXEC_TEST(I64ShrU) {
{
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -353,22 +353,22 @@ WASM_EXEC_TEST(I64ShrU) {
}
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
}
{
- WasmRunner<uint64_t, int64_t> r(execution_mode);
+ WasmRunner<uint64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
}
@@ -376,7 +376,7 @@ WASM_EXEC_TEST(I64ShrU) {
WASM_EXEC_TEST(I64ShrS) {
{
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -387,29 +387,29 @@ WASM_EXEC_TEST(I64ShrS) {
}
}
{
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
}
{
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
}
{
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
}
{
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
}
}
WASM_EXEC_TEST(I64Eq) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i == *j ? 1 : 0, r.Call(*i, *j)); }
@@ -417,7 +417,7 @@ WASM_EXEC_TEST(I64Eq) {
}
WASM_EXEC_TEST(I64Ne) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_NE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i != *j ? 1 : 0, r.Call(*i, *j)); }
@@ -425,7 +425,7 @@ WASM_EXEC_TEST(I64Ne) {
}
WASM_EXEC_TEST(I64LtS) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
@@ -433,7 +433,7 @@ WASM_EXEC_TEST(I64LtS) {
}
WASM_EXEC_TEST(I64LeS) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
@@ -441,7 +441,7 @@ WASM_EXEC_TEST(I64LeS) {
}
WASM_EXEC_TEST(I64LtU) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
@@ -449,7 +449,7 @@ WASM_EXEC_TEST(I64LtU) {
}
WASM_EXEC_TEST(I64LeU) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
@@ -457,7 +457,7 @@ WASM_EXEC_TEST(I64LeU) {
}
WASM_EXEC_TEST(I64GtS) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
@@ -465,7 +465,7 @@ WASM_EXEC_TEST(I64GtS) {
}
WASM_EXEC_TEST(I64GeS) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
@@ -473,7 +473,7 @@ WASM_EXEC_TEST(I64GeS) {
}
WASM_EXEC_TEST(I64GtU) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
@@ -481,7 +481,7 @@ WASM_EXEC_TEST(I64GtU) {
}
WASM_EXEC_TEST(I64GeU) {
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
@@ -490,20 +490,20 @@ WASM_EXEC_TEST(I64GeU) {
WASM_EXEC_TEST(I32ConvertI64) {
FOR_INT64_INPUTS(i) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_I32_CONVERT_I64(WASM_I64V(*i)));
CHECK_EQ(static_cast<int32_t>(*i), r.Call());
}
}
WASM_EXEC_TEST(I64SConvertI32) {
- WasmRunner<int64_t, int32_t> r(execution_mode);
+ WasmRunner<int64_t, int32_t> r(execution_tier);
BUILD(r, WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(I64UConvertI32) {
- WasmRunner<int64_t, uint32_t> r(execution_mode);
+ WasmRunner<int64_t, uint32_t> r(execution_tier);
BUILD(r, WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(0)));
FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
}
@@ -518,7 +518,7 @@ WASM_EXEC_TEST(I64Popcnt) {
{26, 0x1123456782345678},
{38, 0xFFEDCBA09EDCBA09}};
- WasmRunner<int64_t, uint64_t> r(execution_mode);
+ WasmRunner<int64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
@@ -526,7 +526,7 @@ WASM_EXEC_TEST(I64Popcnt) {
}
WASM_EXEC_TEST(F32SConvertI64) {
- WasmRunner<float, int64_t> r(execution_mode);
+ WasmRunner<float, int64_t> r(execution_tier);
BUILD(r, WASM_F32_SCONVERT_I64(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) { CHECK_FLOAT_EQ(static_cast<float>(*i), r.Call(*i)); }
}
@@ -611,7 +611,7 @@ WASM_EXEC_TEST(F32UConvertI64) {
{0x8000008000000001, 0x5F000001},
{0x8000000000000400, 0x5F000000},
{0x8000000000000401, 0x5F000000}};
- WasmRunner<float, uint64_t> r(execution_mode);
+ WasmRunner<float, uint64_t> r(execution_tier);
BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(bit_cast<float>(values[i].expected), r.Call(values[i].input));
@@ -619,7 +619,7 @@ WASM_EXEC_TEST(F32UConvertI64) {
}
WASM_EXEC_TEST(F64SConvertI64) {
- WasmRunner<double, int64_t> r(execution_mode);
+ WasmRunner<double, int64_t> r(execution_tier);
BUILD(r, WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), r.Call(*i)); }
}
@@ -703,7 +703,7 @@ WASM_EXEC_TEST(F64UConvertI64) {
{0x8000008000000001, 0x43E0000010000000},
{0x8000000000000400, 0x43E0000000000000},
{0x8000000000000401, 0x43E0000000000001}};
- WasmRunner<double, uint64_t> r(execution_mode);
+ WasmRunner<double, uint64_t> r(execution_tier);
BUILD(r, WASM_F64_UCONVERT_I64(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(bit_cast<double>(values[i].expected), r.Call(values[i].input));
@@ -711,7 +711,7 @@ WASM_EXEC_TEST(F64UConvertI64) {
}
WASM_EXEC_TEST(I64SConvertF32) {
- WasmRunner<int64_t, float> r(execution_mode);
+ WasmRunner<int64_t, float> r(execution_tier);
BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -726,7 +726,7 @@ WASM_EXEC_TEST(I64SConvertF32) {
WASM_EXEC_TEST(I64SConvertSatF32) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<int64_t, float> r(execution_mode);
+ WasmRunner<int64_t, float> r(execution_tier);
BUILD(r, WASM_I64_SCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
int64_t expected;
@@ -746,7 +746,7 @@ WASM_EXEC_TEST(I64SConvertSatF32) {
}
WASM_EXEC_TEST(I64SConvertF64) {
- WasmRunner<int64_t, double> r(execution_mode);
+ WasmRunner<int64_t, double> r(execution_tier);
BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -761,7 +761,7 @@ WASM_EXEC_TEST(I64SConvertF64) {
WASM_EXEC_TEST(I64SConvertSatF64) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<int64_t, double> r(execution_mode);
+ WasmRunner<int64_t, double> r(execution_tier);
BUILD(r, WASM_I64_SCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int64_t expected;
@@ -781,7 +781,7 @@ WASM_EXEC_TEST(I64SConvertSatF64) {
}
WASM_EXEC_TEST(I64UConvertF32) {
- WasmRunner<uint64_t, float> r(execution_mode);
+ WasmRunner<uint64_t, float> r(execution_tier);
BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -796,7 +796,7 @@ WASM_EXEC_TEST(I64UConvertF32) {
WASM_EXEC_TEST(I64UConvertSatF32) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<int64_t, float> r(execution_mode);
+ WasmRunner<int64_t, float> r(execution_tier);
BUILD(r, WASM_I64_UCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
uint64_t expected;
@@ -816,7 +816,7 @@ WASM_EXEC_TEST(I64UConvertSatF32) {
}
WASM_EXEC_TEST(I64UConvertF64) {
- WasmRunner<uint64_t, double> r(execution_mode);
+ WasmRunner<uint64_t, double> r(execution_tier);
BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -831,7 +831,7 @@ WASM_EXEC_TEST(I64UConvertF64) {
WASM_EXEC_TEST(I64UConvertSatF64) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<int64_t, double> r(execution_mode);
+ WasmRunner<int64_t, double> r(execution_tier);
BUILD(r, WASM_I64_UCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int64_t expected;
@@ -858,7 +858,7 @@ WASM_EXEC_TEST(CallI64Parameter) {
FunctionSig sig(1, 19, param_types);
for (int i = 0; i < 19; i++) {
if (i == 2 || i == 3) continue;
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// Build the target function.
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(i));
@@ -889,7 +889,7 @@ WASM_EXEC_TEST(CallI64Return) {
return_types[1] = kWasmI32;
FunctionSig sig(2, 1, return_types);
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
// Build the target function.
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(0), WASM_I32V(7));
@@ -901,32 +901,32 @@ WASM_EXEC_TEST(CallI64Return) {
CHECK_EQ(0xBCD12340000000B, r.Call());
}
-void TestI64Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+void TestI64Binop(ExecutionTier execution_tier, WasmOpcode opcode,
int64_t expected, int64_t a, int64_t b) {
{
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
-void TestI64Cmp(WasmExecutionMode execution_mode, WasmOpcode opcode,
+void TestI64Cmp(ExecutionTier execution_tier, WasmOpcode opcode,
int64_t expected, int64_t a, int64_t b) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier);
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
@@ -934,66 +934,66 @@ void TestI64Cmp(WasmExecutionMode execution_mode, WasmOpcode opcode,
}
WASM_EXEC_TEST(I64Binops) {
- TestI64Binop(execution_mode, kExprI64Add, -5586332274295447011,
+ TestI64Binop(execution_tier, kExprI64Add, -5586332274295447011,
0x501B72EBABC26847, 0x625DE9793D8F79D6);
- TestI64Binop(execution_mode, kExprI64Sub, 9001903251710731490,
+ TestI64Binop(execution_tier, kExprI64Sub, 9001903251710731490,
0xF24FE6474640002E, 0x7562B6F711991B4C);
- TestI64Binop(execution_mode, kExprI64Mul, -4569547818546064176,
+ TestI64Binop(execution_tier, kExprI64Mul, -4569547818546064176,
0x231A263C2CBC6451, 0xEAD44DE6BD3E23D0);
- TestI64Binop(execution_mode, kExprI64Mul, -25963122347507043,
+ TestI64Binop(execution_tier, kExprI64Mul, -25963122347507043,
0x4DA1FA47C9352B73, 0x91FE82317AA035AF);
- TestI64Binop(execution_mode, kExprI64Mul, 7640290486138131960,
+ TestI64Binop(execution_tier, kExprI64Mul, 7640290486138131960,
0x185731ABE8EEA47C, 0x714EC59F1380D4C2);
- TestI64Binop(execution_mode, kExprI64DivS, -91517, 0x93B1190A34DE56A0,
+ TestI64Binop(execution_tier, kExprI64DivS, -91517, 0x93B1190A34DE56A0,
0x00004D8F68863948);
- TestI64Binop(execution_mode, kExprI64DivU, 149016, 0xE15B3727E8A2080A,
+ TestI64Binop(execution_tier, kExprI64DivU, 149016, 0xE15B3727E8A2080A,
0x0000631BFA72DB8B);
- TestI64Binop(execution_mode, kExprI64RemS, -664128064149968,
+ TestI64Binop(execution_tier, kExprI64RemS, -664128064149968,
0x9A78B4E4FE708692, 0x0003E0B6B3BE7609);
- TestI64Binop(execution_mode, kExprI64RemU, 1742040017332765,
+ TestI64Binop(execution_tier, kExprI64RemU, 1742040017332765,
0x0CE84708C6258C81, 0x000A6FDE82016697);
- TestI64Binop(execution_mode, kExprI64And, 2531040582801836054,
+ TestI64Binop(execution_tier, kExprI64And, 2531040582801836054,
0xAF257D1602644A16, 0x33B290A91A10D997);
- TestI64Binop(execution_mode, kExprI64Ior, 8556201506536114940,
+ TestI64Binop(execution_tier, kExprI64Ior, 8556201506536114940,
0x169D9BE7BD3F0A5C, 0x66BCA28D77AF40E8);
- TestI64Binop(execution_mode, kExprI64Xor, -4605655183785456377,
+ TestI64Binop(execution_tier, kExprI64Xor, -4605655183785456377,
0xB6EA20A5D48E85B8, 0x76FF4DA6C80688BF);
- TestI64Binop(execution_mode, kExprI64Shl, -7240704056088331264,
+ TestI64Binop(execution_tier, kExprI64Shl, -7240704056088331264,
0xEF4DC1ED030E8FFE, 9);
- TestI64Binop(execution_mode, kExprI64ShrU, 12500673744059159,
+ TestI64Binop(execution_tier, kExprI64ShrU, 12500673744059159,
0xB1A52FA7DEEC5D14, 10);
- TestI64Binop(execution_mode, kExprI64ShrS, 1725103446999874,
+ TestI64Binop(execution_tier, kExprI64ShrS, 1725103446999874,
0x3107C791461A112B, 11);
- TestI64Binop(execution_mode, kExprI64Ror, -8960135652432576946,
+ TestI64Binop(execution_tier, kExprI64Ror, -8960135652432576946,
0x73418D1717E4E83A, 12);
- TestI64Binop(execution_mode, kExprI64Ror, 7617662827409989779,
+ TestI64Binop(execution_tier, kExprI64Ror, 7617662827409989779,
0xEBFF67CF0C126D36, 13);
- TestI64Binop(execution_mode, kExprI64Rol, -2097714064174346012,
+ TestI64Binop(execution_tier, kExprI64Rol, -2097714064174346012,
0x43938B8DB0B0F230, 14);
- TestI64Binop(execution_mode, kExprI64Rol, 8728493013947314237,
+ TestI64Binop(execution_tier, kExprI64Rol, 8728493013947314237,
0xE07AF243AC4D219D, 15);
}
WASM_EXEC_TEST(I64Compare) {
- TestI64Cmp(execution_mode, kExprI64Eq, 0, 0xB915D8FA494064F0,
+ TestI64Cmp(execution_tier, kExprI64Eq, 0, 0xB915D8FA494064F0,
0x04D700B2536019A3);
- TestI64Cmp(execution_mode, kExprI64Ne, 1, 0xC2FAFAAAB0446CDC,
+ TestI64Cmp(execution_tier, kExprI64Ne, 1, 0xC2FAFAAAB0446CDC,
0x52A3328F780C97A3);
- TestI64Cmp(execution_mode, kExprI64LtS, 0, 0x673636E6306B0578,
+ TestI64Cmp(execution_tier, kExprI64LtS, 0, 0x673636E6306B0578,
0x028EC9ECA78F7227);
- TestI64Cmp(execution_mode, kExprI64LeS, 1, 0xAE5214114B86A0FA,
+ TestI64Cmp(execution_tier, kExprI64LeS, 1, 0xAE5214114B86A0FA,
0x7C1D21DA3DFD0CCF);
- TestI64Cmp(execution_mode, kExprI64LtU, 0, 0x7D52166381EC1CE0,
+ TestI64Cmp(execution_tier, kExprI64LtU, 0, 0x7D52166381EC1CE0,
0x59F4A6A9E78CD3D8);
- TestI64Cmp(execution_mode, kExprI64LeU, 1, 0xE4169A385C7EA0E0,
+ TestI64Cmp(execution_tier, kExprI64LeU, 1, 0xE4169A385C7EA0E0,
0xFBDBED2C8781E5BC);
- TestI64Cmp(execution_mode, kExprI64GtS, 0, 0x9D08FF8FB5F42E81,
+ TestI64Cmp(execution_tier, kExprI64GtS, 0, 0x9D08FF8FB5F42E81,
0xD4E5C9D7FE09F621);
- TestI64Cmp(execution_mode, kExprI64GeS, 1, 0x78DA3B2F73264E0F,
+ TestI64Cmp(execution_tier, kExprI64GeS, 1, 0x78DA3B2F73264E0F,
0x6FE5E2A67C501CBE);
- TestI64Cmp(execution_mode, kExprI64GtU, 0, 0x8F691284E44F7DA9,
+ TestI64Cmp(execution_tier, kExprI64GtU, 0, 0x8F691284E44F7DA9,
0xD5EA9BC1EE149192);
- TestI64Cmp(execution_mode, kExprI64GeU, 0, 0x0886A0C58C7AA224,
+ TestI64Cmp(execution_tier, kExprI64GeU, 0, 0x0886A0C58C7AA224,
0x5DDBE5A81FD7EE47);
}
@@ -1035,7 +1035,7 @@ WASM_EXEC_TEST(I64Clz) {
{62, 0x0000000000000002}, {63, 0x0000000000000001},
{64, 0x0000000000000000}};
- WasmRunner<int64_t, uint64_t> r(execution_mode);
+ WasmRunner<int64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_CLZ(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
@@ -1080,7 +1080,7 @@ WASM_EXEC_TEST(I64Ctz) {
{2, 0x000000009AFDBC84}, {1, 0x000000009AFDBC82},
{0, 0x000000009AFDBC81}};
- WasmRunner<int64_t, uint64_t> r(execution_mode);
+ WasmRunner<int64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_CTZ(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
@@ -1097,7 +1097,7 @@ WASM_EXEC_TEST(I64Popcnt2) {
{26, 0x1123456782345678},
{38, 0xFFEDCBA09EDCBA09}};
- WasmRunner<int64_t, uint64_t> r(execution_mode);
+ WasmRunner<int64_t, uint64_t> r(execution_tier);
BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
@@ -1107,25 +1107,25 @@ WASM_EXEC_TEST(I64Popcnt2) {
// Test the WasmRunner with an Int64 return value and different numbers of
// Int64 parameters.
WASM_EXEC_TEST(I64WasmRunner) {
- {FOR_INT64_INPUTS(i){WasmRunner<int64_t> r(execution_mode);
+ {FOR_INT64_INPUTS(i){WasmRunner<int64_t> r(execution_tier);
BUILD(r, WASM_I64V(*i));
CHECK_EQ(*i, r.Call());
}
}
{
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
{
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i ^ *j, r.Call(*i, *j)); }
}
}
{
- WasmRunner<int64_t, int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
WASM_I64_XOR(WASM_GET_LOCAL(1), WASM_GET_LOCAL(2))));
FOR_INT64_INPUTS(i) {
@@ -1137,7 +1137,7 @@ WASM_EXEC_TEST(I64WasmRunner) {
}
}
{
- WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
WASM_I64_XOR(WASM_GET_LOCAL(1),
WASM_I64_XOR(WASM_GET_LOCAL(2),
@@ -1154,7 +1154,7 @@ WASM_EXEC_TEST(I64WasmRunner) {
}
WASM_EXEC_TEST(Call_Int64Sub) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
// Build the target function.
TestSignatures sigs;
WasmFunctionCompiler& t = r.NewFunction(sigs.l_ll());
@@ -1183,7 +1183,7 @@ WASM_EXEC_TEST(LoadStoreI64_sx) {
kExprI64LoadMem};
for (size_t m = 0; m < arraysize(loads); m++) {
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
byte* memory = r.builder().AddMemoryElems<byte>(kWasmPageSize);
byte code[] = {
@@ -1222,7 +1222,7 @@ WASM_EXEC_TEST(LoadStoreI64_sx) {
WASM_EXEC_TEST(I64ReinterpretF64) {
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
@@ -1237,7 +1237,7 @@ WASM_EXEC_TEST(I64ReinterpretF64) {
}
WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
BUILD(r, WASM_I64_REINTERPRET_F64(WASM_SEQ(kExprF64Const, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xF4, 0x7F)));
@@ -1246,7 +1246,7 @@ WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
}
WASM_EXEC_TEST(F64ReinterpretI64) {
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
@@ -1262,7 +1262,7 @@ WASM_EXEC_TEST(F64ReinterpretI64) {
}
WASM_EXEC_TEST(LoadMemI64) {
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
r.builder().RandomizeMemory(1111);
@@ -1281,7 +1281,7 @@ WASM_EXEC_TEST(LoadMemI64) {
WASM_EXEC_TEST(LoadMemI64_alignment) {
for (byte alignment = 0; alignment <= 3; alignment++) {
- WasmRunner<int64_t> r(execution_mode);
+ WasmRunner<int64_t> r(execution_tier);
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
r.builder().RandomizeMemory(1111);
@@ -1302,7 +1302,7 @@ WASM_EXEC_TEST(LoadMemI64_alignment) {
WASM_EXEC_TEST(MemI64_Sum) {
const int kNumElems = 20;
- WasmRunner<uint64_t, int32_t> r(execution_mode);
+ WasmRunner<uint64_t, int32_t> r(execution_tier);
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
const byte kSum = r.AllocateLocal(kWasmI64);
@@ -1334,7 +1334,7 @@ WASM_EXEC_TEST(StoreMemI64_alignment) {
const int64_t kWritten = 0x12345678ABCD0011ll;
for (byte i = 0; i <= 3; i++) {
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
int64_t* memory =
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, i,
@@ -1349,7 +1349,7 @@ WASM_EXEC_TEST(StoreMemI64_alignment) {
}
WASM_EXEC_TEST(I64Global) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int64_t* global = r.builder().AddGlobal<int64_t>();
// global = global + p0
BUILD(r, WASM_SET_GLOBAL(
@@ -1359,14 +1359,14 @@ WASM_EXEC_TEST(I64Global) {
r.builder().WriteMemory<int64_t>(global, 0xFFFFFFFFFFFFFFFFLL);
for (int i = 9; i < 444444; i += 111111) {
- int64_t expected = *global & i;
+ int64_t expected = ReadLittleEndianValue<int64_t>(global) & i;
r.Call(i);
- CHECK_EQ(expected, *global);
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(global));
}
}
WASM_EXEC_TEST(I64Eqz) {
- WasmRunner<int32_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_EQZ(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) {
@@ -1376,7 +1376,7 @@ WASM_EXEC_TEST(I64Eqz) {
}
WASM_EXEC_TEST(I64Ror) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -1388,7 +1388,7 @@ WASM_EXEC_TEST(I64Ror) {
}
WASM_EXEC_TEST(I64Rol) {
- WasmRunner<int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -1409,7 +1409,7 @@ WASM_EXEC_TEST(StoreMem_offset_oob_i64) {
constexpr size_t num_bytes = kWasmPageSize;
for (size_t m = 0; m < arraysize(machineTypes); m++) {
- WasmRunner<int32_t, uint32_t> r(execution_mode);
+ WasmRunner<int32_t, uint32_t> r(execution_tier);
byte* memory = r.builder().AddMemoryElems<byte>(num_bytes);
r.builder().RandomizeMemory(1119 + static_cast<int>(m));
@@ -1436,7 +1436,7 @@ WASM_EXEC_TEST(Store_i64_narrowed) {
stored_size_in_bytes = std::max(1, stored_size_in_bytes * 2);
constexpr int kBytes = 24;
uint8_t expected_memory[kBytes] = {0};
- WasmRunner<int32_t, int32_t, int64_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int64_t> r(execution_tier);
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
constexpr uint64_t kPattern = 0x0123456789abcdef;
@@ -1459,14 +1459,14 @@ WASM_EXEC_TEST(Store_i64_narrowed) {
}
WASM_EXEC_TEST(UnalignedInt64Load) {
- WasmRunner<uint64_t> r(execution_mode);
+ WasmRunner<uint64_t> r(execution_tier);
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_ONE, 3));
r.Call();
}
WASM_EXEC_TEST(UnalignedInt64Store) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(int64_t));
BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ONE, 3,
WASM_I64V_1(1)),
@@ -1480,12 +1480,12 @@ WASM_EXEC_TEST(UnalignedInt64Store) {
for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
} while (false)
-static void CompileCallIndirectMany(WasmExecutionMode mode, ValueType param) {
+static void CompileCallIndirectMany(ExecutionTier tier, ValueType param) {
// Make sure we don't run out of registers when compiling indirect calls
// with many many parameters.
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; num_params++) {
- WasmRunner<void> r(mode);
+ WasmRunner<void> r(tier);
FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
r.builder().AddSignature(sig);
@@ -1506,10 +1506,10 @@ static void CompileCallIndirectMany(WasmExecutionMode mode, ValueType param) {
}
WASM_EXEC_TEST(Compile_Wasm_CallIndirect_Many_i64) {
- CompileCallIndirectMany(execution_mode, kWasmI64);
+ CompileCallIndirectMany(execution_tier, kWasmI64);
}
-static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
+static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
const int kExpected = 6333;
const int kElemSize = 8;
TestSignatures sigs;
@@ -1525,7 +1525,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
for (int which = 0; which < num_params; which++) {
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
MachineType* memtypes = &mixed[start];
MachineType result = memtypes[which];
@@ -1584,13 +1584,13 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
}
}
-WASM_EXEC_TEST(MixedCall_i64_0) { Run_WasmMixedCall_N(execution_mode, 0); }
-WASM_EXEC_TEST(MixedCall_i64_1) { Run_WasmMixedCall_N(execution_mode, 1); }
-WASM_EXEC_TEST(MixedCall_i64_2) { Run_WasmMixedCall_N(execution_mode, 2); }
-WASM_EXEC_TEST(MixedCall_i64_3) { Run_WasmMixedCall_N(execution_mode, 3); }
+WASM_EXEC_TEST(MixedCall_i64_0) { Run_WasmMixedCall_N(execution_tier, 0); }
+WASM_EXEC_TEST(MixedCall_i64_1) { Run_WasmMixedCall_N(execution_tier, 1); }
+WASM_EXEC_TEST(MixedCall_i64_2) { Run_WasmMixedCall_N(execution_tier, 2); }
+WASM_EXEC_TEST(MixedCall_i64_3) { Run_WasmMixedCall_N(execution_tier, 3); }
WASM_EXEC_TEST(Regress5874) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemoryElems<int64_t>(kWasmPageSize / sizeof(int64_t));
BUILD(r, kExprI64Const, 0x00, // --
@@ -1604,7 +1604,7 @@ WASM_EXEC_TEST(Regress5874) {
WASM_EXEC_TEST(Regression_6858) {
// WasmRunner with 5 params and returns, which is the maximum.
- WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t, int64_t, int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
int64_t dividend = 15;
int64_t divisor = 0;
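
The hunks above rename the per-test parameter execution_mode to execution_tier and the helper parameter type WasmExecutionMode to ExecutionTier. Below is a minimal, self-contained sketch of the tier-parameterized test pattern these call sites rely on; it is not the actual V8 cctest harness, and every identifier except ExecutionTier::kInterpreter (which appears verbatim later in this patch) is an assumption used only for illustration.

// Sketch only: a stand-in for the real WASM_EXEC_TEST machinery in
// test/cctest/wasm/wasm-run-utils.h, showing how one test body is
// instantiated once per execution tier and receives `execution_tier`.
#include <cstdint>
#include <iostream>

// Assumed tier names; kInterpreter is the only one confirmed by this patch.
enum class ExecutionTier : int8_t { kInterpreter, kBaseline, kOptimized };

#define WASM_EXEC_TEST(name)                                        \
  void RunWasm_##name(ExecutionTier execution_tier);                \
  void RunWasmInterpreter_##name() {                                \
    RunWasm_##name(ExecutionTier::kInterpreter);                    \
  }                                                                 \
  void RunWasmTurbofan_##name() {                                   \
    RunWasm_##name(ExecutionTier::kOptimized);                      \
  }                                                                 \
  void RunWasm_##name(ExecutionTier execution_tier)

WASM_EXEC_TEST(Example) {
  // In the real tests this value is forwarded to a WasmRunner; here it is
  // only printed to show that the body sees the tier it was instantiated for.
  std::cout << "running under tier " << static_cast<int>(execution_tier)
            << "\n";
}

int main() {
  RunWasmInterpreter_Example();
  RunWasmTurbofan_Example();
}

Under this pattern the rename is purely mechanical: each WASM_EXEC_TEST body, and each helper such as TestI64Binop or Run_WasmMixedCall_N, simply forwards whichever tier it was instantiated with, which is why every hunk in these test files is a one-line substitution.
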
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index 9e15c46f8d..fc9e395d44 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -20,7 +20,7 @@ namespace internal {
namespace wasm {
WASM_EXEC_TEST(Int32AsmjsDivS) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsDivS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
@@ -32,7 +32,7 @@ WASM_EXEC_TEST(Int32AsmjsDivS) {
}
WASM_EXEC_TEST(Int32AsmjsRemS) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsRemS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
@@ -44,7 +44,7 @@ WASM_EXEC_TEST(Int32AsmjsRemS) {
}
WASM_EXEC_TEST(Int32AsmjsDivU) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsDivU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
@@ -56,7 +56,7 @@ WASM_EXEC_TEST(Int32AsmjsDivU) {
}
WASM_EXEC_TEST(Int32AsmjsRemU) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsRemU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
@@ -68,7 +68,7 @@ WASM_EXEC_TEST(Int32AsmjsRemU) {
}
WASM_EXEC_TEST(I32AsmjsSConvertF32) {
- WasmRunner<int32_t, float> r(execution_mode);
+ WasmRunner<int32_t, float> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF32, WASM_GET_LOCAL(0)));
@@ -79,7 +79,7 @@ WASM_EXEC_TEST(I32AsmjsSConvertF32) {
}
WASM_EXEC_TEST(I32AsmjsSConvertF64) {
- WasmRunner<int32_t, double> r(execution_mode);
+ WasmRunner<int32_t, double> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF64, WASM_GET_LOCAL(0)));
@@ -90,7 +90,7 @@ WASM_EXEC_TEST(I32AsmjsSConvertF64) {
}
WASM_EXEC_TEST(I32AsmjsUConvertF32) {
- WasmRunner<uint32_t, float> r(execution_mode);
+ WasmRunner<uint32_t, float> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF32, WASM_GET_LOCAL(0)));
@@ -101,7 +101,7 @@ WASM_EXEC_TEST(I32AsmjsUConvertF32) {
}
WASM_EXEC_TEST(I32AsmjsUConvertF64) {
- WasmRunner<uint32_t, double> r(execution_mode);
+ WasmRunner<uint32_t, double> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF64, WASM_GET_LOCAL(0)));
@@ -112,7 +112,7 @@ WASM_EXEC_TEST(I32AsmjsUConvertF64) {
}
WASM_EXEC_TEST(LoadMemI32_oob_asm) {
- WasmRunner<int32_t, uint32_t> r(execution_mode);
+ WasmRunner<int32_t, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
r.builder().RandomizeMemory(1112);
@@ -132,7 +132,7 @@ WASM_EXEC_TEST(LoadMemI32_oob_asm) {
}
WASM_EXEC_TEST(LoadMemF32_oob_asm) {
- WasmRunner<float, uint32_t> r(execution_mode);
+ WasmRunner<float, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
float* memory = r.builder().AddMemoryElems<float>(8);
r.builder().RandomizeMemory(1112);
@@ -152,7 +152,7 @@ WASM_EXEC_TEST(LoadMemF32_oob_asm) {
}
WASM_EXEC_TEST(LoadMemF64_oob_asm) {
- WasmRunner<double, uint32_t> r(execution_mode);
+ WasmRunner<double, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
double* memory = r.builder().AddMemoryElems<double>(8);
r.builder().RandomizeMemory(1112);
@@ -174,7 +174,7 @@ WASM_EXEC_TEST(LoadMemF64_oob_asm) {
}
WASM_EXEC_TEST(StoreMemI32_oob_asm) {
- WasmRunner<int32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<int32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
r.builder().RandomizeMemory(1112);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index fca190440f..96877fd571 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -10,10 +10,10 @@ namespace internal {
namespace wasm {
namespace test_run_wasm_atomics {
-void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
+void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
r.builder().SetHasSharedMemory();
@@ -33,28 +33,28 @@ void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
}
WASM_EXEC_TEST(I32AtomicAdd) {
- RunU32BinOp(execution_mode, kExprI32AtomicAdd, Add);
+ RunU32BinOp(execution_tier, kExprI32AtomicAdd, Add);
}
WASM_EXEC_TEST(I32AtomicSub) {
- RunU32BinOp(execution_mode, kExprI32AtomicSub, Sub);
+ RunU32BinOp(execution_tier, kExprI32AtomicSub, Sub);
}
WASM_EXEC_TEST(I32AtomicAnd) {
- RunU32BinOp(execution_mode, kExprI32AtomicAnd, And);
+ RunU32BinOp(execution_tier, kExprI32AtomicAnd, And);
}
WASM_EXEC_TEST(I32AtomicOr) {
- RunU32BinOp(execution_mode, kExprI32AtomicOr, Or);
+ RunU32BinOp(execution_tier, kExprI32AtomicOr, Or);
}
WASM_EXEC_TEST(I32AtomicXor) {
- RunU32BinOp(execution_mode, kExprI32AtomicXor, Xor);
+ RunU32BinOp(execution_tier, kExprI32AtomicXor, Xor);
}
WASM_EXEC_TEST(I32AtomicExchange) {
- RunU32BinOp(execution_mode, kExprI32AtomicExchange, Exchange);
+ RunU32BinOp(execution_tier, kExprI32AtomicExchange, Exchange);
}
-void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
+void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
Uint16BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(mode);
+ WasmRunner<uint32_t, uint32_t> r(tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -74,28 +74,28 @@ void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
}
WASM_EXEC_TEST(I32AtomicAdd16U) {
- RunU16BinOp(execution_mode, kExprI32AtomicAdd16U, Add);
+ RunU16BinOp(execution_tier, kExprI32AtomicAdd16U, Add);
}
WASM_EXEC_TEST(I32AtomicSub16U) {
- RunU16BinOp(execution_mode, kExprI32AtomicSub16U, Sub);
+ RunU16BinOp(execution_tier, kExprI32AtomicSub16U, Sub);
}
WASM_EXEC_TEST(I32AtomicAnd16U) {
- RunU16BinOp(execution_mode, kExprI32AtomicAnd16U, And);
+ RunU16BinOp(execution_tier, kExprI32AtomicAnd16U, And);
}
WASM_EXEC_TEST(I32AtomicOr16U) {
- RunU16BinOp(execution_mode, kExprI32AtomicOr16U, Or);
+ RunU16BinOp(execution_tier, kExprI32AtomicOr16U, Or);
}
WASM_EXEC_TEST(I32AtomicXor16U) {
- RunU16BinOp(execution_mode, kExprI32AtomicXor16U, Xor);
+ RunU16BinOp(execution_tier, kExprI32AtomicXor16U, Xor);
}
WASM_EXEC_TEST(I32AtomicExchange16U) {
- RunU16BinOp(execution_mode, kExprI32AtomicExchange16U, Exchange);
+ RunU16BinOp(execution_tier, kExprI32AtomicExchange16U, Exchange);
}
-void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
+void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
@@ -114,27 +114,27 @@ void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
}
WASM_EXEC_TEST(I32AtomicAdd8U) {
- RunU8BinOp(execution_mode, kExprI32AtomicAdd8U, Add);
+ RunU8BinOp(execution_tier, kExprI32AtomicAdd8U, Add);
}
WASM_EXEC_TEST(I32AtomicSub8U) {
- RunU8BinOp(execution_mode, kExprI32AtomicSub8U, Sub);
+ RunU8BinOp(execution_tier, kExprI32AtomicSub8U, Sub);
}
WASM_EXEC_TEST(I32AtomicAnd8U) {
- RunU8BinOp(execution_mode, kExprI32AtomicAnd8U, And);
+ RunU8BinOp(execution_tier, kExprI32AtomicAnd8U, And);
}
WASM_EXEC_TEST(I32AtomicOr8U) {
- RunU8BinOp(execution_mode, kExprI32AtomicOr8U, Or);
+ RunU8BinOp(execution_tier, kExprI32AtomicOr8U, Or);
}
WASM_EXEC_TEST(I32AtomicXor8U) {
- RunU8BinOp(execution_mode, kExprI32AtomicXor8U, Xor);
+ RunU8BinOp(execution_tier, kExprI32AtomicXor8U, Xor);
}
WASM_EXEC_TEST(I32AtomicExchange8U) {
- RunU8BinOp(execution_mode, kExprI32AtomicExchange8U, Exchange);
+ RunU8BinOp(execution_tier, kExprI32AtomicExchange8U, Exchange);
}
WASM_EXEC_TEST(I32AtomicCompareExchange) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
@@ -155,7 +155,7 @@ WASM_EXEC_TEST(I32AtomicCompareExchange) {
WASM_EXEC_TEST(I32AtomicCompareExchange16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -177,7 +177,7 @@ WASM_EXEC_TEST(I32AtomicCompareExchange16U) {
WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
BUILD(r,
@@ -198,7 +198,7 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
WASM_EXEC_TEST(I32AtomicLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t> r(execution_mode);
+ WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
@@ -214,7 +214,7 @@ WASM_EXEC_TEST(I32AtomicLoad) {
WASM_EXEC_TEST(I32AtomicLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t> r(execution_mode);
+ WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -230,7 +230,7 @@ WASM_EXEC_TEST(I32AtomicLoad16U) {
WASM_EXEC_TEST(I32AtomicLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t> r(execution_mode);
+ WasmRunner<uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI32AtomicLoad8U, WASM_ZERO,
@@ -245,7 +245,7 @@ WASM_EXEC_TEST(I32AtomicLoad8U) {
WASM_EXEC_TEST(I32AtomicStoreLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
@@ -265,7 +265,7 @@ WASM_EXEC_TEST(I32AtomicStoreLoad) {
WASM_EXEC_TEST(I32AtomicStoreLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -286,7 +286,7 @@ WASM_EXEC_TEST(I32AtomicStoreLoad16U) {
WASM_EXEC_TEST(I32AtomicStoreLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
@@ -305,7 +305,7 @@ WASM_EXEC_TEST(I32AtomicStoreLoad8U) {
WASM_EXEC_TEST(I32AtomicStoreParameter) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
r.builder().SetHasSharedMemory();
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
index 48169db191..21b943595a 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics64.cc
@@ -10,10 +10,10 @@ namespace internal {
namespace wasm {
namespace test_run_wasm_atomics_64 {
-void RunU64BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
+void RunU64BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint64BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
r.builder().SetHasSharedMemory();
@@ -33,28 +33,28 @@ void RunU64BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
}
WASM_EXEC_TEST(I64AtomicAdd) {
- RunU64BinOp(execution_mode, kExprI64AtomicAdd, Add);
+ RunU64BinOp(execution_tier, kExprI64AtomicAdd, Add);
}
WASM_EXEC_TEST(I64AtomicSub) {
- RunU64BinOp(execution_mode, kExprI64AtomicSub, Sub);
+ RunU64BinOp(execution_tier, kExprI64AtomicSub, Sub);
}
WASM_EXEC_TEST(I64AtomicAnd) {
- RunU64BinOp(execution_mode, kExprI64AtomicAnd, And);
+ RunU64BinOp(execution_tier, kExprI64AtomicAnd, And);
}
WASM_EXEC_TEST(I64AtomicOr) {
- RunU64BinOp(execution_mode, kExprI64AtomicOr, Or);
+ RunU64BinOp(execution_tier, kExprI64AtomicOr, Or);
}
WASM_EXEC_TEST(I64AtomicXor) {
- RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor);
+ RunU64BinOp(execution_tier, kExprI64AtomicXor, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange) {
- RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange);
+ RunU64BinOp(execution_tier, kExprI64AtomicExchange, Exchange);
}
-void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
+void RunU32BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint32BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
r.builder().SetHasSharedMemory();
@@ -74,28 +74,28 @@ void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
}
WASM_EXEC_TEST(I64AtomicAdd32U) {
- RunU32BinOp(execution_mode, kExprI64AtomicAdd32U, Add);
+ RunU32BinOp(execution_tier, kExprI64AtomicAdd32U, Add);
}
WASM_EXEC_TEST(I64AtomicSub32U) {
- RunU32BinOp(execution_mode, kExprI64AtomicSub32U, Sub);
+ RunU32BinOp(execution_tier, kExprI64AtomicSub32U, Sub);
}
WASM_EXEC_TEST(I64AtomicAnd32U) {
- RunU32BinOp(execution_mode, kExprI64AtomicAnd32U, And);
+ RunU32BinOp(execution_tier, kExprI64AtomicAnd32U, And);
}
WASM_EXEC_TEST(I64AtomicOr32U) {
- RunU32BinOp(execution_mode, kExprI64AtomicOr32U, Or);
+ RunU32BinOp(execution_tier, kExprI64AtomicOr32U, Or);
}
WASM_EXEC_TEST(I64AtomicXor32U) {
- RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor);
+ RunU32BinOp(execution_tier, kExprI64AtomicXor32U, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange32U) {
- RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange);
+ RunU32BinOp(execution_tier, kExprI64AtomicExchange32U, Exchange);
}
-void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
+void RunU16BinOp(ExecutionTier tier, WasmOpcode wasm_op,
Uint16BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(mode);
+ WasmRunner<uint64_t, uint64_t> r(tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -115,28 +115,28 @@ void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
}
WASM_EXEC_TEST(I64AtomicAdd16U) {
- RunU16BinOp(execution_mode, kExprI64AtomicAdd16U, Add);
+ RunU16BinOp(execution_tier, kExprI64AtomicAdd16U, Add);
}
WASM_EXEC_TEST(I64AtomicSub16U) {
- RunU16BinOp(execution_mode, kExprI64AtomicSub16U, Sub);
+ RunU16BinOp(execution_tier, kExprI64AtomicSub16U, Sub);
}
WASM_EXEC_TEST(I64AtomicAnd16U) {
- RunU16BinOp(execution_mode, kExprI64AtomicAnd16U, And);
+ RunU16BinOp(execution_tier, kExprI64AtomicAnd16U, And);
}
WASM_EXEC_TEST(I64AtomicOr16U) {
- RunU16BinOp(execution_mode, kExprI64AtomicOr16U, Or);
+ RunU16BinOp(execution_tier, kExprI64AtomicOr16U, Or);
}
WASM_EXEC_TEST(I64AtomicXor16U) {
- RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor);
+ RunU16BinOp(execution_tier, kExprI64AtomicXor16U, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange16U) {
- RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange);
+ RunU16BinOp(execution_tier, kExprI64AtomicExchange16U, Exchange);
}
-void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
+void RunU8BinOp(ExecutionTier execution_tier, WasmOpcode wasm_op,
Uint8BinOp expected_op) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
@@ -155,27 +155,27 @@ void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
}
WASM_EXEC_TEST(I64AtomicAdd8U) {
- RunU8BinOp(execution_mode, kExprI64AtomicAdd8U, Add);
+ RunU8BinOp(execution_tier, kExprI64AtomicAdd8U, Add);
}
WASM_EXEC_TEST(I64AtomicSub8U) {
- RunU8BinOp(execution_mode, kExprI64AtomicSub8U, Sub);
+ RunU8BinOp(execution_tier, kExprI64AtomicSub8U, Sub);
}
WASM_EXEC_TEST(I64AtomicAnd8U) {
- RunU8BinOp(execution_mode, kExprI64AtomicAnd8U, And);
+ RunU8BinOp(execution_tier, kExprI64AtomicAnd8U, And);
}
WASM_EXEC_TEST(I64AtomicOr8U) {
- RunU8BinOp(execution_mode, kExprI64AtomicOr8U, Or);
+ RunU8BinOp(execution_tier, kExprI64AtomicOr8U, Or);
}
WASM_EXEC_TEST(I64AtomicXor8U) {
- RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor);
+ RunU8BinOp(execution_tier, kExprI64AtomicXor8U, Xor);
}
WASM_EXEC_TEST(I64AtomicExchange8U) {
- RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange);
+ RunU8BinOp(execution_tier, kExprI64AtomicExchange8U, Exchange);
}
WASM_EXEC_TEST(I64AtomicCompareExchange) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
@@ -196,7 +196,7 @@ WASM_EXEC_TEST(I64AtomicCompareExchange) {
WASM_EXEC_TEST(I64AtomicCompareExchange32U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
@@ -218,7 +218,7 @@ WASM_EXEC_TEST(I64AtomicCompareExchange32U) {
WASM_EXEC_TEST(I64AtomicCompareExchange16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -240,7 +240,7 @@ WASM_EXEC_TEST(I64AtomicCompareExchange16U) {
WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
BUILD(r,
@@ -260,7 +260,7 @@ WASM_EXEC_TEST(I32AtomicCompareExchange8U) {
WASM_EXEC_TEST(I64AtomicLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t> r(execution_mode);
+ WasmRunner<uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
@@ -276,7 +276,7 @@ WASM_EXEC_TEST(I64AtomicLoad) {
WASM_EXEC_TEST(I64AtomicLoad32U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t> r(execution_mode);
+ WasmRunner<uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
@@ -292,7 +292,7 @@ WASM_EXEC_TEST(I64AtomicLoad32U) {
WASM_EXEC_TEST(I64AtomicLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t> r(execution_mode);
+ WasmRunner<uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -308,7 +308,7 @@ WASM_EXEC_TEST(I64AtomicLoad16U) {
WASM_EXEC_TEST(I64AtomicLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t> r(execution_mode);
+ WasmRunner<uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
BUILD(r, WASM_ATOMICS_LOAD_OP(kExprI64AtomicLoad8U, WASM_ZERO,
@@ -323,7 +323,7 @@ WASM_EXEC_TEST(I64AtomicLoad8U) {
WASM_EXEC_TEST(I64AtomicStoreLoad) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint64_t* memory =
r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
@@ -343,7 +343,7 @@ WASM_EXEC_TEST(I64AtomicStoreLoad) {
WASM_EXEC_TEST(I64AtomicStoreLoad32U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
@@ -364,7 +364,7 @@ WASM_EXEC_TEST(I64AtomicStoreLoad32U) {
WASM_EXEC_TEST(I64AtomicStoreLoad16U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint16_t* memory =
r.builder().AddMemoryElems<uint16_t>(kWasmPageSize / sizeof(uint16_t));
@@ -385,7 +385,7 @@ WASM_EXEC_TEST(I64AtomicStoreLoad16U) {
WASM_EXEC_TEST(I64AtomicStoreLoad8U) {
EXPERIMENTAL_FLAG_SCOPE(threads);
- WasmRunner<uint64_t, uint64_t> r(execution_mode);
+ WasmRunner<uint64_t, uint64_t> r(execution_tier);
r.builder().SetHasSharedMemory();
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 76ca00cb3b..f788cc84b6 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -22,7 +22,7 @@ namespace wasm {
namespace test_run_wasm_interpreter {
TEST(Run_WasmInt8Const_i) {
- WasmRunner<int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t> r(ExecutionTier::kInterpreter);
const byte kExpectedValue = 109;
// return(kExpectedValue)
BUILD(r, WASM_I32V_2(kExpectedValue));
@@ -30,14 +30,14 @@ TEST(Run_WasmInt8Const_i) {
}
TEST(Run_WasmIfElse) {
- WasmRunner<int32_t, int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, int32_t> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_I32V_1(9), WASM_I32V_1(10)));
CHECK_EQ(10, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
TEST(Run_WasmIfReturn) {
- WasmRunner<int32_t, int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, int32_t> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_RETURN1(WASM_I32V_2(77))),
WASM_I32V_2(65));
CHECK_EQ(65, r.Call(0));
@@ -53,7 +53,7 @@ TEST(Run_WasmNopsN) {
code[nops] = kExprI32Const;
code[nops + 1] = expected;
- WasmRunner<int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t> r(ExecutionTier::kInterpreter);
r.Build(code, code + nops + 2);
CHECK_EQ(expected, r.Call());
}
@@ -76,7 +76,7 @@ TEST(Run_WasmConstsN) {
}
}
- WasmRunner<int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t> r(ExecutionTier::kInterpreter);
r.Build(code, code + (count * 3));
CHECK_EQ(expected, r.Call());
}
@@ -95,7 +95,7 @@ TEST(Run_WasmBlocksN) {
code[2 + nops + 1] = expected;
code[2 + nops + 2] = kExprEnd;
- WasmRunner<int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t> r(ExecutionTier::kInterpreter);
r.Build(code, code + nops + kExtra);
CHECK_EQ(expected, r.Call());
}
@@ -120,7 +120,7 @@ TEST(Run_WasmBlockBreakN) {
code[2 + index + 2] = kExprBr;
code[2 + index + 3] = 0;
- WasmRunner<int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t> r(ExecutionTier::kInterpreter);
r.Build(code, code + kMaxNops + kExtra);
CHECK_EQ(expected, r.Call());
}
@@ -128,7 +128,7 @@ TEST(Run_WasmBlockBreakN) {
}
TEST(Run_Wasm_nested_ifs_i) {
- WasmRunner<int32_t, int32_t, int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, int32_t, int32_t> r(ExecutionTier::kInterpreter);
BUILD(
r,
@@ -178,7 +178,7 @@ TEST(Breakpoint_I32Add) {
Find(code, sizeof(code), kNumBreakpoints, kExprGetLocal, kExprGetLocal,
kExprI32Add);
- WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, uint32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.Build(code, code + arraysize(code));
@@ -217,7 +217,7 @@ TEST(Step_I32Mul) {
static const int kTraceLength = 4;
byte code[] = {WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
- WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, uint32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.Build(code, code + arraysize(code));
@@ -255,7 +255,7 @@ TEST(Breakpoint_I32And_disable) {
std::unique_ptr<int[]> offsets =
Find(code, sizeof(code), kNumBreakpoints, kExprI32And);
- WasmRunner<int32_t, uint32_t, uint32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, uint32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.Build(code, code + arraysize(code));
@@ -293,14 +293,14 @@ TEST(Breakpoint_I32And_disable) {
TEST(GrowMemory) {
{
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
CHECK_EQ(1, r.Call(1));
}
{
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
r.builder().SetMaxMemPages(10);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
@@ -311,7 +311,7 @@ TEST(GrowMemory) {
TEST(GrowMemoryPreservesData) {
int32_t index = 16;
int32_t value = 2335;
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
WASM_I32V(value)),
@@ -322,7 +322,7 @@ TEST(GrowMemoryPreservesData) {
TEST(GrowMemoryInvalidSize) {
// Grow memory by an invalid amount without initial memory.
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, uint32_t> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
CHECK_EQ(-1, r.Call(1048575));
@@ -330,7 +330,7 @@ TEST(GrowMemoryInvalidSize) {
TEST(TestPossibleNondeterminism) {
{
- WasmRunner<int32_t, float> r(kExecuteInterpreter);
+ WasmRunner<int32_t, float> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_I32_REINTERPRET_F32(WASM_GET_LOCAL(0)));
r.Call(1048575.5f);
CHECK(!r.possible_nondeterminism());
@@ -338,7 +338,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<int64_t, double> r(kExecuteInterpreter);
+ WasmRunner<int64_t, double> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_I64_REINTERPRET_F64(WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
@@ -346,7 +346,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<float, float> r(kExecuteInterpreter);
+ WasmRunner<float, float> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F32_COPYSIGN(WASM_F32(42.0f), WASM_GET_LOCAL(0)));
r.Call(16.0f);
CHECK(!r.possible_nondeterminism());
@@ -354,7 +354,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<double, double> r(kExecuteInterpreter);
+ WasmRunner<double, double> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F64_COPYSIGN(WASM_F64(42.0), WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
@@ -363,7 +363,7 @@ TEST(TestPossibleNondeterminism) {
}
{
int32_t index = 16;
- WasmRunner<int32_t, float> r(kExecuteInterpreter);
+ WasmRunner<int32_t, float> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Float32(), WASM_I32V(index),
WASM_GET_LOCAL(0)),
@@ -375,7 +375,7 @@ TEST(TestPossibleNondeterminism) {
}
{
int32_t index = 16;
- WasmRunner<int32_t, double> r(kExecuteInterpreter);
+ WasmRunner<int32_t, double> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_STORE_MEM(MachineType::Float64(), WASM_I32V(index),
WASM_GET_LOCAL(0)),
@@ -386,7 +386,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<float, float> r(kExecuteInterpreter);
+ WasmRunner<float, float> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
r.Call(1048575.5f);
CHECK(!r.possible_nondeterminism());
@@ -394,7 +394,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(r.possible_nondeterminism());
}
{
- WasmRunner<double, double> r(kExecuteInterpreter);
+ WasmRunner<double, double> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
@@ -402,7 +402,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(r.possible_nondeterminism());
}
{
- WasmRunner<int32_t, float> r(kExecuteInterpreter);
+ WasmRunner<int32_t, float> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F32_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
@@ -410,7 +410,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<int32_t, double> r(kExecuteInterpreter);
+ WasmRunner<int32_t, double> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F64_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
@@ -418,7 +418,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(!r.possible_nondeterminism());
}
{
- WasmRunner<float, float> r(kExecuteInterpreter);
+ WasmRunner<float, float> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
r.Call(1048575.5f);
CHECK(!r.possible_nondeterminism());
@@ -426,7 +426,7 @@ TEST(TestPossibleNondeterminism) {
CHECK(r.possible_nondeterminism());
}
{
- WasmRunner<double, double> r(kExecuteInterpreter);
+ WasmRunner<double, double> r(ExecutionTier::kInterpreter);
BUILD(r, WASM_F64_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
r.Call(16.0);
CHECK(!r.possible_nondeterminism());
@@ -436,7 +436,7 @@ TEST(TestPossibleNondeterminism) {
}
TEST(WasmInterpreterActivations) {
- WasmRunner<void> r(kExecuteInterpreter);
+ WasmRunner<void> r(ExecutionTier::kInterpreter);
Isolate* isolate = r.main_isolate();
BUILD(r, WASM_NOP);
@@ -466,7 +466,7 @@ TEST(WasmInterpreterActivations) {
}
TEST(InterpreterLoadWithoutMemory) {
- WasmRunner<int32_t, int32_t> r(kExecuteInterpreter);
+ WasmRunner<int32_t, int32_t> r(ExecutionTier::kInterpreter);
r.builder().AddMemory(0);
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
CHECK_TRAP32(r.Call(0));
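The execution_mode -> execution_tier renames in the surrounding files rely on the test macro that binds this parameter. By analogy with the WASM_SIMD_TEST macro rewritten later in this diff, a hedged sketch of how such a macro can expand one body into a cctest per tier (the real WASM_EXEC_TEST definition lives in the shared test headers and is not part of this hunk; the sketch name is hypothetical):

    // Sketch, not the real definition: expands one test body into a
    // TurboFan run and an interpreter run, binding `execution_tier`.
    #define WASM_EXEC_TEST_SKETCH(name)                          \
      void RunWasm_##name##_Impl(ExecutionTier execution_tier);  \
      TEST(RunWasm_##name##_turbofan) {                          \
        RunWasm_##name##_Impl(ExecutionTier::kOptimized);        \
      }                                                          \
      TEST(RunWasm_##name##_interpreter) {                       \
        RunWasm_##name##_Impl(ExecutionTier::kInterpreter);      \
      }                                                          \
      void RunWasm_##name##_Impl(ExecutionTier execution_tier)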
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 2b62119d25..b9de081c9f 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -7,7 +7,7 @@
#include <stdlib.h>
#include <string.h>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -96,7 +96,7 @@ void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
} // namespace
WASM_EXEC_TEST(Run_Int32Sub_jswrapped) {
- WasmRunner<int, int, int> r(execution_mode);
+ WasmRunner<int, int, int> r(execution_tier);
BUILD(r, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -105,7 +105,7 @@ WASM_EXEC_TEST(Run_Int32Sub_jswrapped) {
}
WASM_EXEC_TEST(Run_Float32Div_jswrapped) {
- WasmRunner<float, float, float> r(execution_mode);
+ WasmRunner<float, float, float> r(execution_tier);
BUILD(r, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -114,7 +114,7 @@ WASM_EXEC_TEST(Run_Float32Div_jswrapped) {
}
WASM_EXEC_TEST(Run_Float64Add_jswrapped) {
- WasmRunner<double, double, double> r(execution_mode);
+ WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -123,7 +123,7 @@ WASM_EXEC_TEST(Run_Float64Add_jswrapped) {
}
WASM_EXEC_TEST(Run_I32Popcount_jswrapped) {
- WasmRunner<int, int> r(execution_mode);
+ WasmRunner<int, int> r(execution_tier);
BUILD(r, WASM_I32_POPCNT(WASM_GET_LOCAL(0)));
Handle<JSFunction> jsfunc = r.builder().WrapCode(r.function()->func_index);
@@ -140,7 +140,7 @@ WASM_EXEC_TEST(Run_CallJS_Add_jswrapped) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
ManuallyImportedJSFunction import = {sigs.i_i(), js_function};
- WasmRunner<int, int> r(execution_mode, &import);
+ WasmRunner<int, int> r(execution_tier, &import);
uint32_t js_index = 0;
WasmFunctionCompiler& t = r.NewFunction(sigs.i_i());
@@ -153,7 +153,7 @@ WASM_EXEC_TEST(Run_CallJS_Add_jswrapped) {
EXPECT_CALL(-666666801, jsfunc, -666666900, -1);
}
-void RunJSSelectTest(WasmExecutionMode mode, int which) {
+void RunJSSelectTest(ExecutionTier tier, int which) {
const int kMaxParams = 8;
PredictableInputValues inputs(0x100);
ValueType type = kWasmF64;
@@ -164,7 +164,7 @@ void RunJSSelectTest(WasmExecutionMode mode, int which) {
FunctionSig sig(1, num_params, types);
ManuallyImportedJSFunction import = CreateJSSelector(&sig, which);
- WasmRunner<void> r(mode, &import);
+ WasmRunner<void> r(tier, &import);
uint32_t js_index = 0;
WasmFunctionCompiler& t = r.NewFunction(&sig);
@@ -191,45 +191,45 @@ void RunJSSelectTest(WasmExecutionMode mode, int which) {
WASM_EXEC_TEST(Run_JSSelect_0) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 0);
+ RunJSSelectTest(execution_tier, 0);
}
WASM_EXEC_TEST(Run_JSSelect_1) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 1);
+ RunJSSelectTest(execution_tier, 1);
}
WASM_EXEC_TEST(Run_JSSelect_2) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 2);
+ RunJSSelectTest(execution_tier, 2);
}
WASM_EXEC_TEST(Run_JSSelect_3) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 3);
+ RunJSSelectTest(execution_tier, 3);
}
WASM_EXEC_TEST(Run_JSSelect_4) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 4);
+ RunJSSelectTest(execution_tier, 4);
}
WASM_EXEC_TEST(Run_JSSelect_5) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 5);
+ RunJSSelectTest(execution_tier, 5);
}
WASM_EXEC_TEST(Run_JSSelect_6) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 6);
+ RunJSSelectTest(execution_tier, 6);
}
WASM_EXEC_TEST(Run_JSSelect_7) {
CcTest::InitializeVM();
- RunJSSelectTest(execution_mode, 7);
+ RunJSSelectTest(execution_tier, 7);
}
-void RunWASMSelectTest(WasmExecutionMode mode, int which) {
+void RunWASMSelectTest(ExecutionTier tier, int which) {
PredictableInputValues inputs(0x200);
Isolate* isolate = CcTest::InitIsolateOnce();
const int kMaxParams = 8;
@@ -239,7 +239,7 @@ void RunWASMSelectTest(WasmExecutionMode mode, int which) {
type, type, type, type};
FunctionSig sig(1, num_params, types);
- WasmRunner<void> r(mode);
+ WasmRunner<void> r(tier);
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(which));
Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
@@ -262,46 +262,45 @@ void RunWASMSelectTest(WasmExecutionMode mode, int which) {
WASM_EXEC_TEST(Run_WASMSelect_0) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 0);
+ RunWASMSelectTest(execution_tier, 0);
}
WASM_EXEC_TEST(Run_WASMSelect_1) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 1);
+ RunWASMSelectTest(execution_tier, 1);
}
WASM_EXEC_TEST(Run_WASMSelect_2) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 2);
+ RunWASMSelectTest(execution_tier, 2);
}
WASM_EXEC_TEST(Run_WASMSelect_3) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 3);
+ RunWASMSelectTest(execution_tier, 3);
}
WASM_EXEC_TEST(Run_WASMSelect_4) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 4);
+ RunWASMSelectTest(execution_tier, 4);
}
WASM_EXEC_TEST(Run_WASMSelect_5) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 5);
+ RunWASMSelectTest(execution_tier, 5);
}
WASM_EXEC_TEST(Run_WASMSelect_6) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 6);
+ RunWASMSelectTest(execution_tier, 6);
}
WASM_EXEC_TEST(Run_WASMSelect_7) {
CcTest::InitializeVM();
- RunWASMSelectTest(execution_mode, 7);
+ RunWASMSelectTest(execution_tier, 7);
}
-void RunWASMSelectAlignTest(WasmExecutionMode mode, int num_args,
- int num_params) {
+void RunWASMSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
PredictableInputValues inputs(0x300);
Isolate* isolate = CcTest::InitIsolateOnce();
const int kMaxParams = 10;
@@ -312,7 +311,7 @@ void RunWASMSelectAlignTest(WasmExecutionMode mode, int num_args,
FunctionSig sig(1, num_params, types);
for (int which = 0; which < num_params; which++) {
- WasmRunner<void> r(mode);
+ WasmRunner<void> r(tier);
WasmFunctionCompiler& t = r.NewFunction(&sig);
BUILD(t, WASM_GET_LOCAL(which));
Handle<JSFunction> jsfunc = r.builder().WrapCode(t.function_index());
@@ -336,67 +335,66 @@ void RunWASMSelectAlignTest(WasmExecutionMode mode, int num_args,
WASM_EXEC_TEST(Run_WASMSelectAlign_0) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 0, 1);
- RunWASMSelectAlignTest(execution_mode, 0, 2);
+ RunWASMSelectAlignTest(execution_tier, 0, 1);
+ RunWASMSelectAlignTest(execution_tier, 0, 2);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_1) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 1, 2);
- RunWASMSelectAlignTest(execution_mode, 1, 3);
+ RunWASMSelectAlignTest(execution_tier, 1, 2);
+ RunWASMSelectAlignTest(execution_tier, 1, 3);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_2) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 2, 3);
- RunWASMSelectAlignTest(execution_mode, 2, 4);
+ RunWASMSelectAlignTest(execution_tier, 2, 3);
+ RunWASMSelectAlignTest(execution_tier, 2, 4);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_3) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 3, 3);
- RunWASMSelectAlignTest(execution_mode, 3, 4);
+ RunWASMSelectAlignTest(execution_tier, 3, 3);
+ RunWASMSelectAlignTest(execution_tier, 3, 4);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_4) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 4, 3);
- RunWASMSelectAlignTest(execution_mode, 4, 4);
+ RunWASMSelectAlignTest(execution_tier, 4, 3);
+ RunWASMSelectAlignTest(execution_tier, 4, 4);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_7) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 7, 5);
- RunWASMSelectAlignTest(execution_mode, 7, 6);
- RunWASMSelectAlignTest(execution_mode, 7, 7);
+ RunWASMSelectAlignTest(execution_tier, 7, 5);
+ RunWASMSelectAlignTest(execution_tier, 7, 6);
+ RunWASMSelectAlignTest(execution_tier, 7, 7);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_8) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 8, 5);
- RunWASMSelectAlignTest(execution_mode, 8, 6);
- RunWASMSelectAlignTest(execution_mode, 8, 7);
- RunWASMSelectAlignTest(execution_mode, 8, 8);
+ RunWASMSelectAlignTest(execution_tier, 8, 5);
+ RunWASMSelectAlignTest(execution_tier, 8, 6);
+ RunWASMSelectAlignTest(execution_tier, 8, 7);
+ RunWASMSelectAlignTest(execution_tier, 8, 8);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_9) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 9, 6);
- RunWASMSelectAlignTest(execution_mode, 9, 7);
- RunWASMSelectAlignTest(execution_mode, 9, 8);
- RunWASMSelectAlignTest(execution_mode, 9, 9);
+ RunWASMSelectAlignTest(execution_tier, 9, 6);
+ RunWASMSelectAlignTest(execution_tier, 9, 7);
+ RunWASMSelectAlignTest(execution_tier, 9, 8);
+ RunWASMSelectAlignTest(execution_tier, 9, 9);
}
WASM_EXEC_TEST(Run_WASMSelectAlign_10) {
CcTest::InitializeVM();
- RunWASMSelectAlignTest(execution_mode, 10, 7);
- RunWASMSelectAlignTest(execution_mode, 10, 8);
- RunWASMSelectAlignTest(execution_mode, 10, 9);
- RunWASMSelectAlignTest(execution_mode, 10, 10);
+ RunWASMSelectAlignTest(execution_tier, 10, 7);
+ RunWASMSelectAlignTest(execution_tier, 10, 8);
+ RunWASMSelectAlignTest(execution_tier, 10, 9);
+ RunWASMSelectAlignTest(execution_tier, 10, 10);
}
-void RunJSSelectAlignTest(WasmExecutionMode mode, int num_args,
- int num_params) {
+void RunJSSelectAlignTest(ExecutionTier tier, int num_args, int num_params) {
PredictableInputValues inputs(0x400);
Isolate* isolate = CcTest::InitIsolateOnce();
Factory* factory = isolate->factory();
@@ -427,7 +425,7 @@ void RunJSSelectAlignTest(WasmExecutionMode mode, int num_args,
for (int which = 0; which < num_params; which++) {
HandleScope scope(isolate);
ManuallyImportedJSFunction import = CreateJSSelector(&sig, which);
- WasmRunner<void> r(mode, &import);
+ WasmRunner<void> r(tier, &import);
WasmFunctionCompiler& t = r.NewFunction(&sig);
t.Build(&code[0], &code[end]);
@@ -454,64 +452,64 @@ void RunJSSelectAlignTest(WasmExecutionMode mode, int num_args,
WASM_EXEC_TEST(Run_JSSelectAlign_0) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 0, 1);
- RunJSSelectAlignTest(execution_mode, 0, 2);
+ RunJSSelectAlignTest(execution_tier, 0, 1);
+ RunJSSelectAlignTest(execution_tier, 0, 2);
}
WASM_EXEC_TEST(Run_JSSelectAlign_1) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 1, 2);
- RunJSSelectAlignTest(execution_mode, 1, 3);
+ RunJSSelectAlignTest(execution_tier, 1, 2);
+ RunJSSelectAlignTest(execution_tier, 1, 3);
}
WASM_EXEC_TEST(Run_JSSelectAlign_2) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 2, 3);
- RunJSSelectAlignTest(execution_mode, 2, 4);
+ RunJSSelectAlignTest(execution_tier, 2, 3);
+ RunJSSelectAlignTest(execution_tier, 2, 4);
}
WASM_EXEC_TEST(Run_JSSelectAlign_3) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 3, 3);
- RunJSSelectAlignTest(execution_mode, 3, 4);
+ RunJSSelectAlignTest(execution_tier, 3, 3);
+ RunJSSelectAlignTest(execution_tier, 3, 4);
}
WASM_EXEC_TEST(Run_JSSelectAlign_4) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 4, 3);
- RunJSSelectAlignTest(execution_mode, 4, 4);
+ RunJSSelectAlignTest(execution_tier, 4, 3);
+ RunJSSelectAlignTest(execution_tier, 4, 4);
}
WASM_EXEC_TEST(Run_JSSelectAlign_7) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 7, 3);
- RunJSSelectAlignTest(execution_mode, 7, 4);
- RunJSSelectAlignTest(execution_mode, 7, 4);
- RunJSSelectAlignTest(execution_mode, 7, 4);
+ RunJSSelectAlignTest(execution_tier, 7, 3);
+ RunJSSelectAlignTest(execution_tier, 7, 4);
+ RunJSSelectAlignTest(execution_tier, 7, 4);
+ RunJSSelectAlignTest(execution_tier, 7, 4);
}
WASM_EXEC_TEST(Run_JSSelectAlign_8) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 8, 5);
- RunJSSelectAlignTest(execution_mode, 8, 6);
- RunJSSelectAlignTest(execution_mode, 8, 7);
- RunJSSelectAlignTest(execution_mode, 8, 8);
+ RunJSSelectAlignTest(execution_tier, 8, 5);
+ RunJSSelectAlignTest(execution_tier, 8, 6);
+ RunJSSelectAlignTest(execution_tier, 8, 7);
+ RunJSSelectAlignTest(execution_tier, 8, 8);
}
WASM_EXEC_TEST(Run_JSSelectAlign_9) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 9, 6);
- RunJSSelectAlignTest(execution_mode, 9, 7);
- RunJSSelectAlignTest(execution_mode, 9, 8);
- RunJSSelectAlignTest(execution_mode, 9, 9);
+ RunJSSelectAlignTest(execution_tier, 9, 6);
+ RunJSSelectAlignTest(execution_tier, 9, 7);
+ RunJSSelectAlignTest(execution_tier, 9, 8);
+ RunJSSelectAlignTest(execution_tier, 9, 9);
}
WASM_EXEC_TEST(Run_JSSelectAlign_10) {
CcTest::InitializeVM();
- RunJSSelectAlignTest(execution_mode, 10, 7);
- RunJSSelectAlignTest(execution_mode, 10, 8);
- RunJSSelectAlignTest(execution_mode, 10, 9);
- RunJSSelectAlignTest(execution_mode, 10, 10);
+ RunJSSelectAlignTest(execution_tier, 10, 7);
+ RunJSSelectAlignTest(execution_tier, 10, 8);
+ RunJSSelectAlignTest(execution_tier, 10, 9);
+ RunJSSelectAlignTest(execution_tier, 10, 10);
}
#undef ADD_CODE
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index bbceec73a3..d25aeafa33 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -5,7 +5,7 @@
#include <stdlib.h>
#include <string.h>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/snapshot/code-serializer.h"
#include "src/version.h"
@@ -886,9 +886,11 @@ TEST(AtomicOpDisassembly) {
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
+ auto enabled_features = WasmFeaturesFromIsolate(isolate);
MaybeHandle<WasmModuleObject> module_object =
isolate->wasm_engine()->SyncCompile(
- isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ isolate, enabled_features, &thrower,
+ ModuleWireBytes(buffer.begin(), buffer.end()));
module_object.ToHandleChecked()->DisassembleFunction(0);
}
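The SyncCompile hunk above shows the new calling convention: the per-isolate WASM feature set is captured once and passed explicitly into the compilation entry point. Read without the +/- markers, the resulting call pattern is (only names visible in the hunk are used; nothing beyond that is assumed):

    // Capture the per-isolate feature flags, then thread them through the
    // synchronous compile call together with the error thrower and wire bytes.
    auto enabled_features = WasmFeaturesFromIsolate(isolate);
    MaybeHandle<WasmModuleObject> module_object =
        isolate->wasm_engine()->SyncCompile(
            isolate, enabled_features, &thrower,
            ModuleWireBytes(buffer.begin(), buffer.end()));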
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc b/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
index 17a79bc27f..71b5285439 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-sign-extension.cc
@@ -13,7 +13,7 @@ namespace wasm {
// TODO(gdeepti): Enable tests to run in the interpreter.
WASM_EXEC_TEST(I32SExtendI8) {
EXPERIMENTAL_FLAG_SCOPE(se);
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_SIGN_EXT_I8(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
@@ -24,7 +24,7 @@ WASM_EXEC_TEST(I32SExtendI8) {
WASM_EXEC_TEST(I32SExtendI16) {
EXPERIMENTAL_FLAG_SCOPE(se);
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_SIGN_EXT_I16(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
@@ -35,7 +35,7 @@ WASM_EXEC_TEST(I32SExtendI16) {
WASM_EXEC_TEST(I64SExtendI8) {
EXPERIMENTAL_FLAG_SCOPE(se);
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SIGN_EXT_I8(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
@@ -46,7 +46,7 @@ WASM_EXEC_TEST(I64SExtendI8) {
WASM_EXEC_TEST(I64SExtendI16) {
EXPERIMENTAL_FLAG_SCOPE(se);
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SIGN_EXT_I16(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
@@ -57,7 +57,7 @@ WASM_EXEC_TEST(I64SExtendI16) {
WASM_EXEC_TEST(I64SExtendI32) {
EXPERIMENTAL_FLAG_SCOPE(se);
- WasmRunner<int64_t, int64_t> r(execution_mode);
+ WasmRunner<int64_t, int64_t> r(execution_tier);
BUILD(r, WASM_I64_SIGN_EXT_I32(WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(0));
CHECK_EQ(1, r.Call(1));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 271ce341fe..f60c65b727 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -32,23 +32,22 @@ typedef int8_t (*Int8BinOp)(int8_t, int8_t);
typedef int (*Int8CompareOp)(int8_t, int8_t);
typedef int8_t (*Int8ShiftOp)(int8_t, int);
-#define WASM_SIMD_TEST(name) \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- WasmExecutionMode execution_mode); \
- TEST(RunWasm_##name##_turbofan) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, kExecuteTurbofan); \
- } \
- TEST(RunWasm_##name##_interpreter) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, kExecuteInterpreter); \
- } \
- TEST(RunWasm_##name##_simd_lowered) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kLowerSimd, kExecuteTurbofan); \
- } \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- WasmExecutionMode execution_mode)
+#define WASM_SIMD_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ ExecutionTier execution_tier); \
+ TEST(RunWasm_##name##_turbofan) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kOptimized); \
+ } \
+ TEST(RunWasm_##name##_interpreter) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kInterpreter); \
+ } \
+ TEST(RunWasm_##name##_simd_lowered) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kOptimized); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
// Generic expected value functions.
template <typename T>
@@ -400,7 +399,7 @@ bool SkipFPValue(float x) {
bool SkipFPExpectedValue(float x) { return std::isnan(x) || SkipFPValue(x); }
WASM_SIMD_TEST(F32x4Splat) {
- WasmRunner<int32_t, float> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, float> r(execution_tier, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -414,7 +413,7 @@ WASM_SIMD_TEST(F32x4Splat) {
}
WASM_SIMD_TEST(F32x4ReplaceLane) {
- WasmRunner<int32_t, float, float> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -443,7 +442,7 @@ WASM_SIMD_TEST(F32x4ReplaceLane) {
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(F32x4ConvertI32x4) {
- WasmRunner<int32_t, int32_t, float, float> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, float, float> r(execution_tier, lower_simd);
byte a = 0;
byte expected_signed = 1;
byte expected_unsigned = 2;
@@ -467,10 +466,10 @@ WASM_SIMD_TEST(F32x4ConvertI32x4) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-void RunF32x4UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunF32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, FloatUnOp expected_op,
float error = 0.0f) {
- WasmRunner<int32_t, float, float, float> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
byte a = 0;
byte low = 1;
byte high = 2;
@@ -490,27 +489,27 @@ void RunF32x4UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(F32x4Abs) {
- RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4Abs, std::abs);
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
}
WASM_SIMD_TEST(F32x4Neg) {
- RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4Neg, Negate);
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Neg, Negate);
}
static const float kApproxError = 0.01f;
WASM_SIMD_TEST(F32x4RecipApprox) {
- RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4RecipApprox, Recip,
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox, Recip,
kApproxError);
}
WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
- RunF32x4UnOpTest(execution_mode, lower_simd, kExprF32x4RecipSqrtApprox,
+ RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
RecipSqrt, kApproxError);
}
-void RunF32x4BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunF32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, FloatBinOp expected_op) {
- WasmRunner<int32_t, float, float, float> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -534,25 +533,24 @@ void RunF32x4BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(F32x4Add) {
- RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Add, Add);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Add, Add);
}
WASM_SIMD_TEST(F32x4Sub) {
- RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Sub, Sub);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Sub, Sub);
}
WASM_SIMD_TEST(F32x4Mul) {
- RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Mul, Mul);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
}
WASM_SIMD_TEST(F32x4_Min) {
- RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Min, JSMin);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
}
WASM_SIMD_TEST(F32x4_Max) {
- RunF32x4BinOpTest(execution_mode, lower_simd, kExprF32x4Max, JSMax);
+ RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
}
-void RunF32x4CompareOpTest(WasmExecutionMode execution_mode,
- LowerSimd lower_simd, WasmOpcode simd_op,
- FloatCompareOp expected_op) {
- WasmRunner<int32_t, float, float, int32_t> r(execution_mode, lower_simd);
+void RunF32x4CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode simd_op, FloatCompareOp expected_op) {
+ WasmRunner<int32_t, float, float, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -576,27 +574,27 @@ void RunF32x4CompareOpTest(WasmExecutionMode execution_mode,
}
WASM_SIMD_TEST(F32x4Eq) {
- RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Eq, Equal);
+ RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Eq, Equal);
}
WASM_SIMD_TEST(F32x4Ne) {
- RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Ne, NotEqual);
+ RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ne, NotEqual);
}
WASM_SIMD_TEST(F32x4Gt) {
- RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Gt, Greater);
+ RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Gt, Greater);
}
WASM_SIMD_TEST(F32x4Ge) {
- RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Ge, GreaterEqual);
+ RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ge, GreaterEqual);
}
WASM_SIMD_TEST(F32x4Lt) {
- RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Lt, Less);
+ RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Lt, Less);
}
WASM_SIMD_TEST(F32x4Le) {
- RunF32x4CompareOpTest(execution_mode, lower_simd, kExprF32x4Le, LessEqual);
+ RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
WASM_SIMD_TEST(I32x4Splat) {
@@ -610,7 +608,7 @@ WASM_SIMD_TEST(I32x4Splat) {
// return 0
//
// return 1
- WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -621,7 +619,7 @@ WASM_SIMD_TEST(I32x4Splat) {
}
WASM_SIMD_TEST(I32x4ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -647,7 +645,7 @@ WASM_SIMD_TEST(I32x4ReplaceLane) {
}
WASM_SIMD_TEST(I16x8Splat) {
- WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -658,7 +656,7 @@ WASM_SIMD_TEST(I16x8Splat) {
}
WASM_SIMD_TEST(I16x8ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -707,7 +705,7 @@ WASM_SIMD_TEST(I16x8ReplaceLane) {
}
WASM_SIMD_TEST(I8x16Splat) {
- WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
byte lane_val = 0;
byte simd = r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -718,7 +716,7 @@ WASM_SIMD_TEST(I8x16Splat) {
}
WASM_SIMD_TEST(I8x16ReplaceLane) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte old_val = 0;
byte new_val = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -839,7 +837,7 @@ int32_t ConvertToInt(double val, bool unsigned_integer) {
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(I32x4ConvertF32x4) {
- WasmRunner<int32_t, float, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, float, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte expected_signed = 1;
byte expected_unsigned = 2;
@@ -864,7 +862,7 @@ WASM_SIMD_TEST(I32x4ConvertF32x4) {
// Tests both signed and unsigned conversion from I16x8 (unpacking).
WASM_SIMD_TEST(I32x4ConvertI16x8) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t> r(execution_mode,
+ WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t> r(execution_tier,
lower_simd);
byte a = 0;
byte unpacked_signed = 1;
@@ -905,9 +903,9 @@ WASM_SIMD_TEST(I32x4ConvertI16x8) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-void RunI32x4UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int32UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -919,16 +917,16 @@ void RunI32x4UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I32x4Neg) {
- RunI32x4UnOpTest(execution_mode, lower_simd, kExprI32x4Neg, Negate);
+ RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg, Negate);
}
WASM_SIMD_TEST(S128Not) {
- RunI32x4UnOpTest(execution_mode, lower_simd, kExprS128Not, Not);
+ RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not, Not);
}
-void RunI32x4BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI32x4BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int32BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -946,51 +944,50 @@ void RunI32x4BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I32x4Add) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4Add, Add);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add, Add);
}
WASM_SIMD_TEST(I32x4Sub) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4Sub, Sub);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Sub, Sub);
}
WASM_SIMD_TEST(I32x4Mul) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4Mul, Mul);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Mul, Mul);
}
WASM_SIMD_TEST(I32x4MinS) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MinS, Minimum);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinS, Minimum);
}
WASM_SIMD_TEST(I32x4MaxS) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MaxS, Maximum);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxS, Maximum);
}
WASM_SIMD_TEST(I32x4MinU) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MinU,
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinU,
UnsignedMinimum);
}
WASM_SIMD_TEST(I32x4MaxU) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprI32x4MaxU,
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxU,
UnsignedMaximum);
}
WASM_SIMD_TEST(S128And) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprS128And, And);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128And, And);
}
WASM_SIMD_TEST(S128Or) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprS128Or, Or);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Or, Or);
}
WASM_SIMD_TEST(S128Xor) {
- RunI32x4BinOpTest(execution_mode, lower_simd, kExprS128Xor, Xor);
+ RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor, Xor);
}
-void RunI32x4CompareOpTest(WasmExecutionMode execution_mode,
- LowerSimd lower_simd, WasmOpcode simd_op,
- Int32CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+void RunI32x4CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode simd_op, Int32CompareOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1008,54 +1005,54 @@ void RunI32x4CompareOpTest(WasmExecutionMode execution_mode,
}
WASM_SIMD_TEST(I32x4Eq) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4Eq, Equal);
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4Eq, Equal);
}
WASM_SIMD_TEST(I32x4Ne) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4Ne, NotEqual);
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4Ne, NotEqual);
}
WASM_SIMD_TEST(I32x4LtS) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LtS, Less);
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4LtS, Less);
}
WASM_SIMD_TEST(I32x4LeS) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LeS, LessEqual);
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4LeS, LessEqual);
}
WASM_SIMD_TEST(I32x4GtS) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GtS, Greater);
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4GtS, Greater);
}
WASM_SIMD_TEST(I32x4GeS) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GeS,
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4GeS,
GreaterEqual);
}
WASM_SIMD_TEST(I32x4LtU) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LtU,
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4LtU,
UnsignedLess);
}
WASM_SIMD_TEST(I32x4LeU) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4LeU,
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4LeU,
UnsignedLessEqual);
}
WASM_SIMD_TEST(I32x4GtU) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GtU,
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4GtU,
UnsignedGreater);
}
WASM_SIMD_TEST(I32x4GeU) {
- RunI32x4CompareOpTest(execution_mode, lower_simd, kExprI32x4GeU,
+ RunI32x4CompareOpTest(execution_tier, lower_simd, kExprI32x4GeU,
UnsignedGreaterEqual);
}
-void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI32x4ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int32ShiftOp expected_op) {
for (int shift = 1; shift < 32; ++shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1069,17 +1066,17 @@ void RunI32x4ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I32x4Shl) {
- RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4Shl,
+ RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
LogicalShiftLeft);
}
WASM_SIMD_TEST(I32x4ShrS) {
- RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrS,
+ RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
ArithmeticShiftRight);
}
WASM_SIMD_TEST(I32x4ShrU) {
- RunI32x4ShiftOpTest(execution_mode, lower_simd, kExprI32x4ShrU,
+ RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
LogicalShiftRight);
}
@@ -1087,7 +1084,7 @@ WASM_SIMD_TEST(I32x4ShrU) {
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t> r(execution_mode,
+ WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t> r(execution_tier,
lower_simd);
byte a = 0;
byte unpacked_signed = 1;
@@ -1130,9 +1127,9 @@ WASM_SIMD_TEST(I16x8ConvertI8x16) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-void RunI16x8UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI16x8UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int16UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1144,7 +1141,7 @@ void RunI16x8UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I16x8Neg) {
- RunI16x8UnOpTest(execution_mode, lower_simd, kExprI16x8Neg, Negate);
+ RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg, Negate);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
@@ -1152,7 +1149,7 @@ WASM_SIMD_TEST(I16x8Neg) {
// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_SIMD_TEST(I16x8ConvertI32x4) {
WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t> r(
- execution_mode, lower_simd);
+ execution_tier, lower_simd);
byte a = 0;
byte b = 1;
// indices for packed signed params
@@ -1196,9 +1193,9 @@ WASM_SIMD_TEST(I16x8ConvertI32x4) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-void RunI16x8BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI16x8BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int16BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1216,59 +1213,58 @@ void RunI16x8BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I16x8Add) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8Add, Add);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add, Add);
}
WASM_SIMD_TEST(I16x8AddSaturateS) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8AddSaturateS,
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateS,
AddSaturate);
}
WASM_SIMD_TEST(I16x8Sub) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8Sub, Sub);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Sub, Sub);
}
WASM_SIMD_TEST(I16x8SubSaturateS) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8SubSaturateS,
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateS,
SubSaturate);
}
WASM_SIMD_TEST(I16x8Mul) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8Mul, Mul);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Mul, Mul);
}
WASM_SIMD_TEST(I16x8MinS) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MinS, Minimum);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinS, Minimum);
}
WASM_SIMD_TEST(I16x8MaxS) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MaxS, Maximum);
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxS, Maximum);
}
WASM_SIMD_TEST(I16x8AddSaturateU) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8AddSaturateU,
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSaturateU,
UnsignedAddSaturate);
}
WASM_SIMD_TEST(I16x8SubSaturateU) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8SubSaturateU,
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSaturateU,
UnsignedSubSaturate);
}
WASM_SIMD_TEST(I16x8MinU) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MinU,
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinU,
UnsignedMinimum);
}
WASM_SIMD_TEST(I16x8MaxU) {
- RunI16x8BinOpTest(execution_mode, lower_simd, kExprI16x8MaxU,
+ RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxU,
UnsignedMaximum);
}
-void RunI16x8CompareOpTest(WasmExecutionMode execution_mode,
- LowerSimd lower_simd, WasmOpcode simd_op,
- Int16CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+void RunI16x8CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode simd_op, Int16CompareOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1286,54 +1282,54 @@ void RunI16x8CompareOpTest(WasmExecutionMode execution_mode,
}
WASM_SIMD_TEST(I16x8Eq) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8Eq, Equal);
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8Eq, Equal);
}
WASM_SIMD_TEST(I16x8Ne) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8Ne, NotEqual);
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8Ne, NotEqual);
}
WASM_SIMD_TEST(I16x8LtS) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LtS, Less);
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8LtS, Less);
}
WASM_SIMD_TEST(I16x8LeS) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LeS, LessEqual);
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8LeS, LessEqual);
}
WASM_SIMD_TEST(I16x8GtS) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GtS, Greater);
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8GtS, Greater);
}
WASM_SIMD_TEST(I16x8GeS) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GeS,
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8GeS,
GreaterEqual);
}
WASM_SIMD_TEST(I16x8GtU) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GtU,
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8GtU,
UnsignedGreater);
}
WASM_SIMD_TEST(I16x8GeU) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8GeU,
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8GeU,
UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I16x8LtU) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LtU,
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8LtU,
UnsignedLess);
}
WASM_SIMD_TEST(I16x8LeU) {
- RunI16x8CompareOpTest(execution_mode, lower_simd, kExprI16x8LeU,
+ RunI16x8CompareOpTest(execution_tier, lower_simd, kExprI16x8LeU,
UnsignedLessEqual);
}
-void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI16x8ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int16ShiftOp expected_op) {
for (int shift = 1; shift < 16; ++shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1347,23 +1343,23 @@ void RunI16x8ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I16x8Shl) {
- RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8Shl,
+ RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
LogicalShiftLeft);
}
WASM_SIMD_TEST(I16x8ShrS) {
- RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrS,
+ RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
ArithmeticShiftRight);
}
WASM_SIMD_TEST(I16x8ShrU) {
- RunI16x8ShiftOpTest(execution_mode, lower_simd, kExprI16x8ShrU,
+ RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
LogicalShiftRight);
}
-void RunI8x16UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI8x16UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int8UnOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1375,7 +1371,7 @@ void RunI8x16UnOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I8x16Neg) {
- RunI8x16UnOpTest(execution_mode, lower_simd, kExprI8x16Neg, Negate);
+ RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg, Negate);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
@@ -1383,7 +1379,7 @@ WASM_SIMD_TEST(I8x16Neg) {
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
WasmRunner<int32_t, int32_t, int32_t, int32_t, int32_t, int32_t, int32_t> r(
- execution_mode, lower_simd);
+ execution_tier, lower_simd);
byte a = 0;
byte b = 1;
// indices for packed signed params
@@ -1429,9 +1425,9 @@ WASM_SIMD_TEST(I8x16ConvertI16x8) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-void RunI8x16BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI8x16BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int8BinOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1449,55 +1445,54 @@ void RunI8x16BinOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
}
WASM_SIMD_TEST(I8x16Add) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16Add, Add);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add, Add);
}
WASM_SIMD_TEST(I8x16AddSaturateS) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16AddSaturateS,
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateS,
AddSaturate);
}
WASM_SIMD_TEST(I8x16Sub) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16Sub, Sub);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Sub, Sub);
}
WASM_SIMD_TEST(I8x16SubSaturateS) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16SubSaturateS,
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateS,
SubSaturate);
}
WASM_SIMD_TEST(I8x16MinS) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MinS, Minimum);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinS, Minimum);
}
WASM_SIMD_TEST(I8x16MaxS) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MaxS, Maximum);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxS, Maximum);
}
WASM_SIMD_TEST(I8x16AddSaturateU) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16AddSaturateU,
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSaturateU,
UnsignedAddSaturate);
}
WASM_SIMD_TEST(I8x16SubSaturateU) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16SubSaturateU,
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSaturateU,
UnsignedSubSaturate);
}
WASM_SIMD_TEST(I8x16MinU) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MinU,
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinU,
UnsignedMinimum);
}
WASM_SIMD_TEST(I8x16MaxU) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16MaxU,
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxU,
UnsignedMaximum);
}
-void RunI8x16CompareOpTest(WasmExecutionMode execution_mode,
- LowerSimd lower_simd, WasmOpcode simd_op,
- Int8CompareOp expected_op) {
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+void RunI8x16CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode simd_op, Int8CompareOp expected_op) {
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte b = 1;
byte expected = 2;
@@ -1515,62 +1510,62 @@ void RunI8x16CompareOpTest(WasmExecutionMode execution_mode,
}
WASM_SIMD_TEST(I8x16Eq) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16Eq, Equal);
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16Eq, Equal);
}
WASM_SIMD_TEST(I8x16Ne) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16Ne, NotEqual);
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16Ne, NotEqual);
}
WASM_SIMD_TEST(I8x16GtS) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GtS, Greater);
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16GtS, Greater);
}
WASM_SIMD_TEST(I8x16GeS) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GeS,
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16GeS,
GreaterEqual);
}
WASM_SIMD_TEST(I8x16LtS) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LtS, Less);
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16LtS, Less);
}
WASM_SIMD_TEST(I8x16LeS) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LeS, LessEqual);
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16LeS, LessEqual);
}
WASM_SIMD_TEST(I8x16GtU) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GtU,
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16GtU,
UnsignedGreater);
}
WASM_SIMD_TEST(I8x16GeU) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16GeU,
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16GeU,
UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I8x16LtU) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LtU,
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16LtU,
UnsignedLess);
}
WASM_SIMD_TEST(I8x16LeU) {
- RunI8x16CompareOpTest(execution_mode, lower_simd, kExprI8x16LeU,
+ RunI8x16CompareOpTest(execution_tier, lower_simd, kExprI8x16LeU,
UnsignedLessEqual);
}
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Mul) {
- RunI8x16BinOpTest(execution_mode, lower_simd, kExprI8x16Mul, Mul);
+ RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Mul, Mul);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
-void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunI8x16ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op, Int8ShiftOp expected_op) {
for (int shift = 1; shift < 8; ++shift) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
byte a = 0;
byte expected = 1;
byte simd = r.AllocateLocal(kWasmS128);
@@ -1586,17 +1581,17 @@ void RunI8x16ShiftOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(I8x16Shl) {
- RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16Shl,
+ RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16Shl,
LogicalShiftLeft);
}
WASM_SIMD_TEST(I8x16ShrS) {
- RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrS,
+ RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrS,
ArithmeticShiftRight);
}
WASM_SIMD_TEST(I8x16ShrU) {
- RunI8x16ShiftOpTest(execution_mode, lower_simd, kExprI8x16ShrU,
+ RunI8x16ShiftOpTest(execution_tier, lower_simd, kExprI8x16ShrU,
LogicalShiftRight);
}
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||
@@ -1607,7 +1602,7 @@ WASM_SIMD_TEST(I8x16ShrU) {
// vector.
#define WASM_SIMD_SELECT_TEST(format) \
WASM_SIMD_TEST(S##format##Select) { \
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode, lower_simd); \
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd); \
byte val1 = 0; \
byte val2 = 1; \
byte src1 = r.AllocateLocal(kWasmS128); \
@@ -1647,7 +1642,7 @@ WASM_SIMD_SELECT_TEST(8x16)
// rest 0. The mask is not the result of a comparison op.
#define WASM_SIMD_NON_CANONICAL_SELECT_TEST(format) \
WASM_SIMD_TEST(S##format##NonCanonicalSelect) { \
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode, \
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier, \
lower_simd); \
byte val1 = 0; \
byte val2 = 1; \
@@ -1684,16 +1679,16 @@ WASM_SIMD_NON_CANONICAL_SELECT_TEST(8x16)
// Test binary ops with two lane test patterns, all lanes distinct.
template <typename T>
void RunBinaryLaneOpTest(
- WasmExecutionMode execution_mode, LowerSimd lower_simd, WasmOpcode simd_op,
+ ExecutionTier execution_tier, LowerSimd lower_simd, WasmOpcode simd_op,
const std::array<T, kSimd128Size / sizeof(T)>& expected) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
// Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
T* src0 = r.builder().AddGlobal<T>(kWasmS128);
T* src1 = r.builder().AddGlobal<T>(kWasmS128);
static const int kElems = kSimd128Size / sizeof(T);
for (int i = 0; i < kElems; i++) {
- src0[i] = i;
- src1[i] = kElems + i;
+ WriteLittleEndianValue<T>(&src0[i], i);
+ WriteLittleEndianValue<T>(&src1[i], kElems + i);
}
if (simd_op == kExprS8x16Shuffle) {
BUILD(r,
@@ -1710,50 +1705,50 @@ void RunBinaryLaneOpTest(
CHECK_EQ(1, r.Call());
for (size_t i = 0; i < expected.size(); i++) {
- CHECK_EQ(src0[i], expected[i]);
+ CHECK_EQ(ReadLittleEndianValue<T>(&src0[i]), expected[i]);
}
}
WASM_SIMD_TEST(I32x4AddHoriz) {
// Inputs are [0 1 2 3] and [4 5 6 7].
- RunBinaryLaneOpTest<int32_t>(execution_mode, lower_simd, kExprI32x4AddHoriz,
+ RunBinaryLaneOpTest<int32_t>(execution_tier, lower_simd, kExprI32x4AddHoriz,
{{1, 5, 9, 13}});
}
WASM_SIMD_TEST(I16x8AddHoriz) {
// Inputs are [0 1 2 3 4 5 6 7] and [8 9 10 11 12 13 14 15].
- RunBinaryLaneOpTest<int16_t>(execution_mode, lower_simd, kExprI16x8AddHoriz,
+ RunBinaryLaneOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8AddHoriz,
{{1, 5, 9, 13, 17, 21, 25, 29}});
}
WASM_SIMD_TEST(F32x4AddHoriz) {
// Inputs are [0.0f 1.0f 2.0f 3.0f] and [4.0f 5.0f 6.0f 7.0f].
- RunBinaryLaneOpTest<float>(execution_mode, lower_simd, kExprF32x4AddHoriz,
+ RunBinaryLaneOpTest<float>(execution_tier, lower_simd, kExprF32x4AddHoriz,
{{1.0f, 5.0f, 9.0f, 13.0f}});
}
// Test shuffle ops.
-void RunShuffleOpTest(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+void RunShuffleOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode simd_op,
const std::array<int8_t, kSimd128Size>& shuffle) {
// Test the original shuffle.
- RunBinaryLaneOpTest<int8_t>(execution_mode, lower_simd, simd_op, shuffle);
+ RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, shuffle);
// Test a non-canonical (inputs reversed) version of the shuffle.
std::array<int8_t, kSimd128Size> other_shuffle(shuffle);
for (size_t i = 0; i < shuffle.size(); ++i) other_shuffle[i] ^= kSimd128Size;
- RunBinaryLaneOpTest<int8_t>(execution_mode, lower_simd, simd_op,
+ RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
other_shuffle);
// Test the swizzle (one-operand) version of the shuffle.
std::array<int8_t, kSimd128Size> swizzle(shuffle);
for (size_t i = 0; i < shuffle.size(); ++i) swizzle[i] &= (kSimd128Size - 1);
- RunBinaryLaneOpTest<int8_t>(execution_mode, lower_simd, simd_op, swizzle);
+ RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op, swizzle);
// Test the non-canonical swizzle (one-operand) version of the shuffle.
std::array<int8_t, kSimd128Size> other_swizzle(shuffle);
for (size_t i = 0; i < shuffle.size(); ++i) other_swizzle[i] |= kSimd128Size;
- RunBinaryLaneOpTest<int8_t>(execution_mode, lower_simd, simd_op,
+ RunBinaryLaneOpTest<int8_t>(execution_tier, lower_simd, simd_op,
other_swizzle);
}
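For orientation on the four variants exercised above (original, reversed inputs, swizzle, non-canonical swizzle): an S8x16 shuffle lane index in 0-15 selects from the first input and 16-31 from the second, so XOR with kSimd128Size flips the operand, AND with kSimd128Size - 1 pins every lane to the first input, and OR with kSimd128Size pins it to the second. A small illustrative trace (example values, not taken from the shuffle table):

// Starting from an interleaving shuffle of inputs a (lanes 0-15) and b (16-31):
// shuffle       = { 0, 16,  1, 17, ... }  // alternate lanes of a and b
// other_shuffle = {16,  0, 17,  1, ... }  // ^= 16: same pattern, operands swapped
// swizzle       = { 0,  0,  1,  1, ... }  // &= 15: every lane drawn from a
// other_swizzle = {16, 16, 17, 17, ... }  // |= 16: every lane drawn from b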
@@ -1868,7 +1863,7 @@ ShuffleMap test_shuffles = {
WASM_SIMD_TEST(Name) { \
ShuffleMap::const_iterator it = test_shuffles.find(k##Name); \
DCHECK_NE(it, test_shuffles.end()); \
- RunShuffleOpTest(execution_mode, lower_simd, kExprS8x16Shuffle, \
+ RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, \
it->second); \
}
SHUFFLE_LIST(SHUFFLE_TEST)
@@ -1881,7 +1876,7 @@ WASM_SIMD_TEST(S8x16Blend) {
for (int bias = 1; bias < kSimd128Size; bias++) {
for (int i = 0; i < bias; i++) expected[i] = i;
for (int i = bias; i < kSimd128Size; i++) expected[i] = i + kSimd128Size;
- RunShuffleOpTest(execution_mode, lower_simd, kExprS8x16Shuffle, expected);
+ RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, expected);
}
}
@@ -1899,7 +1894,7 @@ WASM_SIMD_TEST(S8x16Concat) {
for (int j = 0; j < n; ++j) {
expected[i++] = j + kSimd128Size;
}
- RunShuffleOpTest(execution_mode, lower_simd, kExprS8x16Shuffle, expected);
+ RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, expected);
}
}
@@ -1926,7 +1921,7 @@ WASM_SIMD_TEST(S8x16ShuffleFuzz) {
for (int i = 0; i < kTests; ++i) {
auto shuffle = Combine(GetRandomTestShuffle(rng), GetRandomTestShuffle(rng),
GetRandomTestShuffle(rng));
- RunShuffleOpTest(execution_mode, lower_simd, kExprS8x16Shuffle, shuffle);
+ RunShuffleOpTest(execution_tier, lower_simd, kExprS8x16Shuffle, shuffle);
}
}
@@ -1957,35 +1952,34 @@ void BuildShuffle(std::vector<Shuffle>& shuffles, std::vector<byte>* buffer) {
}
// Runs tests of compiled code, using the interpreter as a reference.
-#define WASM_SIMD_COMPILED_TEST(name) \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- WasmExecutionMode execution_mode); \
- TEST(RunWasm_##name##_turbofan) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kNoLowerSimd, kExecuteTurbofan); \
- } \
- TEST(RunWasm_##name##_simd_lowered) { \
- EXPERIMENTAL_FLAG_SCOPE(simd); \
- RunWasm_##name##_Impl(kLowerSimd, kExecuteTurbofan); \
- } \
- void RunWasm_##name##_Impl(LowerSimd lower_simd, \
- WasmExecutionMode execution_mode)
-
-void RunWasmCode(WasmExecutionMode execution_mode, LowerSimd lower_simd,
+#define WASM_SIMD_COMPILED_TEST(name) \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, \
+ ExecutionTier execution_tier); \
+ TEST(RunWasm_##name##_turbofan) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kNoLowerSimd, ExecutionTier::kOptimized); \
+ } \
+ TEST(RunWasm_##name##_simd_lowered) { \
+ EXPERIMENTAL_FLAG_SCOPE(simd); \
+ RunWasm_##name##_Impl(kLowerSimd, ExecutionTier::kOptimized); \
+ } \
+ void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
+
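Under the new definition, WASM_SIMD_COMPILED_TEST registers two optimized-tier variants of each test, with and without scalar lowering of SIMD. As an illustrative expansion (the name Foo is a placeholder, and TEST / EXPERIMENTAL_FLAG_SCOPE are the existing cctest macros):

// Rough expansion of WASM_SIMD_COMPILED_TEST(Foo); Foo is hypothetical.
void RunWasm_Foo_Impl(LowerSimd lower_simd, ExecutionTier execution_tier);
TEST(RunWasm_Foo_turbofan) {
  EXPERIMENTAL_FLAG_SCOPE(simd);
  RunWasm_Foo_Impl(kNoLowerSimd, ExecutionTier::kOptimized);
}
TEST(RunWasm_Foo_simd_lowered) {
  EXPERIMENTAL_FLAG_SCOPE(simd);
  RunWasm_Foo_Impl(kLowerSimd, ExecutionTier::kOptimized);
}
void RunWasm_Foo_Impl(LowerSimd lower_simd, ExecutionTier execution_tier) {
  // ...the test body written after the macro invocation goes here...
}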
+void RunWasmCode(ExecutionTier execution_tier, LowerSimd lower_simd,
const std::vector<byte>& code,
std::array<int8_t, kSimd128Size>* result) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
// Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
int8_t* src0 = r.builder().AddGlobal<int8_t>(kWasmS128);
int8_t* src1 = r.builder().AddGlobal<int8_t>(kWasmS128);
for (int i = 0; i < kSimd128Size; ++i) {
- src0[i] = i;
- src1[i] = kSimd128Size + i;
+ WriteLittleEndianValue<int8_t>(&src0[i], i);
+ WriteLittleEndianValue<int8_t>(&src1[i], kSimd128Size + i);
}
r.Build(code.data(), code.data() + code.size());
CHECK_EQ(1, r.Call());
for (size_t i = 0; i < kSimd128Size; i++) {
- (*result)[i] = src0[i];
+ (*result)[i] = ReadLittleEndianValue<int8_t>(&src0[i]);
}
}
@@ -2009,10 +2003,10 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
// Run the code using the interpreter to get the expected result.
std::array<int8_t, kSimd128Size> expected;
- RunWasmCode(kExecuteInterpreter, kNoLowerSimd, buffer, &expected);
+ RunWasmCode(ExecutionTier::kInterpreter, kNoLowerSimd, buffer, &expected);
// Run the SIMD or scalar lowered compiled code and compare results.
std::array<int8_t, kSimd128Size> result;
- RunWasmCode(execution_mode, lower_simd, buffer, &result);
+ RunWasmCode(execution_tier, lower_simd, buffer, &result);
for (size_t i = 0; i < kSimd128Size; ++i) {
CHECK_EQ(result[i], expected[i]);
}
@@ -2024,7 +2018,7 @@ WASM_SIMD_COMPILED_TEST(S8x16MultiShuffleFuzz) {
// test inputs. Test inputs with all true, all false, one true, and one false.
#define WASM_SIMD_BOOL_REDUCTION_TEST(format, lanes) \
WASM_SIMD_TEST(ReductionTest##lanes) { \
- WasmRunner<int32_t> r(execution_mode, lower_simd); \
+ WasmRunner<int32_t> r(execution_tier, lower_simd); \
byte zero = r.AllocateLocal(kWasmS128); \
byte one_one = r.AllocateLocal(kWasmS128); \
byte reduced = r.AllocateLocal(kWasmI32); \
@@ -2097,7 +2091,7 @@ WASM_SIMD_BOOL_REDUCTION_TEST(16x8, 8)
WASM_SIMD_BOOL_REDUCTION_TEST(8x16, 16)
WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
BUILD(r, WASM_IF_ELSE_I(
WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
@@ -2109,7 +2103,7 @@ WASM_SIMD_TEST(SimdI32x4ExtractWithF32x4) {
// V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_IA32
WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
BUILD(r,
WASM_IF_ELSE_I(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
@@ -2123,7 +2117,7 @@ WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
// representable as a float.
const int kOne = 0x3F800000;
const int kTwo = 0x40000000;
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
BUILD(r,
WASM_IF_ELSE_I(
WASM_F32_EQ(
@@ -2138,7 +2132,7 @@ WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
}
WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
BUILD(r,
WASM_IF_ELSE_I(
WASM_I32_EQ(
@@ -2153,7 +2147,7 @@ WASM_SIMD_TEST(SimdI32x4AddWithF32x4) {
}
WASM_SIMD_TEST(SimdI32x4Local) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(31))),
@@ -2162,7 +2156,7 @@ WASM_SIMD_TEST(SimdI32x4Local) {
}
WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(0, WASM_SIMD_I32x4_EXTRACT_LANE(
@@ -2173,7 +2167,7 @@ WASM_SIMD_TEST(SimdI32x4SplatFromExtract) {
}
WASM_SIMD_TEST(SimdI32x4For) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r,
@@ -2207,7 +2201,7 @@ WASM_SIMD_TEST(SimdI32x4For) {
}
WASM_SIMD_TEST(SimdF32x4For) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmS128);
BUILD(r, WASM_SET_LOCAL(1, WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25))),
@@ -2233,30 +2227,21 @@ WASM_SIMD_TEST(SimdF32x4For) {
template <typename T, int numLanes = 4>
void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
for (int lane = 0; lane < numLanes; lane++) {
- const T& value = arr[lane];
-#if defined(V8_TARGET_BIG_ENDIAN)
- v[numLanes - 1 - lane] = value;
-#else
- v[lane] = value;
-#endif
+ WriteLittleEndianValue<T>(&v[lane], arr[lane]);
}
}
template <typename T>
-const T& GetScalar(T* v, int lane) {
+const T GetScalar(T* v, int lane) {
constexpr int kElems = kSimd128Size / sizeof(T);
-#if defined(V8_TARGET_BIG_ENDIAN)
- const int index = kElems - 1 - lane;
-#else
const int index = lane;
-#endif
USE(kElems);
DCHECK(index >= 0 && index < kElems);
- return v[index];
+ return ReadLittleEndianValue<T>(&v[index]);
}
WASM_SIMD_TEST(SimdI32x4GetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Pad the globals with a few unused slots to get a non-zero offset.
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
@@ -2284,7 +2269,7 @@ WASM_SIMD_TEST(SimdI32x4GetGlobal) {
}
WASM_SIMD_TEST(SimdI32x4SetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Pad the globals with a few unused slots to get a non-zero offset.
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
r.builder().AddGlobal<int32_t>(kWasmI32); // purposefully unused
@@ -2307,7 +2292,7 @@ WASM_SIMD_TEST(SimdI32x4SetGlobal) {
}
WASM_SIMD_TEST(SimdF32x4GetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
float* global = r.builder().AddGlobal<float>(kWasmS128);
SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
r.AllocateLocal(kWasmI32);
@@ -2330,7 +2315,7 @@ WASM_SIMD_TEST(SimdF32x4GetGlobal) {
}
WASM_SIMD_TEST(SimdF32x4SetGlobal) {
- WasmRunner<int32_t, int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
float* global = r.builder().AddGlobal<float>(kWasmS128);
BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_F32(13.5))),
WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(1, WASM_GET_GLOBAL(0),
@@ -2348,7 +2333,7 @@ WASM_SIMD_TEST(SimdF32x4SetGlobal) {
}
WASM_SIMD_TEST(SimdLoadStoreLoad) {
- WasmRunner<int32_t> r(execution_mode, lower_simd);
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
// Load memory, store it, then reload it and extract the first lane. Use a
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index bf47ddeadb..0ba12aedd9 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -27,7 +27,7 @@ namespace test_run_wasm {
#define RET_I8(x) WASM_I32V_2(x), kExprReturn
WASM_EXEC_TEST(Int32Const) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
const int32_t kExpectedValue = 0x11223344;
// return(kExpectedValue)
BUILD(r, WASM_I32V_5(kExpectedValue));
@@ -36,7 +36,7 @@ WASM_EXEC_TEST(Int32Const) {
WASM_EXEC_TEST(Int32Const_many) {
FOR_INT32_INPUTS(i) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
const int32_t kExpectedValue = *i;
// return(kExpectedValue)
BUILD(r, WASM_I32V(kExpectedValue));
@@ -46,58 +46,58 @@ WASM_EXEC_TEST(Int32Const_many) {
WASM_EXEC_TEST(GraphTrimming) {
// This WebAssembly code requires graph trimming in the TurboFan compiler.
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0, kExprI32RemS,
kExprI32Eq, kExprGetLocal, 0, kExprI32DivS, kExprUnreachable);
r.Call(1);
}
WASM_EXEC_TEST(Int32Param0) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// return(local[0])
BUILD(r, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Int32Param0_fallthru) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// local[0]
BUILD(r, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Int32Param1) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// local[1]
BUILD(r, WASM_GET_LOCAL(1));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(-111, *i)); }
}
WASM_EXEC_TEST(Int32Add) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// 11 + 44
BUILD(r, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(44)));
CHECK_EQ(55, r.Call());
}
WASM_EXEC_TEST(Int32Add_P) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
}
WASM_EXEC_TEST(Int32Add_P_fallthru) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I32V_1(13), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
}
-static void RunInt32AddTest(WasmExecutionMode execution_mode, const byte* code,
+static void RunInt32AddTest(ExecutionTier execution_tier, const byte* code,
size_t size) {
TestSignatures sigs;
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().AddSignature(sigs.ii_v());
r.builder().AddSignature(sigs.iii_v());
r.Build(code, code + size);
@@ -114,7 +114,7 @@ WASM_EXEC_TEST(Int32Add_P2) {
EXPERIMENTAL_FLAG_SCOPE(mv);
static const byte code[] = {
WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
- RunInt32AddTest(execution_mode, code, sizeof(code));
+ RunInt32AddTest(execution_tier, code, sizeof(code));
}
WASM_EXEC_TEST(Int32Add_block1) {
@@ -122,7 +122,7 @@ WASM_EXEC_TEST(Int32Add_block1) {
static const byte code[] = {
WASM_BLOCK_X(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
kExprI32Add};
- RunInt32AddTest(execution_mode, code, sizeof(code));
+ RunInt32AddTest(execution_tier, code, sizeof(code));
}
WASM_EXEC_TEST(Int32Add_block2) {
@@ -130,7 +130,7 @@ WASM_EXEC_TEST(Int32Add_block2) {
static const byte code[] = {
WASM_BLOCK_X(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), kExprBr, DEPTH_0),
kExprI32Add};
- RunInt32AddTest(execution_mode, code, sizeof(code));
+ RunInt32AddTest(execution_tier, code, sizeof(code));
}
WASM_EXEC_TEST(Int32Add_multi_if) {
@@ -140,11 +140,11 @@ WASM_EXEC_TEST(Int32Add_multi_if) {
WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
kExprI32Add};
- RunInt32AddTest(execution_mode, code, sizeof(code));
+ RunInt32AddTest(execution_tier, code, sizeof(code));
}
WASM_EXEC_TEST(Float32Add) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// int(11.5f + 44.5f)
BUILD(r,
WASM_I32_SCONVERT_F32(WASM_F32_ADD(WASM_F32(11.5f), WASM_F32(44.5f))));
@@ -152,7 +152,7 @@ WASM_EXEC_TEST(Float32Add) {
}
WASM_EXEC_TEST(Float64Add) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return int(13.5d + 43.5d)
BUILD(r, WASM_I32_SCONVERT_F64(WASM_F64_ADD(WASM_F64(13.5), WASM_F64(43.5))));
CHECK_EQ(57, r.Call());
@@ -161,18 +161,18 @@ WASM_EXEC_TEST(Float64Add) {
// clang-format messes up the FOR_INT32_INPUTS macros.
// clang-format off
template<typename ctype>
-static void TestInt32Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+static void TestInt32Binop(ExecutionTier execution_tier, WasmOpcode opcode,
ctype(*expected)(ctype, ctype)) {
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- WasmRunner<ctype> r(execution_mode);
+ WasmRunner<ctype> r(execution_tier);
// Apply {opcode} on two constants.
BUILD(r, WASM_BINOP(opcode, WASM_I32V(*i), WASM_I32V(*j)));
CHECK_EQ(expected(*i, *j), r.Call());
}
}
{
- WasmRunner<ctype, ctype, ctype> r(execution_mode);
+ WasmRunner<ctype, ctype, ctype> r(execution_tier);
// Apply {opcode} on two parameters.
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
@@ -186,7 +186,7 @@ static void TestInt32Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
#define WASM_I32_BINOP_TEST(expr, ctype, expected) \
WASM_EXEC_TEST(I32Binop_##expr) { \
- TestInt32Binop<ctype>(execution_mode, kExprI32##expr, \
+ TestInt32Binop<ctype>(execution_tier, kExprI32##expr, \
[](ctype a, ctype b) -> ctype { return expected; }); \
}
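To make the table-driven pattern concrete, one such invocation, WASM_I32_BINOP_TEST(GeU, uint32_t, a >= b) (visible in the next hunk header), expands roughly as below: the Wasm opcode is paired with a C++ lambda that supplies the reference semantics checked against both constant and parameterized operands.

// Illustrative expansion of WASM_I32_BINOP_TEST(GeU, uint32_t, a >= b).
WASM_EXEC_TEST(I32Binop_GeU) {
  TestInt32Binop<uint32_t>(
      execution_tier, kExprI32GeU,
      [](uint32_t a, uint32_t b) -> uint32_t { return a >= b; });
}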
@@ -221,16 +221,16 @@ WASM_I32_BINOP_TEST(GeU, uint32_t, a >= b)
#undef WASM_I32_BINOP_TEST
-void TestInt32Unop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+void TestInt32Unop(ExecutionTier execution_tier, WasmOpcode opcode,
int32_t expected, int32_t a) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return op K
BUILD(r, WASM_UNOP(opcode, WASM_I32V(a)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// return op a
BUILD(r, WASM_UNOP(opcode, WASM_GET_LOCAL(0)));
CHECK_EQ(expected, r.Call(a));
@@ -238,96 +238,96 @@ void TestInt32Unop(WasmExecutionMode execution_mode, WasmOpcode opcode,
}
WASM_EXEC_TEST(Int32Clz) {
- TestInt32Unop(execution_mode, kExprI32Clz, 0, 0x80001000);
- TestInt32Unop(execution_mode, kExprI32Clz, 1, 0x40000500);
- TestInt32Unop(execution_mode, kExprI32Clz, 2, 0x20000300);
- TestInt32Unop(execution_mode, kExprI32Clz, 3, 0x10000003);
- TestInt32Unop(execution_mode, kExprI32Clz, 4, 0x08050000);
- TestInt32Unop(execution_mode, kExprI32Clz, 5, 0x04006000);
- TestInt32Unop(execution_mode, kExprI32Clz, 6, 0x02000000);
- TestInt32Unop(execution_mode, kExprI32Clz, 7, 0x010000A0);
- TestInt32Unop(execution_mode, kExprI32Clz, 8, 0x00800C00);
- TestInt32Unop(execution_mode, kExprI32Clz, 9, 0x00400000);
- TestInt32Unop(execution_mode, kExprI32Clz, 10, 0x0020000D);
- TestInt32Unop(execution_mode, kExprI32Clz, 11, 0x00100F00);
- TestInt32Unop(execution_mode, kExprI32Clz, 12, 0x00080000);
- TestInt32Unop(execution_mode, kExprI32Clz, 13, 0x00041000);
- TestInt32Unop(execution_mode, kExprI32Clz, 14, 0x00020020);
- TestInt32Unop(execution_mode, kExprI32Clz, 15, 0x00010300);
- TestInt32Unop(execution_mode, kExprI32Clz, 16, 0x00008040);
- TestInt32Unop(execution_mode, kExprI32Clz, 17, 0x00004005);
- TestInt32Unop(execution_mode, kExprI32Clz, 18, 0x00002050);
- TestInt32Unop(execution_mode, kExprI32Clz, 19, 0x00001700);
- TestInt32Unop(execution_mode, kExprI32Clz, 20, 0x00000870);
- TestInt32Unop(execution_mode, kExprI32Clz, 21, 0x00000405);
- TestInt32Unop(execution_mode, kExprI32Clz, 22, 0x00000203);
- TestInt32Unop(execution_mode, kExprI32Clz, 23, 0x00000101);
- TestInt32Unop(execution_mode, kExprI32Clz, 24, 0x00000089);
- TestInt32Unop(execution_mode, kExprI32Clz, 25, 0x00000041);
- TestInt32Unop(execution_mode, kExprI32Clz, 26, 0x00000022);
- TestInt32Unop(execution_mode, kExprI32Clz, 27, 0x00000013);
- TestInt32Unop(execution_mode, kExprI32Clz, 28, 0x00000008);
- TestInt32Unop(execution_mode, kExprI32Clz, 29, 0x00000004);
- TestInt32Unop(execution_mode, kExprI32Clz, 30, 0x00000002);
- TestInt32Unop(execution_mode, kExprI32Clz, 31, 0x00000001);
- TestInt32Unop(execution_mode, kExprI32Clz, 32, 0x00000000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 0, 0x80001000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 1, 0x40000500);
+ TestInt32Unop(execution_tier, kExprI32Clz, 2, 0x20000300);
+ TestInt32Unop(execution_tier, kExprI32Clz, 3, 0x10000003);
+ TestInt32Unop(execution_tier, kExprI32Clz, 4, 0x08050000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 5, 0x04006000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 6, 0x02000000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 7, 0x010000A0);
+ TestInt32Unop(execution_tier, kExprI32Clz, 8, 0x00800C00);
+ TestInt32Unop(execution_tier, kExprI32Clz, 9, 0x00400000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 10, 0x0020000D);
+ TestInt32Unop(execution_tier, kExprI32Clz, 11, 0x00100F00);
+ TestInt32Unop(execution_tier, kExprI32Clz, 12, 0x00080000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 13, 0x00041000);
+ TestInt32Unop(execution_tier, kExprI32Clz, 14, 0x00020020);
+ TestInt32Unop(execution_tier, kExprI32Clz, 15, 0x00010300);
+ TestInt32Unop(execution_tier, kExprI32Clz, 16, 0x00008040);
+ TestInt32Unop(execution_tier, kExprI32Clz, 17, 0x00004005);
+ TestInt32Unop(execution_tier, kExprI32Clz, 18, 0x00002050);
+ TestInt32Unop(execution_tier, kExprI32Clz, 19, 0x00001700);
+ TestInt32Unop(execution_tier, kExprI32Clz, 20, 0x00000870);
+ TestInt32Unop(execution_tier, kExprI32Clz, 21, 0x00000405);
+ TestInt32Unop(execution_tier, kExprI32Clz, 22, 0x00000203);
+ TestInt32Unop(execution_tier, kExprI32Clz, 23, 0x00000101);
+ TestInt32Unop(execution_tier, kExprI32Clz, 24, 0x00000089);
+ TestInt32Unop(execution_tier, kExprI32Clz, 25, 0x00000041);
+ TestInt32Unop(execution_tier, kExprI32Clz, 26, 0x00000022);
+ TestInt32Unop(execution_tier, kExprI32Clz, 27, 0x00000013);
+ TestInt32Unop(execution_tier, kExprI32Clz, 28, 0x00000008);
+ TestInt32Unop(execution_tier, kExprI32Clz, 29, 0x00000004);
+ TestInt32Unop(execution_tier, kExprI32Clz, 30, 0x00000002);
+ TestInt32Unop(execution_tier, kExprI32Clz, 31, 0x00000001);
+ TestInt32Unop(execution_tier, kExprI32Clz, 32, 0x00000000);
}
WASM_EXEC_TEST(Int32Ctz) {
- TestInt32Unop(execution_mode, kExprI32Ctz, 32, 0x00000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 31, 0x80000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 30, 0x40000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 29, 0x20000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 28, 0x10000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 27, 0xA8000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 26, 0xF4000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 25, 0x62000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 24, 0x91000000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 23, 0xCD800000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 22, 0x09400000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 21, 0xAF200000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 20, 0xAC100000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 19, 0xE0B80000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 18, 0x9CE40000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 17, 0xC7920000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 16, 0xB8F10000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 15, 0x3B9F8000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 14, 0xDB4C4000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 13, 0xE9A32000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 12, 0xFCA61000);
- TestInt32Unop(execution_mode, kExprI32Ctz, 11, 0x6C8A7800);
- TestInt32Unop(execution_mode, kExprI32Ctz, 10, 0x8CE5A400);
- TestInt32Unop(execution_mode, kExprI32Ctz, 9, 0xCB7D0200);
- TestInt32Unop(execution_mode, kExprI32Ctz, 8, 0xCB4DC100);
- TestInt32Unop(execution_mode, kExprI32Ctz, 7, 0xDFBEC580);
- TestInt32Unop(execution_mode, kExprI32Ctz, 6, 0x27A9DB40);
- TestInt32Unop(execution_mode, kExprI32Ctz, 5, 0xDE3BCB20);
- TestInt32Unop(execution_mode, kExprI32Ctz, 4, 0xD7E8A610);
- TestInt32Unop(execution_mode, kExprI32Ctz, 3, 0x9AFDBC88);
- TestInt32Unop(execution_mode, kExprI32Ctz, 2, 0x9AFDBC84);
- TestInt32Unop(execution_mode, kExprI32Ctz, 1, 0x9AFDBC82);
- TestInt32Unop(execution_mode, kExprI32Ctz, 0, 0x9AFDBC81);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 32, 0x00000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 31, 0x80000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 30, 0x40000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 29, 0x20000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 28, 0x10000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 27, 0xA8000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 26, 0xF4000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 25, 0x62000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 24, 0x91000000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 23, 0xCD800000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 22, 0x09400000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 21, 0xAF200000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 20, 0xAC100000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 19, 0xE0B80000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 18, 0x9CE40000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 17, 0xC7920000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 16, 0xB8F10000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 15, 0x3B9F8000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 14, 0xDB4C4000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 13, 0xE9A32000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 12, 0xFCA61000);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 11, 0x6C8A7800);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 10, 0x8CE5A400);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 9, 0xCB7D0200);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 8, 0xCB4DC100);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 7, 0xDFBEC580);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 6, 0x27A9DB40);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 5, 0xDE3BCB20);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 4, 0xD7E8A610);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 3, 0x9AFDBC88);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 2, 0x9AFDBC84);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 1, 0x9AFDBC82);
+ TestInt32Unop(execution_tier, kExprI32Ctz, 0, 0x9AFDBC81);
}
WASM_EXEC_TEST(Int32Popcnt) {
- TestInt32Unop(execution_mode, kExprI32Popcnt, 32, 0xFFFFFFFF);
- TestInt32Unop(execution_mode, kExprI32Popcnt, 0, 0x00000000);
- TestInt32Unop(execution_mode, kExprI32Popcnt, 1, 0x00008000);
- TestInt32Unop(execution_mode, kExprI32Popcnt, 13, 0x12345678);
- TestInt32Unop(execution_mode, kExprI32Popcnt, 19, 0xFEDCBA09);
+ TestInt32Unop(execution_tier, kExprI32Popcnt, 32, 0xFFFFFFFF);
+ TestInt32Unop(execution_tier, kExprI32Popcnt, 0, 0x00000000);
+ TestInt32Unop(execution_tier, kExprI32Popcnt, 1, 0x00008000);
+ TestInt32Unop(execution_tier, kExprI32Popcnt, 13, 0x12345678);
+ TestInt32Unop(execution_tier, kExprI32Popcnt, 19, 0xFEDCBA09);
}
WASM_EXEC_TEST(I32Eqz) {
- TestInt32Unop(execution_mode, kExprI32Eqz, 0, 1);
- TestInt32Unop(execution_mode, kExprI32Eqz, 0, -1);
- TestInt32Unop(execution_mode, kExprI32Eqz, 0, -827343);
- TestInt32Unop(execution_mode, kExprI32Eqz, 0, 8888888);
- TestInt32Unop(execution_mode, kExprI32Eqz, 1, 0);
+ TestInt32Unop(execution_tier, kExprI32Eqz, 0, 1);
+ TestInt32Unop(execution_tier, kExprI32Eqz, 0, -1);
+ TestInt32Unop(execution_tier, kExprI32Eqz, 0, -827343);
+ TestInt32Unop(execution_tier, kExprI32Eqz, 0, 8888888);
+ TestInt32Unop(execution_tier, kExprI32Eqz, 1, 0);
}
WASM_EXEC_TEST(Int32DivS_trap) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
@@ -338,7 +338,7 @@ WASM_EXEC_TEST(Int32DivS_trap) {
}
WASM_EXEC_TEST(Int32RemS_trap) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(33, r.Call(133, 100));
@@ -349,7 +349,7 @@ WASM_EXEC_TEST(Int32RemS_trap) {
}
WASM_EXEC_TEST(Int32DivU_trap) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
@@ -360,7 +360,7 @@ WASM_EXEC_TEST(Int32DivU_trap) {
}
WASM_EXEC_TEST(Int32RemU_trap) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(17, r.Call(217, 100));
const int32_t kMin = std::numeric_limits<int32_t>::min();
@@ -372,7 +372,7 @@ WASM_EXEC_TEST(Int32RemU_trap) {
WASM_EXEC_TEST(Int32DivS_byzero_const) {
for (int8_t denom = -2; denom < 8; ++denom) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
for (int32_t val = -7; val < 8; ++val) {
if (denom == 0) {
@@ -386,7 +386,7 @@ WASM_EXEC_TEST(Int32DivS_byzero_const) {
WASM_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
for (int8_t denom = -2; denom < 8; ++denom) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_I32_ASMJS_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
FOR_INT32_INPUTS(i) {
@@ -403,7 +403,7 @@ WASM_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
WASM_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
for (int8_t denom = -2; denom < 8; ++denom) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_I32_ASMJS_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
FOR_INT32_INPUTS(i) {
@@ -420,7 +420,7 @@ WASM_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
WASM_EXEC_TEST(Int32DivU_byzero_const) {
for (uint32_t denom = 0xFFFFFFFE; denom < 8; ++denom) {
- WasmRunner<uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
for (uint32_t val = 0xFFFFFFF0; val < 8; ++val) {
@@ -434,7 +434,7 @@ WASM_EXEC_TEST(Int32DivU_byzero_const) {
}
WASM_EXEC_TEST(Int32DivS_trap_effect) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_IF_ELSE_I(
@@ -455,34 +455,34 @@ WASM_EXEC_TEST(Int32DivS_trap_effect) {
CHECK_TRAP(r.Call(0, 0));
}
-void TestFloat32Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+void TestFloat32Binop(ExecutionTier execution_tier, WasmOpcode opcode,
int32_t expected, float a, float b) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, float, float> r(execution_mode);
+ WasmRunner<int32_t, float, float> r(execution_tier);
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
-void TestFloat32BinopWithConvert(WasmExecutionMode execution_mode,
+void TestFloat32BinopWithConvert(ExecutionTier execution_tier,
WasmOpcode opcode, int32_t expected, float a,
float b) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return int(K op K)
BUILD(r,
WASM_I32_SCONVERT_F32(WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, float, float> r(execution_mode);
+ WasmRunner<int32_t, float, float> r(execution_tier);
// return int(a op b)
BUILD(r, WASM_I32_SCONVERT_F32(
WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -490,66 +490,66 @@ void TestFloat32BinopWithConvert(WasmExecutionMode execution_mode,
}
}
-void TestFloat32UnopWithConvert(WasmExecutionMode execution_mode,
- WasmOpcode opcode, int32_t expected, float a) {
+void TestFloat32UnopWithConvert(ExecutionTier execution_tier, WasmOpcode opcode,
+ int32_t expected, float a) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return int(op(K))
BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_F32(a))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, float> r(execution_mode);
+ WasmRunner<int32_t, float> r(execution_tier);
// return int(op(a))
BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_GET_LOCAL(0))));
CHECK_EQ(expected, r.Call(a));
}
}
-void TestFloat64Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+void TestFloat64Binop(ExecutionTier execution_tier, WasmOpcode opcode,
int32_t expected, double a, double b) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, double, double> r(execution_mode);
+ WasmRunner<int32_t, double, double> r(execution_tier);
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
-void TestFloat64BinopWithConvert(WasmExecutionMode execution_mode,
+void TestFloat64BinopWithConvert(ExecutionTier execution_tier,
WasmOpcode opcode, int32_t expected, double a,
double b) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return int(K op K)
BUILD(r,
WASM_I32_SCONVERT_F64(WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, double, double> r(execution_mode);
+ WasmRunner<int32_t, double, double> r(execution_tier);
BUILD(r, WASM_I32_SCONVERT_F64(
WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
CHECK_EQ(expected, r.Call(a, b));
}
}
-void TestFloat64UnopWithConvert(WasmExecutionMode execution_mode,
- WasmOpcode opcode, int32_t expected, double a) {
+void TestFloat64UnopWithConvert(ExecutionTier execution_tier, WasmOpcode opcode,
+ int32_t expected, double a) {
{
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// return int(op(K))
BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_F64(a))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t, double> r(execution_mode);
+ WasmRunner<int32_t, double> r(execution_tier);
// return int(op(a))
BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_GET_LOCAL(0))));
CHECK_EQ(expected, r.Call(a));
@@ -557,50 +557,50 @@ void TestFloat64UnopWithConvert(WasmExecutionMode execution_mode,
}
WASM_EXEC_TEST(Float32Binops) {
- TestFloat32Binop(execution_mode, kExprF32Eq, 1, 8.125f, 8.125f);
- TestFloat32Binop(execution_mode, kExprF32Ne, 1, 8.125f, 8.127f);
- TestFloat32Binop(execution_mode, kExprF32Lt, 1, -9.5f, -9.0f);
- TestFloat32Binop(execution_mode, kExprF32Le, 1, -1111.0f, -1111.0f);
- TestFloat32Binop(execution_mode, kExprF32Gt, 1, -9.0f, -9.5f);
- TestFloat32Binop(execution_mode, kExprF32Ge, 1, -1111.0f, -1111.0f);
+ TestFloat32Binop(execution_tier, kExprF32Eq, 1, 8.125f, 8.125f);
+ TestFloat32Binop(execution_tier, kExprF32Ne, 1, 8.125f, 8.127f);
+ TestFloat32Binop(execution_tier, kExprF32Lt, 1, -9.5f, -9.0f);
+ TestFloat32Binop(execution_tier, kExprF32Le, 1, -1111.0f, -1111.0f);
+ TestFloat32Binop(execution_tier, kExprF32Gt, 1, -9.0f, -9.5f);
+ TestFloat32Binop(execution_tier, kExprF32Ge, 1, -1111.0f, -1111.0f);
- TestFloat32BinopWithConvert(execution_mode, kExprF32Add, 10, 3.5f, 6.5f);
- TestFloat32BinopWithConvert(execution_mode, kExprF32Sub, 2, 44.5f, 42.5f);
- TestFloat32BinopWithConvert(execution_mode, kExprF32Mul, -66, -132.1f, 0.5f);
- TestFloat32BinopWithConvert(execution_mode, kExprF32Div, 11, 22.1f, 2.0f);
+ TestFloat32BinopWithConvert(execution_tier, kExprF32Add, 10, 3.5f, 6.5f);
+ TestFloat32BinopWithConvert(execution_tier, kExprF32Sub, 2, 44.5f, 42.5f);
+ TestFloat32BinopWithConvert(execution_tier, kExprF32Mul, -66, -132.1f, 0.5f);
+ TestFloat32BinopWithConvert(execution_tier, kExprF32Div, 11, 22.1f, 2.0f);
}
WASM_EXEC_TEST(Float32Unops) {
- TestFloat32UnopWithConvert(execution_mode, kExprF32Abs, 8, 8.125f);
- TestFloat32UnopWithConvert(execution_mode, kExprF32Abs, 9, -9.125f);
- TestFloat32UnopWithConvert(execution_mode, kExprF32Neg, -213, 213.125f);
- TestFloat32UnopWithConvert(execution_mode, kExprF32Sqrt, 12, 144.4f);
+ TestFloat32UnopWithConvert(execution_tier, kExprF32Abs, 8, 8.125f);
+ TestFloat32UnopWithConvert(execution_tier, kExprF32Abs, 9, -9.125f);
+ TestFloat32UnopWithConvert(execution_tier, kExprF32Neg, -213, 213.125f);
+ TestFloat32UnopWithConvert(execution_tier, kExprF32Sqrt, 12, 144.4f);
}
WASM_EXEC_TEST(Float64Binops) {
- TestFloat64Binop(execution_mode, kExprF64Eq, 1, 16.25, 16.25);
- TestFloat64Binop(execution_mode, kExprF64Ne, 1, 16.25, 16.15);
- TestFloat64Binop(execution_mode, kExprF64Lt, 1, -32.4, 11.7);
- TestFloat64Binop(execution_mode, kExprF64Le, 1, -88.9, -88.9);
- TestFloat64Binop(execution_mode, kExprF64Gt, 1, 11.7, -32.4);
- TestFloat64Binop(execution_mode, kExprF64Ge, 1, -88.9, -88.9);
-
- TestFloat64BinopWithConvert(execution_mode, kExprF64Add, 100, 43.5, 56.5);
- TestFloat64BinopWithConvert(execution_mode, kExprF64Sub, 200, 12200.1,
+ TestFloat64Binop(execution_tier, kExprF64Eq, 1, 16.25, 16.25);
+ TestFloat64Binop(execution_tier, kExprF64Ne, 1, 16.25, 16.15);
+ TestFloat64Binop(execution_tier, kExprF64Lt, 1, -32.4, 11.7);
+ TestFloat64Binop(execution_tier, kExprF64Le, 1, -88.9, -88.9);
+ TestFloat64Binop(execution_tier, kExprF64Gt, 1, 11.7, -32.4);
+ TestFloat64Binop(execution_tier, kExprF64Ge, 1, -88.9, -88.9);
+
+ TestFloat64BinopWithConvert(execution_tier, kExprF64Add, 100, 43.5, 56.5);
+ TestFloat64BinopWithConvert(execution_tier, kExprF64Sub, 200, 12200.1,
12000.1);
- TestFloat64BinopWithConvert(execution_mode, kExprF64Mul, -33, 134, -0.25);
- TestFloat64BinopWithConvert(execution_mode, kExprF64Div, -1111, -2222.3, 2);
+ TestFloat64BinopWithConvert(execution_tier, kExprF64Mul, -33, 134, -0.25);
+ TestFloat64BinopWithConvert(execution_tier, kExprF64Div, -1111, -2222.3, 2);
}
WASM_EXEC_TEST(Float64Unops) {
- TestFloat64UnopWithConvert(execution_mode, kExprF64Abs, 108, 108.125);
- TestFloat64UnopWithConvert(execution_mode, kExprF64Abs, 209, -209.125);
- TestFloat64UnopWithConvert(execution_mode, kExprF64Neg, -209, 209.125);
- TestFloat64UnopWithConvert(execution_mode, kExprF64Sqrt, 13, 169.4);
+ TestFloat64UnopWithConvert(execution_tier, kExprF64Abs, 108, 108.125);
+ TestFloat64UnopWithConvert(execution_tier, kExprF64Abs, 209, -209.125);
+ TestFloat64UnopWithConvert(execution_tier, kExprF64Neg, -209, 209.125);
+ TestFloat64UnopWithConvert(execution_tier, kExprF64Sqrt, 13, 169.4);
}
WASM_EXEC_TEST(Float32Neg) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_NEG(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -610,7 +610,7 @@ WASM_EXEC_TEST(Float32Neg) {
}
WASM_EXEC_TEST(Float64Neg) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_NEG(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -620,7 +620,7 @@ WASM_EXEC_TEST(Float64Neg) {
}
WASM_EXEC_TEST(IfElse_P) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// if (p0) return 11; else return 22;
BUILD(r, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), // --
WASM_I32V_1(11), // --
@@ -632,34 +632,34 @@ WASM_EXEC_TEST(IfElse_P) {
}
WASM_EXEC_TEST(If_empty1) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprEnd, WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 9, *i)); }
}
WASM_EXEC_TEST(IfElse_empty1) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprElse, kExprEnd,
WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 8, *i)); }
}
WASM_EXEC_TEST(IfElse_empty2) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, WASM_NOP, kExprElse,
kExprEnd, WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 7, *i)); }
}
WASM_EXEC_TEST(IfElse_empty3) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_GET_LOCAL(0), kExprIf, kLocalVoid, kExprElse, WASM_NOP,
kExprEnd, WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 6, *i)); }
}
WASM_EXEC_TEST(If_chain1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// if (p0) 13; if (p0) 14; 15
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_NOP),
WASM_IF(WASM_GET_LOCAL(0), WASM_NOP), WASM_I32V_1(15));
@@ -667,7 +667,7 @@ WASM_EXEC_TEST(If_chain1) {
}
WASM_EXEC_TEST(If_chain_set) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// if (p0) p1 = 73; if (p0) p1 = 74; p1
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(1, WASM_I32V_2(73))),
WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(1, WASM_I32V_2(74))),
@@ -679,7 +679,7 @@ WASM_EXEC_TEST(If_chain_set) {
}
WASM_EXEC_TEST(IfElse_Unreachable1) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// 0 ? unreachable : 27
BUILD(r, WASM_IF_ELSE_I(WASM_ZERO, // --
WASM_UNREACHABLE, // --
@@ -688,7 +688,7 @@ WASM_EXEC_TEST(IfElse_Unreachable1) {
}
WASM_EXEC_TEST(IfElse_Unreachable2) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// 1 ? 28 : unreachable
BUILD(r, WASM_IF_ELSE_I(WASM_I32V_1(1), // --
WASM_I32V_1(28), // --
@@ -697,21 +697,21 @@ WASM_EXEC_TEST(IfElse_Unreachable2) {
}
WASM_EXEC_TEST(Return12) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, RET_I8(12));
CHECK_EQ(12, r.Call());
}
WASM_EXEC_TEST(Return17) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK(RET_I8(17)), WASM_ZERO);
CHECK_EQ(17, r.Call());
}
WASM_EXEC_TEST(Return_I32) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, RET(WASM_GET_LOCAL(0)));
@@ -719,7 +719,7 @@ WASM_EXEC_TEST(Return_I32) {
}
WASM_EXEC_TEST(Return_F32) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, RET(WASM_GET_LOCAL(0)));
@@ -735,7 +735,7 @@ WASM_EXEC_TEST(Return_F32) {
}
WASM_EXEC_TEST(Return_F64) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, RET(WASM_GET_LOCAL(0)));
@@ -751,7 +751,7 @@ WASM_EXEC_TEST(Return_F64) {
}
WASM_EXEC_TEST(Select_float_parameters) {
- WasmRunner<float, float, float, int32_t> r(execution_mode);
+ WasmRunner<float, float, float, int32_t> r(execution_tier);
// return select(11, 22, a);
BUILD(r,
WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)));
@@ -759,7 +759,7 @@ WASM_EXEC_TEST(Select_float_parameters) {
}
WASM_EXEC_TEST(Select) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// return select(11, 22, a);
BUILD(r, WASM_SELECT(WASM_I32V_1(11), WASM_I32V_1(22), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
@@ -769,7 +769,7 @@ WASM_EXEC_TEST(Select) {
}
WASM_EXEC_TEST(Select_strict1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// select(a=0, a=1, a=2); return a
BUILD(r, WASM_SELECT(WASM_TEE_LOCAL(0, WASM_ZERO),
WASM_TEE_LOCAL(0, WASM_I32V_1(1)),
@@ -779,7 +779,7 @@ WASM_EXEC_TEST(Select_strict1) {
}
WASM_EXEC_TEST(Select_strict2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
// select(b=5, c=6, a)
@@ -792,7 +792,7 @@ WASM_EXEC_TEST(Select_strict2) {
}
WASM_EXEC_TEST(Select_strict3) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
r.AllocateLocal(kWasmI32);
// select(b=5, c=6, a=b)
@@ -806,7 +806,7 @@ WASM_EXEC_TEST(Select_strict3) {
}
WASM_EXEC_TEST(BrIf_strict) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_GET_LOCAL(0),
WASM_TEE_LOCAL(0, WASM_I32V_2(99)))));
@@ -814,7 +814,7 @@ WASM_EXEC_TEST(BrIf_strict) {
}
WASM_EXEC_TEST(Br_height) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(
WASM_BLOCK(WASM_BRV_IFD(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
WASM_RETURN1(WASM_I32V_1(9))),
@@ -827,7 +827,7 @@ WASM_EXEC_TEST(Br_height) {
}
WASM_EXEC_TEST(Regression_660262) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, kExprI32Const, 0x00, kExprI32Const, 0x00, kExprI32LoadMem, 0x00,
0x0F, kExprBrTable, 0x00, 0x80, 0x00); // entries=0
@@ -835,14 +835,14 @@ WASM_EXEC_TEST(Regression_660262) {
}
WASM_EXEC_TEST(BrTable0a) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))),
WASM_I32V_2(91));
FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(*i)); }
}
WASM_EXEC_TEST(BrTable0b) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r,
B1(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(0)))),
WASM_I32V_2(92));
@@ -850,7 +850,7 @@ WASM_EXEC_TEST(BrTable0b) {
}
WASM_EXEC_TEST(BrTable0c) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(
r,
B1(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(1))),
@@ -863,13 +863,13 @@ WASM_EXEC_TEST(BrTable0c) {
}
WASM_EXEC_TEST(BrTable1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0))), RET_I8(93));
FOR_INT32_INPUTS(i) { CHECK_EQ(93, r.Call(*i)); }
}
WASM_EXEC_TEST(BrTable_loop) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r,
B2(B1(WASM_LOOP(WASM_BR_TABLE(WASM_INC_LOCAL_BYV(0, 1), 2, BR_TARGET(2),
BR_TARGET(1), BR_TARGET(0)))),
@@ -883,7 +883,7 @@ WASM_EXEC_TEST(BrTable_loop) {
}
WASM_EXEC_TEST(BrTable_br) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r,
B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(1), BR_TARGET(0))),
RET_I8(91)),
@@ -895,7 +895,7 @@ WASM_EXEC_TEST(BrTable_br) {
}
WASM_EXEC_TEST(BrTable_br2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B2(B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 3, BR_TARGET(1),
BR_TARGET(2), BR_TARGET(3), BR_TARGET(0))),
@@ -926,7 +926,7 @@ WASM_EXEC_TEST(BrTable4) {
RET_I8(73)),
WASM_I32V_2(75)};
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.Build(code, code + arraysize(code));
for (int x = -3; x < 50; ++x) {
@@ -956,7 +956,7 @@ WASM_EXEC_TEST(BrTable4x4) {
RET_I8(53)),
WASM_I32V_2(55)};
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.Build(code, code + arraysize(code));
for (int x = -6; x < 47; ++x) {
@@ -981,7 +981,7 @@ WASM_EXEC_TEST(BrTable4_fallthru) {
WASM_INC_LOCAL_BY(1, 8)),
WASM_GET_LOCAL(1)};
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.Build(code, code + arraysize(code));
CHECK_EQ(15, r.Call(0, 0));
@@ -1005,14 +1005,14 @@ WASM_EXEC_TEST(BrTable_loop_target) {
BR_TARGET(0), BR_TARGET(1), BR_TARGET(1))),
WASM_ONE)};
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.Build(code, code + arraysize(code));
CHECK_EQ(1, r.Call(0));
}
WASM_EXEC_TEST(F32ReinterpretI32) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
@@ -1027,7 +1027,7 @@ WASM_EXEC_TEST(F32ReinterpretI32) {
}
WASM_EXEC_TEST(I32ReinterpretF32) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
@@ -1046,7 +1046,7 @@ WASM_EXEC_TEST(I32ReinterpretF32) {
#ifndef USE_SIMULATOR
WASM_EXEC_TEST(SignallingNanSurvivesI32ReinterpretF32) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_I32_REINTERPRET_F32(
WASM_SEQ(kExprF32Const, 0x00, 0x00, 0xA0, 0x7F)));
@@ -1058,7 +1058,7 @@ WASM_EXEC_TEST(SignallingNanSurvivesI32ReinterpretF32) {
#endif
WASM_EXEC_TEST(LoadMaxUint32Offset) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), // type
@@ -1069,7 +1069,7 @@ WASM_EXEC_TEST(LoadMaxUint32Offset) {
}
WASM_EXEC_TEST(LoadStoreLoad) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
@@ -1085,28 +1085,28 @@ WASM_EXEC_TEST(LoadStoreLoad) {
}
WASM_EXEC_TEST(UnalignedFloat32Load) {
- WasmRunner<float> r(execution_mode);
+ WasmRunner<float> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Float32(), WASM_ONE, 2));
r.Call();
}
WASM_EXEC_TEST(UnalignedFloat64Load) {
- WasmRunner<double> r(execution_mode);
+ WasmRunner<double> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Float64(), WASM_ONE, 3));
r.Call();
}
WASM_EXEC_TEST(UnalignedInt32Load) {
- WasmRunner<uint32_t> r(execution_mode);
+ WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_ONE, 2));
r.Call();
}
WASM_EXEC_TEST(UnalignedInt32Store) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ONE, 2,
WASM_I32V_1(1)),
@@ -1115,7 +1115,7 @@ WASM_EXEC_TEST(UnalignedInt32Store) {
}
WASM_EXEC_TEST(UnalignedFloat32Store) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Float32(), WASM_ONE,
2, WASM_F32(1.0)),
@@ -1124,7 +1124,7 @@ WASM_EXEC_TEST(UnalignedFloat32Store) {
}
WASM_EXEC_TEST(UnalignedFloat64Store) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_SEQ(WASM_STORE_MEM_ALIGNMENT(MachineType::Float64(), WASM_ONE,
3, WASM_F64(1.0)),
@@ -1134,7 +1134,7 @@ WASM_EXEC_TEST(UnalignedFloat64Store) {
WASM_EXEC_TEST(VoidReturn1) {
const int32_t kExpected = -414444;
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// Build the test function.
WasmFunctionCompiler& test_func = r.NewFunction<void>();
@@ -1151,7 +1151,7 @@ WASM_EXEC_TEST(VoidReturn1) {
WASM_EXEC_TEST(VoidReturn2) {
const int32_t kExpected = -414444;
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// Build the test function.
WasmFunctionCompiler& test_func = r.NewFunction<void>();
@@ -1167,67 +1167,67 @@ WASM_EXEC_TEST(VoidReturn2) {
}
WASM_EXEC_TEST(BrEmpty) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BRV(0, WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(BrIfEmpty) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_empty) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, kExprBlock, kLocalVoid, kExprEnd, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_empty_br1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_BR(0)), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_empty_brif1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_ZERO)), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_empty_brif2) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
}
WASM_EXEC_TEST(Block_i) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_f) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_BLOCK_F(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_d) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_BLOCK_D(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Block_br2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, static_cast<uint32_t>(r.Call(*i))); }
}
WASM_EXEC_TEST(Block_If_P) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// block { if (p0) break 51; 52; }
BUILD(r, WASM_BLOCK_I( // --
WASM_IF(WASM_GET_LOCAL(0), // --
@@ -1240,49 +1240,49 @@ WASM_EXEC_TEST(Block_If_P) {
}
WASM_EXEC_TEST(Loop_empty) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, kExprLoop, kLocalVoid, kExprEnd, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_i) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_LOOP_I(WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_f) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_LOOP_F(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_d) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_LOOP_D(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_FLOAT_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_empty_br1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_LOOP(WASM_BR(1))), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_empty_brif1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_LOOP(WASM_BR_IF(1, WASM_ZERO))), WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
WASM_EXEC_TEST(Loop_empty_brif2) {
- WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_LOOP_I(WASM_BRV_IF(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
}
WASM_EXEC_TEST(Loop_empty_brif3) {
- WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_mode);
+ WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
BUILD(r, WASM_LOOP(WASM_BRV_IFD(1, WASM_GET_LOCAL(2), WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(1));
FOR_UINT32_INPUTS(i) {
@@ -1294,7 +1294,7 @@ WASM_EXEC_TEST(Loop_empty_brif3) {
}
WASM_EXEC_TEST(Block_BrIf_P) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(51), WASM_GET_LOCAL(0)),
WASM_I32V_1(52)));
FOR_INT32_INPUTS(i) {
@@ -1304,7 +1304,7 @@ WASM_EXEC_TEST(Block_BrIf_P) {
}
WASM_EXEC_TEST(Block_IfElse_P_assign) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// { if (p0) p0 = 71; else p0 = 72; return p0; }
BUILD(r, // --
WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
@@ -1318,7 +1318,7 @@ WASM_EXEC_TEST(Block_IfElse_P_assign) {
}
WASM_EXEC_TEST(Block_IfElse_P_return) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// if (p0) return 81; else return 82;
BUILD(r, // --
WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
@@ -1332,7 +1332,7 @@ WASM_EXEC_TEST(Block_IfElse_P_return) {
}
WASM_EXEC_TEST(Block_If_P_assign) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// { if (p0) p0 = 61; p0; }
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I32V_1(61))),
WASM_GET_LOCAL(0));
@@ -1343,14 +1343,14 @@ WASM_EXEC_TEST(Block_If_P_assign) {
}
WASM_EXEC_TEST(DanglingAssign) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// { return 0; p0 = 0; }
BUILD(r, WASM_BLOCK_I(RET_I8(99), WASM_TEE_LOCAL(0, WASM_ZERO)));
CHECK_EQ(99, r.Call(1));
}
WASM_EXEC_TEST(ExprIf_P) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// p0 ? 11 : 22;
BUILD(r, WASM_IF_ELSE_I(WASM_GET_LOCAL(0), // --
WASM_I32V_1(11), // --
@@ -1362,7 +1362,7 @@ WASM_EXEC_TEST(ExprIf_P) {
}
WASM_EXEC_TEST(CountDown) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_LOOP(WASM_IFB(WASM_GET_LOCAL(0),
WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
WASM_I32V_1(1))),
@@ -1374,7 +1374,7 @@ WASM_EXEC_TEST(CountDown) {
}
WASM_EXEC_TEST(CountDown_fallthru) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(
r,
WASM_LOOP(
@@ -1388,7 +1388,7 @@ WASM_EXEC_TEST(CountDown_fallthru) {
}
WASM_EXEC_TEST(WhileCountDown) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_WHILE(WASM_GET_LOCAL(0),
WASM_SET_LOCAL(
0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V_1(1)))),
@@ -1399,7 +1399,7 @@ WASM_EXEC_TEST(WhileCountDown) {
}
WASM_EXEC_TEST(Loop_if_break1) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(2, WASM_GET_LOCAL(1))),
WASM_SET_LOCAL(0, WASM_I32V_2(99))),
WASM_GET_LOCAL(0));
@@ -1410,7 +1410,7 @@ WASM_EXEC_TEST(Loop_if_break1) {
}
WASM_EXEC_TEST(Loop_if_break2) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_LOOP(WASM_BRV_IF(1, WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)),
WASM_DROP, WASM_SET_LOCAL(0, WASM_I32V_2(99))),
WASM_GET_LOCAL(0));
@@ -1421,7 +1421,7 @@ WASM_EXEC_TEST(Loop_if_break2) {
}
WASM_EXEC_TEST(Loop_if_break_fallthru) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BR(2)),
WASM_SET_LOCAL(0, WASM_I32V_2(93)))),
WASM_GET_LOCAL(0));
@@ -1432,7 +1432,7 @@ WASM_EXEC_TEST(Loop_if_break_fallthru) {
}
WASM_EXEC_TEST(Loop_if_break_fallthru2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, B1(B1(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BR(2)),
WASM_SET_LOCAL(0, WASM_I32V_2(93))))),
WASM_GET_LOCAL(0));
@@ -1443,7 +1443,7 @@ WASM_EXEC_TEST(Loop_if_break_fallthru2) {
}
WASM_EXEC_TEST(IfBreak1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SEQ(WASM_BR(0), WASM_UNREACHABLE)),
WASM_I32V_2(91));
CHECK_EQ(91, r.Call(0));
@@ -1452,7 +1452,7 @@ WASM_EXEC_TEST(IfBreak1) {
}
WASM_EXEC_TEST(IfBreak2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SEQ(WASM_BR(0), RET_I8(77))),
WASM_I32V_2(81));
CHECK_EQ(81, r.Call(0));
@@ -1461,7 +1461,7 @@ WASM_EXEC_TEST(IfBreak2) {
}
WASM_EXEC_TEST(LoadMemI32) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
@@ -1480,7 +1480,7 @@ WASM_EXEC_TEST(LoadMemI32) {
WASM_EXEC_TEST(LoadMemI32_alignment) {
for (byte alignment = 0; alignment <= 2; ++alignment) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
@@ -1500,7 +1500,7 @@ WASM_EXEC_TEST(LoadMemI32_alignment) {
}
WASM_EXEC_TEST(LoadMemI32_oob) {
- WasmRunner<int32_t, uint32_t> r(execution_mode);
+ WasmRunner<int32_t, uint32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
@@ -1529,7 +1529,7 @@ WASM_EXEC_TEST(LoadMem_offset_oob) {
constexpr size_t num_bytes = kWasmPageSize;
for (size_t m = 0; m < arraysize(machineTypes); ++m) {
- WasmRunner<int32_t, uint32_t> r(execution_mode);
+ WasmRunner<int32_t, uint32_t> r(execution_tier);
r.builder().AddMemoryElems<byte>(num_bytes);
r.builder().RandomizeMemory(1116 + static_cast<int>(m));
@@ -1549,7 +1549,7 @@ WASM_EXEC_TEST(LoadMem_offset_oob) {
}
WASM_EXEC_TEST(LoadMemI32_offset) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(1111);
@@ -1581,7 +1581,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
for (byte offset = 0; offset < kRunwayLength + 5; ++offset) {
for (uint32_t index = kWasmPageSize - kRunwayLength;
index < kWasmPageSize + 5; ++index) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemoryElems<byte>(kWasmPageSize);
r.builder().RandomizeMemory();
@@ -1604,7 +1604,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob) {
for (byte offset = 0; offset < kRunwayLength + 5; offset += 4) {
for (uint32_t index = kWasmPageSize - kRunwayLength;
index < kWasmPageSize + 5; index += 4) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemoryElems<byte>(kWasmPageSize);
r.builder().RandomizeMemory();
@@ -1624,7 +1624,7 @@ WASM_EXEC_TEST(StoreMemI32_alignment) {
const int32_t kWritten = 0x12345678;
for (byte i = 0; i <= 2; ++i) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, i,
@@ -1639,7 +1639,7 @@ WASM_EXEC_TEST(StoreMemI32_alignment) {
}
WASM_EXEC_TEST(StoreMemI32_offset) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
const int32_t kWritten = 0xAABBCCDD;
@@ -1672,7 +1672,7 @@ WASM_EXEC_TEST(StoreMem_offset_oob) {
constexpr size_t num_bytes = kWasmPageSize;
for (size_t m = 0; m < arraysize(machineTypes); ++m) {
- WasmRunner<int32_t, uint32_t> r(execution_mode);
+ WasmRunner<int32_t, uint32_t> r(execution_tier);
byte* memory = r.builder().AddMemoryElems<byte>(num_bytes);
r.builder().RandomizeMemory(1119 + static_cast<int>(m));
@@ -1700,7 +1700,7 @@ WASM_EXEC_TEST(Store_i32_narrowed) {
stored_size_in_bytes = std::max(1, stored_size_in_bytes * 2);
constexpr int kBytes = 24;
uint8_t expected_memory[kBytes] = {0};
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(kWasmPageSize);
constexpr uint32_t kPattern = 0x12345678;
@@ -1724,7 +1724,7 @@ WASM_EXEC_TEST(Store_i32_narrowed) {
WASM_EXEC_TEST(LoadMemI32_P) {
const int kNumElems = 8;
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* memory =
r.builder().AddMemoryElems<int32_t>(kWasmPageSize / sizeof(int32_t));
r.builder().RandomizeMemory(2222);
@@ -1738,7 +1738,7 @@ WASM_EXEC_TEST(LoadMemI32_P) {
WASM_EXEC_TEST(MemI32_Sum) {
const int kNumElems = 20;
- WasmRunner<uint32_t, int32_t> r(execution_mode);
+ WasmRunner<uint32_t, int32_t> r(execution_tier);
uint32_t* memory =
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(int32_t));
const byte kSum = r.AllocateLocal(kWasmI32);
@@ -1768,7 +1768,7 @@ WASM_EXEC_TEST(MemI32_Sum) {
WASM_EXEC_TEST(CheckMachIntsZero) {
const int kNumElems = 55;
- WasmRunner<uint32_t, int32_t> r(execution_mode);
+ WasmRunner<uint32_t, int32_t> r(execution_tier);
r.builder().AddMemoryElems<uint32_t>(kWasmPageSize / sizeof(uint32_t));
BUILD(r, // --
@@ -1796,7 +1796,7 @@ WASM_EXEC_TEST(CheckMachIntsZero) {
WASM_EXEC_TEST(MemF32_Sum) {
const int kSize = 5;
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.builder().AddMemoryElems<float>(kWasmPageSize / sizeof(float));
float* buffer = r.builder().raw_mem_start<float>();
r.builder().WriteMemory(&buffer[0], -99.25f);
@@ -1824,10 +1824,9 @@ WASM_EXEC_TEST(MemF32_Sum) {
}
template <typename T>
-T GenerateAndRunFold(WasmExecutionMode execution_mode, WasmOpcode binop,
- T* buffer, uint32_t size, ValueType astType,
- MachineType memType) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+T GenerateAndRunFold(ExecutionTier execution_tier, WasmOpcode binop, T* buffer,
+ uint32_t size, ValueType astType, MachineType memType) {
+ WasmRunner<int32_t, int32_t> r(execution_tier);
T* memory = r.builder().AddMemoryElems<T>(static_cast<uint32_t>(
RoundUp(size * sizeof(T), kWasmPageSize) / sizeof(sizeof(T))));
for (uint32_t i = 0; i < size; ++i) {
@@ -1855,19 +1854,19 @@ WASM_EXEC_TEST(MemF64_Mul) {
const size_t kSize = 6;
double buffer[kSize] = {1, 2, 2, 2, 2, 2};
double result =
- GenerateAndRunFold<double>(execution_mode, kExprF64Mul, buffer, kSize,
+ GenerateAndRunFold<double>(execution_tier, kExprF64Mul, buffer, kSize,
kWasmF64, MachineType::Float64());
CHECK_EQ(32, result);
}
WASM_EXEC_TEST(Build_Wasm_Infinite_Loop) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// Only build the graph and compile, don't run.
BUILD(r, WASM_INFINITE_LOOP, WASM_ZERO);
}
WASM_EXEC_TEST(Build_Wasm_Infinite_Loop_effect) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
// Only build the graph and compile, don't run.
@@ -1876,49 +1875,49 @@ WASM_EXEC_TEST(Build_Wasm_Infinite_Loop_effect) {
}
WASM_EXEC_TEST(Unreachable0a) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(9)), RET(WASM_GET_LOCAL(0))));
CHECK_EQ(9, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
WASM_EXEC_TEST(Unreachable0b) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(7)), WASM_UNREACHABLE));
CHECK_EQ(7, r.Call(0));
CHECK_EQ(7, r.Call(1));
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_UNREACHABLE);
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE);
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_Unreachable3) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE, WASM_UNREACHABLE);
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_UnreachableIf1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_UNREACHABLE,
WASM_IF(WASM_GET_LOCAL(0), WASM_SEQ(WASM_GET_LOCAL(0), WASM_DROP)),
WASM_ZERO);
}
WASM_COMPILED_EXEC_TEST(Build_Wasm_UnreachableIf2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_UNREACHABLE,
WASM_IF_ELSE_I(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE));
}
WASM_EXEC_TEST(Unreachable_Load) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0)),
WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))));
@@ -1927,21 +1926,21 @@ WASM_EXEC_TEST(Unreachable_Load) {
}
WASM_EXEC_TEST(BrV_Fallthrough) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BLOCK(WASM_BRV(1, WASM_I32V_1(42))),
WASM_I32V_1(22)));
CHECK_EQ(42, r.Call());
}
WASM_EXEC_TEST(Infinite_Loop_not_taken1) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_INFINITE_LOOP), WASM_I32V_1(45));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(0));
}
WASM_EXEC_TEST(Infinite_Loop_not_taken2) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(
WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I32V_1(45)),
WASM_INFINITE_LOOP),
@@ -1951,7 +1950,7 @@ WASM_EXEC_TEST(Infinite_Loop_not_taken2) {
}
WASM_EXEC_TEST(Infinite_Loop_not_taken2_brif) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_I32V_1(45), WASM_GET_LOCAL(0)),
WASM_INFINITE_LOOP));
// Run the code, but don't go into the infinite loop.
@@ -2005,7 +2004,7 @@ TEST(Build_Wasm_SimpleExprs) {
}
WASM_EXEC_TEST(Int32LoadInt8_signext) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
const int kNumElems = kWasmPageSize;
int8_t* memory = r.builder().AddMemoryElems<int8_t>(kNumElems);
r.builder().RandomizeMemory();
@@ -2018,7 +2017,7 @@ WASM_EXEC_TEST(Int32LoadInt8_signext) {
}
WASM_EXEC_TEST(Int32LoadInt8_zeroext) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
const int kNumElems = kWasmPageSize;
byte* memory = r.builder().AddMemory(kNumElems);
r.builder().RandomizeMemory(77);
@@ -2031,7 +2030,7 @@ WASM_EXEC_TEST(Int32LoadInt8_zeroext) {
}
WASM_EXEC_TEST(Int32LoadInt16_signext) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
const int kNumBytes = kWasmPageSize;
byte* memory = r.builder().AddMemory(kNumBytes);
r.builder().RandomizeMemory(888);
@@ -2045,7 +2044,7 @@ WASM_EXEC_TEST(Int32LoadInt16_signext) {
}
WASM_EXEC_TEST(Int32LoadInt16_zeroext) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
const int kNumBytes = kWasmPageSize;
byte* memory = r.builder().AddMemory(kNumBytes);
r.builder().RandomizeMemory(9999);
@@ -2059,18 +2058,18 @@ WASM_EXEC_TEST(Int32LoadInt16_zeroext) {
}
WASM_EXEC_TEST(Int32Global) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* global = r.builder().AddGlobal<int32_t>();
// global = global + p0
BUILD(r,
WASM_SET_GLOBAL(0, WASM_I32_ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))),
WASM_ZERO);
- *global = 116;
+ WriteLittleEndianValue<int32_t>(global, 116);
for (int i = 9; i < 444444; i += 111111) {
- int32_t expected = *global + i;
+ int32_t expected = ReadLittleEndianValue<int32_t>(global) + i;
r.Call(i);
- CHECK_EQ(expected, *global);
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(global));
}
}
@@ -2078,7 +2077,7 @@ WASM_EXEC_TEST(Int32Globals_DontAlias) {
const int kNumGlobals = 3;
for (int g = 0; g < kNumGlobals; ++g) {
// global = global + p0
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* globals[] = {r.builder().AddGlobal<int32_t>(),
r.builder().AddGlobal<int32_t>(),
r.builder().AddGlobal<int32_t>()};
@@ -2088,23 +2087,24 @@ WASM_EXEC_TEST(Int32Globals_DontAlias) {
WASM_GET_GLOBAL(g));
// Check that reading/writing global number {g} doesn't alter the others.
- *globals[g] = 116 * g;
+ WriteLittleEndianValue<int32_t>(globals[g], 116 * g);
int32_t before[kNumGlobals];
for (int i = 9; i < 444444; i += 111113) {
- int32_t sum = *globals[g] + i;
- for (int j = 0; j < kNumGlobals; ++j) before[j] = *globals[j];
+ int32_t sum = ReadLittleEndianValue<int32_t>(globals[g]) + i;
+ for (int j = 0; j < kNumGlobals; ++j)
+ before[j] = ReadLittleEndianValue<int32_t>(globals[j]);
int32_t result = r.Call(i);
CHECK_EQ(sum, result);
for (int j = 0; j < kNumGlobals; ++j) {
int32_t expected = j == g ? sum : before[j];
- CHECK_EQ(expected, *globals[j]);
+ CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(globals[j]));
}
}
}
}
WASM_EXEC_TEST(Float32Global) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
float* global = r.builder().AddGlobal<float>();
// global = global + p0
BUILD(r, WASM_SET_GLOBAL(
@@ -2112,16 +2112,16 @@ WASM_EXEC_TEST(Float32Global) {
WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
WASM_ZERO);
- *global = 1.25;
+ WriteLittleEndianValue<float>(global, 1.25);
for (int i = 9; i < 4444; i += 1111) {
- volatile float expected = *global + i;
+ volatile float expected = ReadLittleEndianValue<float>(global) + i;
r.Call(i);
- CHECK_EQ(expected, *global);
+ CHECK_EQ(expected, ReadLittleEndianValue<float>(global));
}
}
WASM_EXEC_TEST(Float64Global) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
double* global = r.builder().AddGlobal<double>();
// global = global + p0
BUILD(r, WASM_SET_GLOBAL(
@@ -2129,16 +2129,16 @@ WASM_EXEC_TEST(Float64Global) {
WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
WASM_ZERO);
- *global = 1.25;
+ WriteLittleEndianValue<double>(global, 1.25);
for (int i = 9; i < 4444; i += 1111) {
- volatile double expected = *global + i;
+ volatile double expected = ReadLittleEndianValue<double>(global) + i;
r.Call(i);
- CHECK_EQ(expected, *global);
+ CHECK_EQ(expected, ReadLittleEndianValue<double>(global));
}
}
WASM_EXEC_TEST(MixedGlobals) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
int32_t* unused = r.builder().AddGlobal<int32_t>();
byte* memory = r.builder().AddMemory(kWasmPageSize);
@@ -2164,17 +2164,20 @@ WASM_EXEC_TEST(MixedGlobals) {
memory[7] = 0x99;
r.Call(1);
- CHECK(static_cast<int32_t>(0xEE55CCAA) == *var_int32);
- CHECK(static_cast<uint32_t>(0xEE55CCAA) == *var_uint32);
- CHECK(bit_cast<float>(0xEE55CCAA) == *var_float);
- CHECK(bit_cast<double>(0x99112233EE55CCAAULL) == *var_double);
+ CHECK(static_cast<int32_t>(0xEE55CCAA) ==
+ ReadLittleEndianValue<int32_t>(var_int32));
+ CHECK(static_cast<uint32_t>(0xEE55CCAA) ==
+ ReadLittleEndianValue<uint32_t>(var_uint32));
+ CHECK(bit_cast<float>(0xEE55CCAA) == ReadLittleEndianValue<float>(var_float));
+ CHECK(bit_cast<double>(0x99112233EE55CCAAULL) ==
+ ReadLittleEndianValue<double>(var_double));
USE(unused);
}
WASM_EXEC_TEST(CallEmpty) {
const int32_t kExpected = -414444;
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
// Build the target function.
WasmFunctionCompiler& target_func = r.NewFunction<int>();
@@ -2188,7 +2191,7 @@ WASM_EXEC_TEST(CallEmpty) {
}
WASM_EXEC_TEST(CallF32StackParameter) {
- WasmRunner<float> r(execution_mode);
+ WasmRunner<float> r(execution_tier);
// Build the target function.
ValueType param_types[20];
@@ -2211,7 +2214,7 @@ WASM_EXEC_TEST(CallF32StackParameter) {
}
WASM_EXEC_TEST(CallF64StackParameter) {
- WasmRunner<double> r(execution_mode);
+ WasmRunner<double> r(execution_tier);
// Build the target function.
ValueType param_types[20];
@@ -2234,7 +2237,7 @@ WASM_EXEC_TEST(CallF64StackParameter) {
}
WASM_EXEC_TEST(CallVoid) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
const byte kMemOffset = 8;
const int32_t kElemNum = kMemOffset / sizeof(int32_t);
@@ -2259,7 +2262,7 @@ WASM_EXEC_TEST(CallVoid) {
}
WASM_EXEC_TEST(Call_Int32Add) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// Build the target function.
WasmFunctionCompiler& t = r.NewFunction<int32_t, int32_t, int32_t>();
@@ -2279,7 +2282,7 @@ WASM_EXEC_TEST(Call_Int32Add) {
}
WASM_EXEC_TEST(Call_Float32Sub) {
- WasmRunner<float, float, float> r(execution_mode);
+ WasmRunner<float, float, float> r(execution_tier);
// Build the target function.
WasmFunctionCompiler& target_func = r.NewFunction<float, float, float>();
@@ -2295,7 +2298,7 @@ WASM_EXEC_TEST(Call_Float32Sub) {
}
WASM_EXEC_TEST(Call_Float64Sub) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
double* memory =
r.builder().AddMemoryElems<double>(kWasmPageSize / sizeof(double));
@@ -2329,7 +2332,7 @@ WASM_EXEC_TEST(Call_Float64Sub) {
for (size_t i = 0; i < sizeof(__buf); ++i) vec.push_back(__buf[i]); \
} while (false)
-static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
+static void Run_WasmMixedCall_N(ExecutionTier execution_tier, int start) {
const int kExpected = 6333;
const int kElemSize = 8;
TestSignatures sigs;
@@ -2345,7 +2348,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
for (int which = 0; which < num_params; ++which) {
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
MachineType* memtypes = &mixed[start];
MachineType result = memtypes[which];
@@ -2404,13 +2407,13 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
}
}
-WASM_EXEC_TEST(MixedCall_0) { Run_WasmMixedCall_N(execution_mode, 0); }
-WASM_EXEC_TEST(MixedCall_1) { Run_WasmMixedCall_N(execution_mode, 1); }
-WASM_EXEC_TEST(MixedCall_2) { Run_WasmMixedCall_N(execution_mode, 2); }
-WASM_EXEC_TEST(MixedCall_3) { Run_WasmMixedCall_N(execution_mode, 3); }
+WASM_EXEC_TEST(MixedCall_0) { Run_WasmMixedCall_N(execution_tier, 0); }
+WASM_EXEC_TEST(MixedCall_1) { Run_WasmMixedCall_N(execution_tier, 1); }
+WASM_EXEC_TEST(MixedCall_2) { Run_WasmMixedCall_N(execution_tier, 2); }
+WASM_EXEC_TEST(MixedCall_3) { Run_WasmMixedCall_N(execution_tier, 3); }
WASM_EXEC_TEST(AddCall) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction<int32_t, int32_t, int32_t>();
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2429,7 +2432,7 @@ WASM_EXEC_TEST(AddCall) {
WASM_EXEC_TEST(MultiReturnSub) {
EXPERIMENTAL_FLAG_SCOPE(mv);
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
ValueType storage[] = {kWasmI32, kWasmI32, kWasmI32, kWasmI32};
FunctionSig sig_ii_ii(2, 2, storage);
@@ -2449,7 +2452,7 @@ WASM_EXEC_TEST(MultiReturnSub) {
}
template <typename T>
-void RunMultiReturnSelect(WasmExecutionMode execution_mode, const T* inputs) {
+void RunMultiReturnSelect(ExecutionTier execution_tier, const T* inputs) {
EXPERIMENTAL_FLAG_SCOPE(mv);
ValueType type = ValueTypes::ValueTypeFor(MachineTypeForC<T>());
ValueType storage[] = {type, type, type, type, type, type};
@@ -2460,7 +2463,7 @@ void RunMultiReturnSelect(WasmExecutionMode execution_mode, const T* inputs) {
for (size_t i = 0; i < kNumParams; i++) {
for (size_t j = 0; j < kNumParams; j++) {
for (int k = 0; k < 2; k++) {
- WasmRunner<T, T, T, T, T> r(execution_mode);
+ WasmRunner<T, T, T, T, T> r(execution_tier);
WasmFunctionCompiler& r1 = r.NewFunction(&sig);
BUILD(r1, WASM_GET_LOCAL(i), WASM_GET_LOCAL(j));
@@ -2486,12 +2489,12 @@ void RunMultiReturnSelect(WasmExecutionMode execution_mode, const T* inputs) {
WASM_EXEC_TEST(MultiReturnSelect_i32) {
static const int32_t inputs[] = {3333333, 4444444, -55555555, -7777777};
- RunMultiReturnSelect<int32_t>(execution_mode, inputs);
+ RunMultiReturnSelect<int32_t>(execution_tier, inputs);
}
WASM_EXEC_TEST(MultiReturnSelect_f32) {
static const float inputs[] = {33.33333f, 444.4444f, -55555.555f, -77777.77f};
- RunMultiReturnSelect<float>(execution_mode, inputs);
+ RunMultiReturnSelect<float>(execution_tier, inputs);
}
WASM_EXEC_TEST(MultiReturnSelect_i64) {
@@ -2499,17 +2502,17 @@ WASM_EXEC_TEST(MultiReturnSelect_i64) {
// TODO(titzer): implement int64-lowering for multiple return values
static const int64_t inputs[] = {33333338888, 44444446666, -555555553333,
-77777771111};
- RunMultiReturnSelect<int64_t>(execution_mode, inputs);
+ RunMultiReturnSelect<int64_t>(execution_tier, inputs);
#endif
}
WASM_EXEC_TEST(MultiReturnSelect_f64) {
static const double inputs[] = {3.333333, 44444.44, -55.555555, -7777.777};
- RunMultiReturnSelect<double>(execution_mode, inputs);
+ RunMultiReturnSelect<double>(execution_tier, inputs);
}
WASM_EXEC_TEST(ExprBlock2a) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I32V_1(1))),
WASM_I32V_1(1)));
CHECK_EQ(1, r.Call(0));
@@ -2517,7 +2520,7 @@ WASM_EXEC_TEST(ExprBlock2a) {
}
WASM_EXEC_TEST(ExprBlock2b) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I32V_1(1))),
WASM_I32V_1(2)));
CHECK_EQ(2, r.Call(0));
@@ -2525,7 +2528,7 @@ WASM_EXEC_TEST(ExprBlock2b) {
}
WASM_EXEC_TEST(ExprBlock2c) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(1), WASM_GET_LOCAL(0)),
WASM_I32V_1(1)));
CHECK_EQ(1, r.Call(0));
@@ -2533,7 +2536,7 @@ WASM_EXEC_TEST(ExprBlock2c) {
}
WASM_EXEC_TEST(ExprBlock2d) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV_IFD(0, WASM_I32V_1(1), WASM_GET_LOCAL(0)),
WASM_I32V_1(2)));
CHECK_EQ(2, r.Call(0));
@@ -2541,7 +2544,7 @@ WASM_EXEC_TEST(ExprBlock2d) {
}
WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I32V_1(1)),
WASM_BRV(1, WASM_I32V_1(11))),
WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I32V_1(2)),
@@ -2563,7 +2566,7 @@ WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
}
WASM_EXEC_TEST(ExprBlock_ManualSwitch_brif) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(
WASM_BRV_IFD(0, WASM_I32V_1(11),
WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I32V_1(1))),
@@ -2586,7 +2589,7 @@ WASM_EXEC_TEST(ExprBlock_ManualSwitch_brif) {
}
WASM_EXEC_TEST(If_nested) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(
r,
@@ -2602,7 +2605,7 @@ WASM_EXEC_TEST(If_nested) {
}
WASM_EXEC_TEST(ExprBlock_if) {
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_IF_ELSE_I(WASM_GET_LOCAL(0),
WASM_BRV(0, WASM_I32V_1(11)),
@@ -2613,7 +2616,7 @@ WASM_EXEC_TEST(ExprBlock_if) {
}
WASM_EXEC_TEST(ExprBlock_nested_ifs) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_IF_ELSE_I(
WASM_GET_LOCAL(0),
@@ -2630,7 +2633,7 @@ WASM_EXEC_TEST(ExprBlock_nested_ifs) {
WASM_EXEC_TEST(SimpleCallIndirect) {
TestSignatures sigs;
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2664,7 +2667,7 @@ WASM_EXEC_TEST(SimpleCallIndirect) {
WASM_EXEC_TEST(MultipleCallIndirect) {
TestSignatures sigs;
- WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2707,7 +2710,7 @@ WASM_EXEC_TEST(MultipleCallIndirect) {
WASM_EXEC_TEST(CallIndirect_EmptyTable) {
TestSignatures sigs;
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
// One function.
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
@@ -2730,7 +2733,7 @@ WASM_EXEC_TEST(CallIndirect_EmptyTable) {
WASM_EXEC_TEST(CallIndirect_canonical) {
TestSignatures sigs;
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
WasmFunctionCompiler& t1 = r.NewFunction(sigs.i_ii());
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2772,63 +2775,63 @@ WASM_EXEC_TEST(CallIndirect_canonical) {
}
WASM_EXEC_TEST(F32Floor) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_FLOOR(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F32Ceil) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_CEIL(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F32Trunc) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_TRUNC(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F32NearestInt) {
- WasmRunner<float, float> r(execution_mode);
+ WasmRunner<float, float> r(execution_tier);
BUILD(r, WASM_F32_NEARESTINT(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyintf(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F64Floor) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_FLOOR(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F64Ceil) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_CEIL(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F64Trunc) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_TRUNC(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(trunc(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F64NearestInt) {
- WasmRunner<double, double> r(execution_mode);
+ WasmRunner<double, double> r(execution_tier);
BUILD(r, WASM_F64_NEARESTINT(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(F32Min) {
- WasmRunner<float, float, float> r(execution_mode);
+ WasmRunner<float, float, float> r(execution_tier);
BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -2837,7 +2840,7 @@ WASM_EXEC_TEST(F32Min) {
}
WASM_EXEC_TEST(F64Min) {
- WasmRunner<double, double, double> r(execution_mode);
+ WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
@@ -2846,7 +2849,7 @@ WASM_EXEC_TEST(F64Min) {
}
WASM_EXEC_TEST(F32Max) {
- WasmRunner<float, float, float> r(execution_mode);
+ WasmRunner<float, float, float> r(execution_tier);
BUILD(r, WASM_F32_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -2855,7 +2858,7 @@ WASM_EXEC_TEST(F32Max) {
}
WASM_EXEC_TEST(F64Max) {
- WasmRunner<double, double, double> r(execution_mode);
+ WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
@@ -2867,7 +2870,7 @@ WASM_EXEC_TEST(F64Max) {
}
WASM_EXEC_TEST(I32SConvertF32) {
- WasmRunner<int32_t, float> r(execution_mode);
+ WasmRunner<int32_t, float> r(execution_tier);
BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -2881,7 +2884,7 @@ WASM_EXEC_TEST(I32SConvertF32) {
WASM_EXEC_TEST(I32SConvertSatF32) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<int32_t, float> r(execution_mode);
+ WasmRunner<int32_t, float> r(execution_tier);
BUILD(r, WASM_I32_SCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -2897,7 +2900,7 @@ WASM_EXEC_TEST(I32SConvertSatF32) {
}
WASM_EXEC_TEST(I32SConvertF64) {
- WasmRunner<int32_t, double> r(execution_mode);
+ WasmRunner<int32_t, double> r(execution_tier);
BUILD(r, WASM_I32_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -2911,7 +2914,7 @@ WASM_EXEC_TEST(I32SConvertF64) {
WASM_EXEC_TEST(I32SConvertSatF64) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<int32_t, double> r(execution_mode);
+ WasmRunner<int32_t, double> r(execution_tier);
BUILD(r, WASM_I32_SCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int32_t expected =
@@ -2926,7 +2929,7 @@ WASM_EXEC_TEST(I32SConvertSatF64) {
}
WASM_EXEC_TEST(I32UConvertF32) {
- WasmRunner<uint32_t, float> r(execution_mode);
+ WasmRunner<uint32_t, float> r(execution_tier);
BUILD(r, WASM_I32_UCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
if (is_inbounds<uint32_t>(*i)) {
@@ -2939,7 +2942,7 @@ WASM_EXEC_TEST(I32UConvertF32) {
WASM_EXEC_TEST(I32UConvertSatF32) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<uint32_t, float> r(execution_mode);
+ WasmRunner<uint32_t, float> r(execution_tier);
BUILD(r, WASM_I32_UCONVERT_SAT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
int32_t expected =
@@ -2954,7 +2957,7 @@ WASM_EXEC_TEST(I32UConvertSatF32) {
}
WASM_EXEC_TEST(I32UConvertF64) {
- WasmRunner<uint32_t, double> r(execution_mode);
+ WasmRunner<uint32_t, double> r(execution_tier);
BUILD(r, WASM_I32_UCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
if (is_inbounds<uint32_t>(*i)) {
@@ -2967,7 +2970,7 @@ WASM_EXEC_TEST(I32UConvertF64) {
WASM_EXEC_TEST(I32UConvertSatF64) {
EXPERIMENTAL_FLAG_SCOPE(sat_f2i_conversions);
- WasmRunner<uint32_t, double> r(execution_mode);
+ WasmRunner<uint32_t, double> r(execution_tier);
BUILD(r, WASM_I32_UCONVERT_SAT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
int32_t expected =
@@ -2982,7 +2985,7 @@ WASM_EXEC_TEST(I32UConvertSatF64) {
}
WASM_EXEC_TEST(F64CopySign) {
- WasmRunner<double, double, double> r(execution_mode);
+ WasmRunner<double, double, double> r(execution_tier);
BUILD(r, WASM_F64_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
@@ -2991,7 +2994,7 @@ WASM_EXEC_TEST(F64CopySign) {
}
WASM_EXEC_TEST(F32CopySign) {
- WasmRunner<float, float, float> r(execution_mode);
+ WasmRunner<float, float, float> r(execution_tier);
BUILD(r, WASM_F32_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -2999,12 +3002,12 @@ WASM_EXEC_TEST(F32CopySign) {
}
}
-static void CompileCallIndirectMany(WasmExecutionMode mode, ValueType param) {
+static void CompileCallIndirectMany(ExecutionTier tier, ValueType param) {
// Make sure we don't run out of registers when compiling indirect calls
// with many, many parameters.
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; ++num_params) {
- WasmRunner<void> r(mode);
+ WasmRunner<void> r(tier);
FunctionSig* sig = sigs.many(r.zone(), kWasmStmt, param, num_params);
r.builder().AddSignature(sig);
@@ -3025,19 +3028,19 @@ static void CompileCallIndirectMany(WasmExecutionMode mode, ValueType param) {
}
WASM_COMPILED_EXEC_TEST(Compile_Wasm_CallIndirect_Many_i32) {
- CompileCallIndirectMany(execution_mode, kWasmI32);
+ CompileCallIndirectMany(execution_tier, kWasmI32);
}
WASM_COMPILED_EXEC_TEST(Compile_Wasm_CallIndirect_Many_f32) {
- CompileCallIndirectMany(execution_mode, kWasmF32);
+ CompileCallIndirectMany(execution_tier, kWasmF32);
}
WASM_COMPILED_EXEC_TEST(Compile_Wasm_CallIndirect_Many_f64) {
- CompileCallIndirectMany(execution_mode, kWasmF64);
+ CompileCallIndirectMany(execution_tier, kWasmF64);
}
WASM_EXEC_TEST(Int32RemS_dead) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), WASM_DROP,
WASM_ZERO);
const int32_t kMin = std::numeric_limits<int32_t>::min();
@@ -3050,7 +3053,7 @@ WASM_EXEC_TEST(Int32RemS_dead) {
}
WASM_EXEC_TEST(BrToLoopWithValue) {
- WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
// Subtracts <1> times 3 from <0> and returns the result.
BUILD(r,
// loop i32
@@ -3070,7 +3073,7 @@ WASM_EXEC_TEST(BrToLoopWithValue) {
WASM_EXEC_TEST(BrToLoopWithoutValue) {
// This was broken in the interpreter, see http://crbug.com/715454
- WasmRunner<int32_t, int32_t> r(execution_mode);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(
r, kExprLoop, kLocalI32, // loop i32
WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_ONE)), // dec <0>
@@ -3081,31 +3084,31 @@ WASM_EXEC_TEST(BrToLoopWithoutValue) {
}
WASM_EXEC_TEST(LoopsWithValues) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_LOOP_I(WASM_LOOP_I(WASM_ONE), WASM_ONE, kExprI32Add));
CHECK_EQ(2, r.Call());
}
WASM_EXEC_TEST(InvalidStackAfterUnreachable) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, kExprUnreachable, kExprI32Add);
CHECK_TRAP32(r.Call());
}
WASM_EXEC_TEST(InvalidStackAfterBr) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_BRV(0, WASM_I32V_1(27)), kExprI32Add);
CHECK_EQ(27, r.Call());
}
WASM_EXEC_TEST(InvalidStackAfterReturn) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_RETURN1(WASM_I32V_1(17)), kExprI32Add);
CHECK_EQ(17, r.Call());
}
WASM_EXEC_TEST(BranchOverUnreachableCode) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r,
// Start a block which breaks in the middle (hence unreachable code
// afterwards) and continue execution after this block.
@@ -3116,7 +3119,7 @@ WASM_EXEC_TEST(BranchOverUnreachableCode) {
}
WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop0) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r,
WASM_BLOCK_I(
// Start a loop which breaks in the middle (hence unreachable code
@@ -3130,7 +3133,7 @@ WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop0) {
}
WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop1) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r,
WASM_BLOCK_I(
// Start a loop which breaks in the middle (hence unreachable code
@@ -3143,7 +3146,7 @@ WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop1) {
}
WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop2) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r,
WASM_BLOCK_I(
// Start a loop which breaks in the middle (hence unreachable code
@@ -3157,13 +3160,13 @@ WASM_EXEC_TEST(BranchOverUnreachableCodeInLoop2) {
}
WASM_EXEC_TEST(BlockInsideUnreachable) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(r, WASM_RETURN1(WASM_I32V_1(17)), WASM_BLOCK(WASM_BR(0)));
CHECK_EQ(17, r.Call());
}
WASM_EXEC_TEST(IfInsideUnreachable) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
BUILD(
r, WASM_RETURN1(WASM_I32V_1(17)),
WASM_IF_ELSE_I(WASM_ONE, WASM_BRV(0, WASM_ONE), WASM_RETURN1(WASM_ONE)));
@@ -3177,9 +3180,8 @@ WASM_EXEC_TEST(IfInsideUnreachable) {
// not overwritten.
template <typename ctype>
void BinOpOnDifferentRegisters(
- WasmExecutionMode execution_mode, ValueType type,
- Vector<const ctype> inputs, WasmOpcode opcode,
- std::function<ctype(ctype, ctype, bool*)> expect_fn) {
+ ExecutionTier execution_tier, ValueType type, Vector<const ctype> inputs,
+ WasmOpcode opcode, std::function<ctype(ctype, ctype, bool*)> expect_fn) {
static constexpr int kMaxNumLocals = 8;
for (int num_locals = 1; num_locals < kMaxNumLocals; ++num_locals) {
// {init_locals_code} is shared by all code generated in the loop below.
@@ -3202,7 +3204,7 @@ void BinOpOnDifferentRegisters(
}
for (int lhs = 0; lhs < num_locals; ++lhs) {
for (int rhs = 0; rhs < num_locals; ++rhs) {
- WasmRunner<int32_t> r(execution_mode);
+ WasmRunner<int32_t> r(execution_tier);
ctype* memory =
r.builder().AddMemoryElems<ctype>(kWasmPageSize / sizeof(ctype));
for (int i = 0; i < num_locals; ++i) {
@@ -3226,8 +3228,7 @@ void BinOpOnDifferentRegisters(
ctype value =
i == lhs ? lhs_value
: i == rhs ? rhs_value : static_cast<ctype>(i + 47);
- WriteLittleEndianValue<ctype>(
- reinterpret_cast<Address>(&memory[i]), value);
+ WriteLittleEndianValue<ctype>(&memory[i], value);
}
bool trap = false;
int64_t expect = expect_fn(lhs_value, rhs_value, &trap);
@@ -3236,14 +3237,12 @@ void BinOpOnDifferentRegisters(
continue;
}
CHECK_EQ(0, r.Call());
- CHECK_EQ(expect, ReadLittleEndianValue<ctype>(
- reinterpret_cast<Address>(&memory[0])));
+ CHECK_EQ(expect, ReadLittleEndianValue<ctype>(&memory[0]));
for (int i = 0; i < num_locals; ++i) {
ctype value =
i == lhs ? lhs_value
: i == rhs ? rhs_value : static_cast<ctype>(i + 47);
- CHECK_EQ(value, ReadLittleEndianValue<ctype>(
- reinterpret_cast<Address>(&memory[i + 1])));
+ CHECK_EQ(value, ReadLittleEndianValue<ctype>(&memory[i + 1]));
}
}
}
@@ -3260,37 +3259,37 @@ static constexpr int64_t kSome64BitInputs[] = {
WASM_EXEC_TEST(I32AddOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Add,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Add,
[](int32_t lhs, int32_t rhs, bool* trap) { return lhs + rhs; });
}
WASM_EXEC_TEST(I32SubOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Sub,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Sub,
[](int32_t lhs, int32_t rhs, bool* trap) { return lhs - rhs; });
}
WASM_EXEC_TEST(I32MulOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Mul,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Mul,
[](int32_t lhs, int32_t rhs, bool* trap) { return lhs * rhs; });
}
WASM_EXEC_TEST(I32ShlOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Shl,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32Shl,
[](int32_t lhs, int32_t rhs, bool* trap) { return lhs << (rhs & 31); });
}
WASM_EXEC_TEST(I32ShrSOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32ShrS,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32ShrS,
[](int32_t lhs, int32_t rhs, bool* trap) { return lhs >> (rhs & 31); });
}
WASM_EXEC_TEST(I32ShrUOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32ShrU,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32ShrU,
[](int32_t lhs, int32_t rhs, bool* trap) {
return static_cast<uint32_t>(lhs) >> (rhs & 31);
});
@@ -3298,7 +3297,7 @@ WASM_EXEC_TEST(I32ShrUOnDifferentRegisters) {
WASM_EXEC_TEST(I32DivSOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32DivS,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32DivS,
[](int32_t lhs, int32_t rhs, bool* trap) {
*trap = rhs == 0;
return *trap ? 0 : lhs / rhs;
@@ -3307,7 +3306,7 @@ WASM_EXEC_TEST(I32DivSOnDifferentRegisters) {
WASM_EXEC_TEST(I32DivUOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32DivU,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32DivU,
[](uint32_t lhs, uint32_t rhs, bool* trap) {
*trap = rhs == 0;
return *trap ? 0 : lhs / rhs;
@@ -3316,7 +3315,7 @@ WASM_EXEC_TEST(I32DivUOnDifferentRegisters) {
WASM_EXEC_TEST(I32RemSOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32RemS,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32RemS,
[](int32_t lhs, int32_t rhs, bool* trap) {
*trap = rhs == 0;
return *trap || rhs == -1 ? 0 : lhs % rhs;
@@ -3325,7 +3324,7 @@ WASM_EXEC_TEST(I32RemSOnDifferentRegisters) {
WASM_EXEC_TEST(I32RemUOnDifferentRegisters) {
BinOpOnDifferentRegisters<int32_t>(
- execution_mode, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32RemU,
+ execution_tier, kWasmI32, ArrayVector(kSome32BitInputs), kExprI32RemU,
[](uint32_t lhs, uint32_t rhs, bool* trap) {
*trap = rhs == 0;
return *trap ? 0 : lhs % rhs;
@@ -3334,37 +3333,37 @@ WASM_EXEC_TEST(I32RemUOnDifferentRegisters) {
WASM_EXEC_TEST(I64AddOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Add,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Add,
[](int64_t lhs, int64_t rhs, bool* trap) { return lhs + rhs; });
}
WASM_EXEC_TEST(I64SubOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Sub,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Sub,
[](int64_t lhs, int64_t rhs, bool* trap) { return lhs - rhs; });
}
WASM_EXEC_TEST(I64MulOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Mul,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Mul,
[](int64_t lhs, int64_t rhs, bool* trap) { return lhs * rhs; });
}
WASM_EXEC_TEST(I64ShlOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Shl,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64Shl,
[](int64_t lhs, int64_t rhs, bool* trap) { return lhs << (rhs & 63); });
}
WASM_EXEC_TEST(I64ShrSOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64ShrS,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64ShrS,
[](int64_t lhs, int64_t rhs, bool* trap) { return lhs >> (rhs & 63); });
}
WASM_EXEC_TEST(I64ShrUOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64ShrU,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64ShrU,
[](int64_t lhs, int64_t rhs, bool* trap) {
return static_cast<uint64_t>(lhs) >> (rhs & 63);
});
@@ -3372,7 +3371,7 @@ WASM_EXEC_TEST(I64ShrUOnDifferentRegisters) {
WASM_EXEC_TEST(I64DivSOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64DivS,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64DivS,
[](int64_t lhs, int64_t rhs, bool* trap) {
*trap = rhs == 0 ||
(rhs == -1 && lhs == std::numeric_limits<int64_t>::min());
@@ -3382,7 +3381,7 @@ WASM_EXEC_TEST(I64DivSOnDifferentRegisters) {
WASM_EXEC_TEST(I64DivUOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64DivU,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64DivU,
[](uint64_t lhs, uint64_t rhs, bool* trap) {
*trap = rhs == 0;
return *trap ? 0 : lhs / rhs;
@@ -3391,7 +3390,7 @@ WASM_EXEC_TEST(I64DivUOnDifferentRegisters) {
WASM_EXEC_TEST(I64RemSOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64RemS,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64RemS,
[](int64_t lhs, int64_t rhs, bool* trap) {
*trap = rhs == 0;
return *trap || rhs == -1 ? 0 : lhs % rhs;
@@ -3400,7 +3399,7 @@ WASM_EXEC_TEST(I64RemSOnDifferentRegisters) {
WASM_EXEC_TEST(I64RemUOnDifferentRegisters) {
BinOpOnDifferentRegisters<int64_t>(
- execution_mode, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64RemU,
+ execution_tier, kWasmI64, ArrayVector(kSome64BitInputs), kExprI64RemU,
[](uint64_t lhs, uint64_t rhs, bool* trap) {
*trap = rhs == 0;
return *trap ? 0 : lhs % rhs;
@@ -3408,7 +3407,7 @@ WASM_EXEC_TEST(I64RemUOnDifferentRegisters) {
}
TEST(Liftoff_tier_up) {
- WasmRunner<int32_t, int32_t, int32_t> r(WasmExecutionMode::kExecuteLiftoff);
+ WasmRunner<int32_t, int32_t, int32_t> r(ExecutionTier::kBaseline);
WasmFunctionCompiler& add = r.NewFunction<int32_t, int32_t, int32_t>("add");
BUILD(add, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index e9ee63d93c..39d7e1a5be 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/v8.h"
#include "src/vector.h"
@@ -88,7 +88,7 @@ enum class CompilationState {
kFailed,
};
-class TestResolver : public i::wasm::CompilationResultResolver {
+class TestResolver : public CompilationResultResolver {
public:
explicit TestResolver(CompilationState* state) : state_(state) {}
@@ -109,12 +109,13 @@ class StreamTester {
StreamTester() : zone_(&allocator_, "StreamTester") {
v8::Isolate* isolate = CcTest::isolate();
i::Isolate* i_isolate = CcTest::i_isolate();
+ i::HandleScope internal_scope(i_isolate);
v8::Local<v8::Context> context = isolate->GetCurrentContext();
stream_ = i_isolate->wasm_engine()->StartStreamingCompilation(
- i_isolate, v8::Utils::OpenHandle(*context),
- base::make_unique<TestResolver>(&state_));
+ i_isolate, kAllWasmFeatures, v8::Utils::OpenHandle(*context),
+ std::make_shared<TestResolver>(&state_));
}
std::shared_ptr<StreamingDecoder> stream() { return stream_; }
@@ -151,8 +152,6 @@ class StreamTester {
TEST(name) { \
MockPlatform platform; \
CcTest::InitializeVM(); \
- v8::HandleScope handle_scope(CcTest::isolate()); \
- i::HandleScope internal_scope(CcTest::i_isolate()); \
RunStream_##name(); \
} \
void RunStream_##name()
@@ -212,8 +211,9 @@ STREAM_TEST(TestAllBytesArriveAOTCompilerFinishesFirst) {
size_t GetFunctionOffset(i::Isolate* isolate, const uint8_t* buffer,
size_t size, size_t index) {
- ModuleResult result = SyncDecodeWasmModule(isolate, buffer, buffer + size,
- false, ModuleOrigin::kWasmOrigin);
+ ModuleResult result = DecodeWasmModule(
+ kAllWasmFeatures, buffer, buffer + size, false, ModuleOrigin::kWasmOrigin,
+ isolate->counters(), isolate->allocator());
CHECK(result.ok());
const WasmFunction* func = &result.val->functions[1];
return func->code.offset();
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index ef0a0e545e..ec93639e17 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -242,7 +242,7 @@ std::vector<WasmValue> wasmVec(Args... args) {
} // namespace
WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
- WasmRunner<int> runner(execution_mode);
+ WasmRunner<int> runner(execution_tier);
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE));
@@ -269,7 +269,7 @@ WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
}
WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
- WasmRunner<int> runner(execution_mode);
+ WasmRunner<int> runner(execution_tier);
Isolate* isolate = runner.main_isolate();
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3)));
@@ -290,7 +290,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
}
WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
- WasmRunner<int> runner(execution_mode);
+ WasmRunner<int> runner(execution_tier);
BUILD(runner, WASM_I32_ADD(WASM_I32V_1(11), WASM_I32V_1(3)));
Isolate* isolate = runner.main_isolate();
@@ -317,7 +317,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
}
WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
- WasmRunner<int, int> runner(execution_mode);
+ WasmRunner<int, int> runner(execution_tier);
WasmFunctionCompiler& f2 = runner.NewFunction<void>();
f2.AllocateLocal(kWasmI32);
@@ -357,7 +357,7 @@ WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
}
WASM_COMPILED_EXEC_TEST(WasmGetLocalsAndStack) {
- WasmRunner<void, int> runner(execution_mode);
+ WasmRunner<void, int> runner(execution_tier);
runner.AllocateLocal(kWasmI64);
runner.AllocateLocal(kWasmF32);
runner.AllocateLocal(kWasmF64);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index 16c525945f..d927de34ca 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -91,7 +91,7 @@ static ArgPassingHelper<T> GetHelper(
// Pass int32_t, return int32_t.
TEST(TestArgumentPassing_int32) {
- WasmRunner<int32_t, int32_t> runner(kExecuteTurbofan);
+ WasmRunner<int32_t, int32_t> runner(ExecutionTier::kOptimized);
WasmFunctionCompiler& f2 = runner.NewFunction<int32_t, int32_t>();
auto helper = GetHelper(
@@ -107,7 +107,7 @@ TEST(TestArgumentPassing_int32) {
// Pass int64_t, return double.
TEST(TestArgumentPassing_double_int64) {
- WasmRunner<double, int32_t, int32_t> runner(kExecuteTurbofan);
+ WasmRunner<double, int32_t, int32_t> runner(ExecutionTier::kOptimized);
WasmFunctionCompiler& f2 = runner.NewFunction<double, int64_t>();
auto helper = GetHelper(
@@ -140,7 +140,7 @@ TEST(TestArgumentPassing_double_int64) {
// Pass double, return int64_t.
TEST(TestArgumentPassing_int64_double) {
// Outer function still returns double.
- WasmRunner<double, double> runner(kExecuteTurbofan);
+ WasmRunner<double, double> runner(ExecutionTier::kOptimized);
WasmFunctionCompiler& f2 = runner.NewFunction<int64_t, double>();
auto helper = GetHelper(
@@ -159,7 +159,7 @@ TEST(TestArgumentPassing_int64_double) {
// Pass float, return double.
TEST(TestArgumentPassing_float_double) {
- WasmRunner<double, float> runner(kExecuteTurbofan);
+ WasmRunner<double, float> runner(ExecutionTier::kOptimized);
WasmFunctionCompiler& f2 = runner.NewFunction<double, float>();
auto helper = GetHelper(
@@ -177,7 +177,7 @@ TEST(TestArgumentPassing_float_double) {
// Pass two doubles, return double.
TEST(TestArgumentPassing_double_double) {
- WasmRunner<double, double, double> runner(kExecuteTurbofan);
+ WasmRunner<double, double, double> runner(ExecutionTier::kOptimized);
WasmFunctionCompiler& f2 = runner.NewFunction<double, double, double>();
auto helper = GetHelper(runner, f2,
@@ -197,7 +197,7 @@ TEST(TestArgumentPassing_double_double) {
TEST(TestArgumentPassing_AllTypes) {
// The second and third arguments will be combined into an i64.
WasmRunner<double, int32_t, int32_t, int32_t, float, double> runner(
- kExecuteTurbofan);
+ ExecutionTier::kOptimized);
WasmFunctionCompiler& f2 =
runner.NewFunction<double, int32_t, int64_t, float, double>();
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index 9475332ad0..a2c53ab210 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -5,7 +5,7 @@
#include <stdlib.h>
#include <string.h>
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "src/snapshot/code-serializer.h"
#include "src/version.h"
@@ -147,9 +147,10 @@ class WasmSerializationTest {
HandleScope scope(serialization_isolate);
testing::SetupIsolateForWasmModule(serialization_isolate);
+ auto enabled_features = WasmFeaturesFromIsolate(serialization_isolate);
MaybeHandle<WasmModuleObject> maybe_module_object =
serialization_isolate->wasm_engine()->SyncCompile(
- serialization_isolate, &thrower,
+ serialization_isolate, enabled_features, &thrower,
ModuleWireBytes(buffer.begin(), buffer.end()));
Handle<WasmModuleObject> module_object =
maybe_module_object.ToHandleChecked();
@@ -269,46 +270,75 @@ TEST(BlockWasmCodeGenAtDeserialization) {
Cleanup();
}
-TEST(TransferrableWasmModules) {
+namespace {
+
+void TestTransferrableWasmModules(bool should_share) {
+ i::wasm::WasmEngine::InitializeOncePerProcess();
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
ZoneBuffer buffer(&zone);
WasmSerializationTest::BuildWireBytes(&zone, &buffer);
- Isolate* from_isolate = CcTest::InitIsolateOnce();
- ErrorThrower thrower(from_isolate, "");
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* from_isolate = v8::Isolate::New(create_params);
std::vector<v8::WasmCompiledModule::TransferrableModule> store;
+ std::shared_ptr<NativeModule> original_native_module;
{
- HandleScope scope(from_isolate);
- testing::SetupIsolateForWasmModule(from_isolate);
-
- MaybeHandle<WasmModuleObject> module_object =
- from_isolate->wasm_engine()->SyncCompile(
- from_isolate, &thrower,
+ v8::HandleScope scope(from_isolate);
+ LocalContext env(from_isolate);
+
+ Isolate* from_i_isolate = reinterpret_cast<Isolate*>(from_isolate);
+ testing::SetupIsolateForWasmModule(from_i_isolate);
+ ErrorThrower thrower(from_i_isolate, "TestTransferrableWasmModules");
+ auto enabled_features = WasmFeaturesFromIsolate(from_i_isolate);
+ MaybeHandle<WasmModuleObject> maybe_module_object =
+ from_i_isolate->wasm_engine()->SyncCompile(
+ from_i_isolate, enabled_features, &thrower,
ModuleWireBytes(buffer.begin(), buffer.end()));
+ Handle<WasmModuleObject> module_object =
+ maybe_module_object.ToHandleChecked();
v8::Local<v8::WasmCompiledModule> v8_module =
- v8::Local<v8::WasmCompiledModule>::Cast(v8::Utils::ToLocal(
- Handle<JSObject>::cast(module_object.ToHandleChecked())));
+ v8::Local<v8::WasmCompiledModule>::Cast(
+ v8::Utils::ToLocal(Handle<JSObject>::cast(module_object)));
store.push_back(v8_module->GetTransferrableModule());
+ original_native_module = module_object->managed_native_module()->get();
}
{
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator =
- from_isolate->array_buffer_allocator();
v8::Isolate* to_isolate = v8::Isolate::New(create_params);
{
- v8::HandleScope new_scope(to_isolate);
- v8::Local<v8::Context> deserialization_context =
- v8::Context::New(to_isolate);
- deserialization_context->Enter();
- v8::MaybeLocal<v8::WasmCompiledModule> mod =
+ v8::HandleScope scope(to_isolate);
+ LocalContext env(to_isolate);
+
+ v8::MaybeLocal<v8::WasmCompiledModule> transferred_module =
v8::WasmCompiledModule::FromTransferrableModule(to_isolate, store[0]);
- CHECK(!mod.IsEmpty());
+ CHECK(!transferred_module.IsEmpty());
+ Handle<WasmModuleObject> module_object = Handle<WasmModuleObject>::cast(
+ v8::Utils::OpenHandle(*transferred_module.ToLocalChecked()));
+ std::shared_ptr<NativeModule> transferred_native_module =
+ module_object->managed_native_module()->get();
+ bool is_sharing = (original_native_module == transferred_native_module);
+ CHECK_EQ(should_share, is_sharing);
}
to_isolate->Dispose();
}
+ original_native_module.reset();
+ from_isolate->Dispose();
+}
+
+} // namespace
+
+UNINITIALIZED_TEST(TransferrableWasmModulesCloned) {
+ FlagScope<bool> flag_scope_code(&FLAG_wasm_shared_code, false);
+ TestTransferrableWasmModules(false);
+}
+
+UNINITIALIZED_TEST(TransferrableWasmModulesShared) {
+ FlagScope<bool> flag_scope_engine(&FLAG_wasm_shared_engine, true);
+ FlagScope<bool> flag_scope_code(&FLAG_wasm_shared_code, true);
+ TestTransferrableWasmModules(true);
}
#undef EMIT_CODE_WITH_END
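The transfer path exercised by TestTransferrableWasmModules reduces to two public API calls; a minimal sketch under the same setup (isolate and context creation elided, variable names illustrative):

    // Source isolate: snapshot the compiled module into a transferable form.
    v8::WasmCompiledModule::TransferrableModule transferable =
        v8_module->GetTransferrableModule();
    // ... hand `transferable` to another isolate, e.g. via a worker queue ...
    // Destination isolate: reconstruct the module. Whether the underlying
    // NativeModule is shared or cloned is governed by FLAG_wasm_shared_code,
    // which is what the two new tests assert.
    v8::MaybeLocal<v8::WasmCompiledModule> imported =
        v8::WasmCompiledModule::FromTransferrableModule(to_isolate, transferable);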
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 8d9131ad75..5e70edf830 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -5,6 +5,7 @@
#include <memory>
#include "src/objects-inl.h"
+#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -25,8 +26,7 @@ namespace test_wasm_shared_engine {
class SharedEngine {
public:
explicit SharedEngine(size_t max_committed = kMaxWasmCodeMemory)
- : wasm_engine_(base::make_unique<WasmEngine>(
- base::make_unique<WasmCodeManager>(max_committed))) {}
+ : wasm_engine_(base::make_unique<WasmEngine>()) {}
~SharedEngine() {
// Ensure no remaining uses exist.
CHECK(wasm_engine_.unique());
@@ -83,20 +83,9 @@ class SharedEngineIsolate {
return instance.ToHandleChecked();
}
- // TODO(mstarzinger): Switch over to a public API for sharing modules via the
- // {v8::WasmCompiledModule::TransferrableModule} class once it is ready.
Handle<WasmInstanceObject> ImportInstance(SharedModule shared_module) {
- Vector<const byte> wire_bytes = shared_module->wire_bytes();
- Handle<Script> script = CreateWasmScript(isolate(), wire_bytes);
Handle<WasmModuleObject> module_object =
- WasmModuleObject::New(isolate(), shared_module, script);
-
- // TODO(6792): Wrappers below might be cloned using {Factory::CopyCode}.
- // This requires unlocking the code space here. This should eventually be
- // moved into the allocator.
- CodeSpaceMemoryModificationScope modification_scope(isolate()->heap());
- CompileJsToWasmWrappers(isolate(), module_object);
-
+ isolate()->wasm_engine()->ImportNativeModule(isolate(), shared_module);
ErrorThrower thrower(isolate(), "ImportInstance");
MaybeHandle<WasmInstanceObject> instance =
isolate()->wasm_engine()->SyncInstantiate(isolate(), &thrower,
@@ -117,6 +106,26 @@ class SharedEngineIsolate {
std::unique_ptr<Zone> zone_;
};
+// Helper class representing a Thread running its own instance of an Isolate
+// with a shared WebAssembly engine available at construction time.
+class SharedEngineThread : public v8::base::Thread {
+ public:
+ SharedEngineThread(SharedEngine* engine,
+ std::function<void(SharedEngineIsolate&)> callback)
+ : Thread(Options("SharedEngineThread")),
+ engine_(engine),
+ callback_(callback) {}
+
+ virtual void Run() {
+ SharedEngineIsolate isolate(engine_);
+ callback_(isolate);
+ }
+
+ private:
+ SharedEngine* engine_;
+ std::function<void(SharedEngineIsolate&)> callback_;
+};
+
namespace {
ZoneBuffer* BuildReturnConstantModule(Zone* zone, int constant) {
@@ -132,6 +141,62 @@ ZoneBuffer* BuildReturnConstantModule(Zone* zone, int constant) {
return buffer;
}
+class MockInstantiationResolver : public InstantiationResultResolver {
+ public:
+ explicit MockInstantiationResolver(Handle<Object>* out_instance)
+ : out_instance_(out_instance) {}
+ virtual void OnInstantiationSucceeded(Handle<WasmInstanceObject> result) {
+ *out_instance_->location() = *result;
+ }
+ virtual void OnInstantiationFailed(Handle<Object> error_reason) {
+ UNREACHABLE();
+ }
+
+ private:
+ Handle<Object>* out_instance_;
+};
+
+class MockCompilationResolver : public CompilationResultResolver {
+ public:
+ MockCompilationResolver(SharedEngineIsolate& isolate,
+ Handle<Object>* out_instance)
+ : isolate_(isolate), out_instance_(out_instance) {}
+ virtual void OnCompilationSucceeded(Handle<WasmModuleObject> result) {
+ isolate_.isolate()->wasm_engine()->AsyncInstantiate(
+ isolate_.isolate(),
+ base::make_unique<MockInstantiationResolver>(out_instance_), result,
+ {});
+ }
+ virtual void OnCompilationFailed(Handle<Object> error_reason) {
+ UNREACHABLE();
+ }
+
+ private:
+ SharedEngineIsolate& isolate_;
+ Handle<Object>* out_instance_;
+};
+
+void PumpMessageLoop(SharedEngineIsolate& isolate) {
+ v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
+ isolate.v8_isolate(),
+ platform::MessageLoopBehavior::kWaitForWork);
+ isolate.isolate()->RunMicrotasks();
+}
+
+Handle<WasmInstanceObject> CompileAndInstantiateAsync(
+ SharedEngineIsolate& isolate, ZoneBuffer* buffer) {
+ Handle<Object> maybe_instance = handle(Smi::kZero, isolate.isolate());
+ auto enabled_features = WasmFeaturesFromIsolate(isolate.isolate());
+ isolate.isolate()->wasm_engine()->AsyncCompile(
+ isolate.isolate(), enabled_features,
+ base::make_unique<MockCompilationResolver>(isolate, &maybe_instance),
+ ModuleWireBytes(buffer->begin(), buffer->end()), true);
+ while (!maybe_instance->IsWasmInstanceObject()) PumpMessageLoop(isolate);
+ Handle<WasmInstanceObject> instance =
+ Handle<WasmInstanceObject>::cast(maybe_instance);
+ return instance;
+}
+
} // namespace
TEST(SharedEngineUseCount) {
@@ -192,6 +257,111 @@ TEST(SharedEngineRunImported) {
CHECK_EQ(1, module.use_count());
}
+TEST(SharedEngineRunThreadedBuildingSync) {
+ SharedEngine engine;
+ SharedEngineThread thread1(&engine, [](SharedEngineIsolate& isolate) {
+ HandleScope scope(isolate.isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 23);
+ Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
+ CHECK_EQ(23, isolate.Run(instance));
+ });
+ SharedEngineThread thread2(&engine, [](SharedEngineIsolate& isolate) {
+ HandleScope scope(isolate.isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 42);
+ Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
+ CHECK_EQ(42, isolate.Run(instance));
+ });
+ thread1.Start();
+ thread2.Start();
+ thread1.Join();
+ thread2.Join();
+}
+
+TEST(SharedEngineRunThreadedBuildingAsync) {
+ SharedEngine engine;
+ SharedEngineThread thread1(&engine, [](SharedEngineIsolate& isolate) {
+ HandleScope scope(isolate.isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 23);
+ Handle<WasmInstanceObject> instance =
+ CompileAndInstantiateAsync(isolate, buffer);
+ CHECK_EQ(23, isolate.Run(instance));
+ });
+ SharedEngineThread thread2(&engine, [](SharedEngineIsolate& isolate) {
+ HandleScope scope(isolate.isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 42);
+ Handle<WasmInstanceObject> instance =
+ CompileAndInstantiateAsync(isolate, buffer);
+ CHECK_EQ(42, isolate.Run(instance));
+ });
+ thread1.Start();
+ thread2.Start();
+ thread1.Join();
+ thread2.Join();
+}
+
+TEST(SharedEngineRunThreadedExecution) {
+ SharedEngine engine;
+ SharedModule module;
+ {
+ SharedEngineIsolate isolate(&engine);
+ HandleScope scope(isolate.isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 23);
+ Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
+ module = isolate.ExportInstance(instance);
+ }
+ SharedEngineThread thread1(&engine, [module](SharedEngineIsolate& isolate) {
+ HandleScope scope(isolate.isolate());
+ Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
+ CHECK_EQ(23, isolate.Run(instance));
+ });
+ SharedEngineThread thread2(&engine, [module](SharedEngineIsolate& isolate) {
+ HandleScope scope(isolate.isolate());
+ Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
+ CHECK_EQ(23, isolate.Run(instance));
+ });
+ thread1.Start();
+ thread2.Start();
+ thread1.Join();
+ thread2.Join();
+}
+
+TEST(SharedEngineRunThreadedTierUp) {
+ SharedEngine engine;
+ SharedModule module;
+ {
+ SharedEngineIsolate isolate(&engine);
+ HandleScope scope(isolate.isolate());
+ ZoneBuffer* buffer = BuildReturnConstantModule(isolate.zone(), 23);
+ Handle<WasmInstanceObject> instance = isolate.CompileAndInstantiate(buffer);
+ module = isolate.ExportInstance(instance);
+ }
+ constexpr int kNumberOfThreads = 5;
+ std::list<SharedEngineThread> threads;
+ for (int i = 0; i < kNumberOfThreads; ++i) {
+ threads.emplace_back(&engine, [module](SharedEngineIsolate& isolate) {
+ constexpr int kNumberOfIterations = 100;
+ HandleScope scope(isolate.isolate());
+ Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
+ for (int j = 0; j < kNumberOfIterations; ++j) {
+ CHECK_EQ(23, isolate.Run(instance));
+ }
+ });
+ }
+ threads.emplace_back(&engine, [module](SharedEngineIsolate& isolate) {
+ HandleScope scope(isolate.isolate());
+ Handle<WasmInstanceObject> instance = isolate.ImportInstance(module);
+ ErrorThrower thrower(isolate.isolate(), "Forced Tier Up");
+ WasmFeatures detected = kNoWasmFeatures;
+ WasmCompilationUnit::CompileWasmFunction(
+ isolate.isolate(), module.get(), &detected, &thrower,
+ GetModuleEnv(module->compilation_state()),
+ &module->module()->functions[0], ExecutionTier::kOptimized);
+ CHECK_EQ(23, isolate.Run(instance));
+ });
+ for (auto& thread : threads) thread.Start();
+ for (auto& thread : threads) thread.Join();
+}
+
} // namespace test_wasm_shared_engine
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 227b90187a..2bed7e64db 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -35,7 +35,7 @@ namespace {
void PrintStackTrace(v8::Isolate* isolate, v8::Local<v8::StackTrace> stack) {
printf("Stack Trace (length %d):\n", stack->GetFrameCount());
for (int i = 0, e = stack->GetFrameCount(); i != e; ++i) {
- v8::Local<v8::StackFrame> frame = stack->GetFrame(i);
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(isolate, i);
v8::Local<v8::String> script = frame->GetScriptName();
v8::Local<v8::String> func = frame->GetFunctionName();
printf(
@@ -68,7 +68,7 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
CHECK_EQ(N, stack->GetFrameCount());
for (int frameNr = 0; frameNr < N; ++frameNr) {
- v8::Local<v8::StackFrame> frame = stack->GetFrame(frameNr);
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(v8_isolate, frameNr);
v8::String::Utf8Value funName(v8_isolate, frame->GetFunctionName());
CHECK_CSTREQ(excInfos[frameNr].func_name, *funName);
// Line and column are 1-based in v8::StackFrame, just as in ExceptionInfo.
@@ -111,7 +111,7 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
*v8::Local<v8::Function>::Cast(CompileRun(source))));
ManuallyImportedJSFunction import = {sigs.v_v(), js_function};
uint32_t js_throwing_index = 0;
- WasmRunner<void> r(execution_mode, &import);
+ WasmRunner<void> r(execution_tier, &import);
// Add a nop such that we don't always get position 1.
BUILD(r, WASM_NOP, WASM_CALL_FUNCTION0(js_throwing_index));
@@ -157,7 +157,7 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
int unreachable_pos = 1 << (8 * pos_shift);
TestSignatures sigs;
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<int> r(execution_mode, 0, "main", kRuntimeExceptionSupport);
+ WasmRunner<int> r(execution_tier, 0, "main", kRuntimeExceptionSupport);
std::vector<byte> code(unreachable_pos + 1, kExprNop);
code[unreachable_pos] = kExprUnreachable;
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 08ad5d31f8..ad9b6d3b56 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/assembler-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "test/cctest/cctest.h"
@@ -54,7 +54,7 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
CHECK_EQ(N, stack->GetFrameCount());
for (int frameNr = 0; frameNr < N; ++frameNr) {
- v8::Local<v8::StackFrame> frame = stack->GetFrame(frameNr);
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(v8_isolate, frameNr);
v8::String::Utf8Value funName(v8_isolate, frame->GetFunctionName());
CHECK_CSTREQ(excInfos[frameNr].func_name, *funName);
CHECK_EQ(excInfos[frameNr].line_nr, frame->GetLineNumber());
@@ -69,7 +69,7 @@ void CheckExceptionInfos(v8::internal::Isolate* i_isolate, Handle<Object> exc,
// Trigger a trap for executing unreachable.
WASM_EXEC_TEST(Unreachable) {
// Create a WasmRunner with stack checks and traps enabled.
- WasmRunner<void> r(execution_mode, 0, "main", kRuntimeExceptionSupport);
+ WasmRunner<void> r(execution_tier, 0, "main", kRuntimeExceptionSupport);
TestSignatures sigs;
BUILD(r, WASM_UNREACHABLE);
@@ -103,7 +103,7 @@ WASM_EXEC_TEST(Unreachable) {
// Trigger a trap for loading from out-of-bounds.
WASM_EXEC_TEST(IllegalLoad) {
- WasmRunner<void> r(execution_mode, 0, "main", kRuntimeExceptionSupport);
+ WasmRunner<void> r(execution_tier, 0, "main", kRuntimeExceptionSupport);
TestSignatures sigs;
r.builder().AddMemory(0L);
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index d7a87849a2..5f623a46cc 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -14,13 +14,13 @@ namespace internal {
namespace wasm {
TestingModuleBuilder::TestingModuleBuilder(
- Zone* zone, ManuallyImportedJSFunction* maybe_import,
- WasmExecutionMode mode, RuntimeExceptionSupport exception_support,
- LowerSimd lower_simd)
+ Zone* zone, ManuallyImportedJSFunction* maybe_import, ExecutionTier tier,
+ RuntimeExceptionSupport exception_support, LowerSimd lower_simd)
: test_module_(std::make_shared<WasmModule>()),
test_module_ptr_(test_module_.get()),
isolate_(CcTest::InitIsolateOnce()),
- execution_mode_(mode),
+ enabled_features_(WasmFeaturesFromIsolate(isolate_)),
+ execution_tier_(tier),
runtime_exception_support_(exception_support),
lower_simd_(lower_simd) {
WasmJs::Install(isolate_, true);
@@ -47,14 +47,13 @@ TestingModuleBuilder::TestingModuleBuilder(
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
: kNoTrapHandler);
auto wasm_to_js_wrapper = native_module_->AddCodeCopy(
- code.ToHandleChecked(), wasm::WasmCode::kWasmToJsWrapper,
- maybe_import_index);
+ code.ToHandleChecked(), WasmCode::kWasmToJsWrapper, maybe_import_index);
ImportedFunctionEntry(instance_object_, maybe_import_index)
.set_wasm_to_js(*maybe_import->js_function, wasm_to_js_wrapper);
}
- if (mode == kExecuteInterpreter) {
+ if (tier == ExecutionTier::kInterpreter) {
interpreter_ = WasmDebugInfo::SetupForTesting(instance_object_);
}
}
@@ -69,7 +68,7 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size) {
test_module_->has_memory = true;
uint32_t alloc_size = RoundUp(size, kWasmPageSize);
Handle<JSArrayBuffer> new_buffer;
- CHECK(wasm::NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
+ CHECK(NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
CHECK(!new_buffer.is_null());
mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
mem_size_ = size;
@@ -195,7 +194,7 @@ ModuleEnv TestingModuleBuilder::CreateModuleEnv() {
return {
test_module_ptr_,
trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler : kNoTrapHandler,
- runtime_exception_support_};
+ runtime_exception_support_, lower_simd()};
}
const WasmGlobal* TestingModuleBuilder::AddGlobal(ValueType type) {
@@ -214,8 +213,9 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
isolate_->factory()->NewScript(isolate_->factory()->empty_string());
script->set_type(Script::TYPE_WASM);
ModuleEnv env = CreateModuleEnv();
- Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate_, test_module_, env, {}, script, Handle<ByteArray>::null());
+ Handle<WasmModuleObject> module_object =
+ WasmModuleObject::New(isolate_, enabled_features_, test_module_, env, {},
+ script, Handle<ByteArray>::null());
// This method is called when we initialize TestEnvironment. We don't
// have a memory yet, so we won't create it here. We'll update the
// interpreter when we get a memory. We do have globals, though.
@@ -230,14 +230,18 @@ Handle<WasmInstanceObject> TestingModuleBuilder::InitInstanceObject() {
void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
Zone* zone, FunctionSig* sig,
const byte* start, const byte* end) {
+ WasmFeatures unused_detected_features;
+ FunctionBody body(sig, 0, start, end);
DecodeResult result =
- BuildTFGraph(zone->allocator(), builder, sig, start, end);
+ BuildTFGraph(zone->allocator(), kAllWasmFeatures, nullptr, builder,
+ &unused_detected_features, body, nullptr);
if (result.failed()) {
#ifdef DEBUG
if (!FLAG_trace_wasm_decoder) {
// Retry the compilation with the tracing flag on, to help in debugging.
FLAG_trace_wasm_decoder = true;
- result = BuildTFGraph(zone->allocator(), builder, sig, start, end);
+ result = BuildTFGraph(zone->allocator(), kAllWasmFeatures, nullptr,
+ builder, &unused_detected_features, body, nullptr);
}
#endif
@@ -398,6 +402,13 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
interpreter_->SetFunctionCodeForTesting(function_, start, end);
}
+ // TODO(wasm): tests that go through JS depend on having a compiled version
+ // of each function, even if the execution tier is the interpreter. Fix.
+ auto tier = builder_->execution_tier();
+ if (tier == ExecutionTier::kInterpreter) {
+ tier = ExecutionTier::kOptimized;
+ }
+
Vector<const uint8_t> wire_bytes = builder_->instance_object()
->module_object()
->native_module()
@@ -416,18 +427,15 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
FunctionBody func_body{function_->sig, function_->code.offset(),
func_wire_bytes.start(), func_wire_bytes.end()};
- WasmCompilationUnit::CompilationMode comp_mode =
- builder_->execution_mode() == WasmExecutionMode::kExecuteLiftoff
- ? WasmCompilationUnit::CompilationMode::kLiftoff
- : WasmCompilationUnit::CompilationMode::kTurbofan;
NativeModule* native_module =
builder_->instance_object()->module_object()->native_module();
- WasmCompilationUnit unit(isolate(), &module_env, native_module, func_body,
- func_name, function_->func_index, comp_mode,
- isolate()->counters(), builder_->lower_simd());
- unit.ExecuteCompilation();
- wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower);
- if (wasm::WasmCode::ShouldBeLogged(isolate())) {
+ WasmCompilationUnit unit(isolate()->wasm_engine(), &module_env, native_module,
+ func_body, func_name, function_->func_index,
+ isolate()->counters(), tier);
+ WasmFeatures unused_detected_features;
+ unit.ExecuteCompilation(&unused_detected_features);
+ WasmCode* wasm_code = unit.FinishCompilation(&thrower);
+ if (WasmCode::ShouldBeLogged(isolate())) {
wasm_code->LogCode(isolate());
}
CHECK(!thrower.error());
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index ca1c922dd4..899dc06268 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -33,6 +33,7 @@
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-tier.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
@@ -48,14 +49,6 @@ namespace wasm {
constexpr uint32_t kMaxFunctions = 10;
constexpr uint32_t kMaxGlobalsSize = 128;
-enum WasmExecutionMode {
- kExecuteInterpreter,
- kExecuteTurbofan,
- kExecuteLiftoff
-};
-
-enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };
-
using compiler::CallDescriptor;
using compiler::MachineTypeForC;
using compiler::Node;
@@ -90,7 +83,7 @@ struct ManuallyImportedJSFunction {
// the interpreter.
class TestingModuleBuilder {
public:
- TestingModuleBuilder(Zone*, ManuallyImportedJSFunction*, WasmExecutionMode,
+ TestingModuleBuilder(Zone*, ManuallyImportedJSFunction*, ExecutionTier,
RuntimeExceptionSupport, LowerSimd);
void ChangeOriginToAsmjs() { test_module_->origin = kAsmJsOrigin; }
@@ -203,7 +196,7 @@ class TestingModuleBuilder {
Handle<WasmInstanceObject> instance_object() const {
return instance_object_;
}
- wasm::WasmCode* GetFunctionCode(uint32_t index) const {
+ WasmCode* GetFunctionCode(uint32_t index) const {
return native_module_->code(index);
}
Address globals_start() const {
@@ -217,7 +210,7 @@ class TestingModuleBuilder {
ModuleEnv CreateModuleEnv();
- WasmExecutionMode execution_mode() const { return execution_mode_; }
+ ExecutionTier execution_tier() const { return execution_tier_; }
RuntimeExceptionSupport runtime_exception_support() const {
return runtime_exception_support_;
@@ -227,12 +220,13 @@ class TestingModuleBuilder {
std::shared_ptr<WasmModule> test_module_;
WasmModule* test_module_ptr_;
Isolate* isolate_;
+ WasmFeatures enabled_features_;
uint32_t global_offset = 0;
byte* mem_start_ = nullptr;
uint32_t mem_size_ = 0;
V8_ALIGNED(16) byte globals_data_[kMaxGlobalsSize];
WasmInterpreter* interpreter_ = nullptr;
- WasmExecutionMode execution_mode_;
+ ExecutionTier execution_tier_;
Handle<WasmInstanceObject> instance_object_;
NativeModule* native_module_ = nullptr;
bool linked_ = false;
@@ -265,7 +259,7 @@ class WasmFunctionWrapper : private compiler::GraphAndBuilders {
Init(call_descriptor, MachineTypeForC<ReturnType>(), param_vec);
}
- void SetInnerCode(wasm::WasmCode* code) {
+ void SetInnerCode(WasmCode* code) {
intptr_t address = static_cast<intptr_t>(code->instruction_start());
compiler::NodeProperties::ChangeOp(
inner_code_node_,
@@ -347,11 +341,11 @@ class WasmFunctionCompiler : public compiler::GraphAndBuilders {
class WasmRunnerBase : public HandleAndZoneScope {
public:
WasmRunnerBase(ManuallyImportedJSFunction* maybe_import,
- WasmExecutionMode execution_mode, int num_params,
+ ExecutionTier execution_tier, int num_params,
RuntimeExceptionSupport runtime_exception_support,
LowerSimd lower_simd)
: zone_(&allocator_, ZONE_NAME),
- builder_(&zone_, maybe_import, execution_mode,
+ builder_(&zone_, maybe_import, execution_tier,
runtime_exception_support, lower_simd),
wrapper_(&zone_, num_params) {}
@@ -428,13 +422,13 @@ class WasmRunnerBase : public HandleAndZoneScope {
template <typename ReturnType, typename... ParamTypes>
class WasmRunner : public WasmRunnerBase {
public:
- WasmRunner(WasmExecutionMode execution_mode,
+ WasmRunner(ExecutionTier execution_tier,
ManuallyImportedJSFunction* maybe_import = nullptr,
const char* main_fn_name = "main",
RuntimeExceptionSupport runtime_exception_support =
kNoRuntimeExceptionSupport,
LowerSimd lower_simd = kNoLowerSimd)
- : WasmRunnerBase(maybe_import, execution_mode, sizeof...(ParamTypes),
+ : WasmRunnerBase(maybe_import, execution_tier, sizeof...(ParamTypes),
runtime_exception_support, lower_simd) {
NewFunction<ReturnType, ParamTypes...>(main_fn_name);
if (!interpret()) {
@@ -442,8 +436,8 @@ class WasmRunner : public WasmRunnerBase {
}
}
- WasmRunner(WasmExecutionMode execution_mode, LowerSimd lower_simd)
- : WasmRunner(execution_mode, nullptr, "main", kNoRuntimeExceptionSupport,
+ WasmRunner(ExecutionTier execution_tier, LowerSimd lower_simd)
+ : WasmRunner(execution_tier, nullptr, "main", kNoRuntimeExceptionSupport,
lower_simd) {}
ReturnType Call(ParamTypes... p) {
@@ -501,18 +495,20 @@ class WasmRunner : public WasmRunnerBase {
};
// A macro to define tests that run in different engine configurations.
-#define WASM_EXEC_TEST(name) \
- void RunWasm_##name(WasmExecutionMode execution_mode); \
- TEST(RunWasmTurbofan_##name) { RunWasm_##name(kExecuteTurbofan); } \
- TEST(RunWasmLiftoff_##name) { RunWasm_##name(kExecuteLiftoff); } \
- TEST(RunWasmInterpreter_##name) { RunWasm_##name(kExecuteInterpreter); } \
- void RunWasm_##name(WasmExecutionMode execution_mode)
-
-#define WASM_COMPILED_EXEC_TEST(name) \
- void RunWasm_##name(WasmExecutionMode execution_mode); \
- TEST(RunWasmTurbofan_##name) { RunWasm_##name(kExecuteTurbofan); } \
- TEST(RunWasmLiftoff_##name) { RunWasm_##name(kExecuteLiftoff); } \
- void RunWasm_##name(WasmExecutionMode execution_mode)
+#define WASM_EXEC_TEST(name) \
+ void RunWasm_##name(ExecutionTier execution_tier); \
+ TEST(RunWasmTurbofan_##name) { RunWasm_##name(ExecutionTier::kOptimized); } \
+ TEST(RunWasmLiftoff_##name) { RunWasm_##name(ExecutionTier::kBaseline); } \
+ TEST(RunWasmInterpreter_##name) { \
+ RunWasm_##name(ExecutionTier::kInterpreter); \
+ } \
+ void RunWasm_##name(ExecutionTier execution_tier)
+
+#define WASM_COMPILED_EXEC_TEST(name) \
+ void RunWasm_##name(ExecutionTier execution_tier); \
+ TEST(RunWasmTurbofan_##name) { RunWasm_##name(ExecutionTier::kOptimized); } \
+ TEST(RunWasmLiftoff_##name) { RunWasm_##name(ExecutionTier::kBaseline); } \
+ void RunWasm_##name(ExecutionTier execution_tier)
} // namespace wasm
} // namespace internal
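For reference, with the redefined macro a declaration such as WASM_EXEC_TEST(Foo) (hypothetical test name) now expands to one cctest entry point per execution tier plus the shared body:

    void RunWasm_Foo(ExecutionTier execution_tier);
    TEST(RunWasmTurbofan_Foo) { RunWasm_Foo(ExecutionTier::kOptimized); }
    TEST(RunWasmLiftoff_Foo) { RunWasm_Foo(ExecutionTier::kBaseline); }
    TEST(RunWasmInterpreter_Foo) { RunWasm_Foo(ExecutionTier::kInterpreter); }
    void RunWasm_Foo(ExecutionTier execution_tier)  // { ... test body ... }

WASM_COMPILED_EXEC_TEST expands the same way minus the interpreter variant.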
diff --git a/deps/v8/test/common/assembler-tester.h b/deps/v8/test/common/assembler-tester.h
new file mode 100644
index 0000000000..0291e48efb
--- /dev/null
+++ b/deps/v8/test/common/assembler-tester.h
@@ -0,0 +1,46 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_COMMON_ASSEMBLER_TESTER_H_
+#define V8_TEST_COMMON_ASSEMBLER_TESTER_H_
+
+#include "src/assembler.h"
+
+namespace v8 {
+namespace internal {
+
+static inline uint8_t* AllocateAssemblerBuffer(
+ size_t* allocated,
+ size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize,
+ void* address = nullptr) {
+ size_t page_size = v8::internal::AllocatePageSize();
+ size_t alloc_size = RoundUp(requested, page_size);
+ void* result = v8::internal::AllocatePages(
+ address, alloc_size, page_size, v8::PageAllocator::kReadWriteExecute);
+ CHECK(result);
+ *allocated = alloc_size;
+ return static_cast<uint8_t*>(result);
+}
+
+static inline void MakeAssemblerBufferExecutable(uint8_t* buffer,
+ size_t allocated) {
+ bool result = v8::internal::SetPermissions(buffer, allocated,
+ v8::PageAllocator::kReadExecute);
+ CHECK(result);
+
+ // Flush the instruction cache as part of making the buffer executable.
+ Assembler::FlushICache(buffer, allocated);
+}
+
+static inline void MakeAssemblerBufferWritable(uint8_t* buffer,
+ size_t allocated) {
+ bool result = v8::internal::SetPermissions(buffer, allocated,
+ v8::PageAllocator::kReadWrite);
+ CHECK(result);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TEST_COMMON_ASSEMBLER_TESTER_H_
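The intended call pattern for these helpers is roughly the following sketch; the code-emission step is elided and only the three functions defined above are assumed:

    size_t allocated;
    uint8_t* buffer = AllocateAssemblerBuffer(&allocated);   // RWX pages
    // ... bind an Assembler to `buffer` and emit code ...
    MakeAssemblerBufferExecutable(buffer, allocated);  // drop write, flush icache
    // ... call into the generated code ...
    MakeAssemblerBufferWritable(buffer, allocated);    // back to RW, e.g. to patch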
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index a42c96dd24..1015701e3b 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -591,10 +591,12 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define SIG_ENTRY_x(r) kWasmFunctionTypeCode, 0, 1, r
#define SIG_ENTRY_x_x(r, a) kWasmFunctionTypeCode, 1, a, 1, r
#define SIG_ENTRY_x_xx(r, a, b) kWasmFunctionTypeCode, 2, a, b, 1, r
+#define SIG_ENTRY_xx_xx(r, s, a, b) kWasmFunctionTypeCode, 2, a, b, 2, r, s
#define SIG_ENTRY_x_xxx(r, a, b, c) kWasmFunctionTypeCode, 3, a, b, c, 1, r
#define SIZEOF_SIG_ENTRY_x 4
#define SIZEOF_SIG_ENTRY_x_x 5
#define SIZEOF_SIG_ENTRY_x_xx 6
+#define SIZEOF_SIG_ENTRY_xx_xx 7
#define SIZEOF_SIG_ENTRY_x_xxx 7
#define WASM_BRV(depth, ...) __VA_ARGS__, kExprBr, static_cast<byte>(depth)
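The new two-result signature entry keeps the existing wire layout (parameter count and types first, then result count and types); expanding the definition directly:

    // SIG_ENTRY_xx_xx(r, s, a, b)
    //   ==> kWasmFunctionTypeCode, 2, a, b, 2, r, s
    // i.e. 7 bytes, matching SIZEOF_SIG_ENTRY_xx_xx.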
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index d091aa5cd2..9dfbe6fe1a 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -28,8 +28,9 @@ uint32_t GetInitialMemSize(const WasmModule* module) {
MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
- MaybeHandle<WasmModuleObject> module =
- isolate->wasm_engine()->SyncCompile(isolate, thrower, bytes);
+ auto enabled_features = WasmFeaturesFromIsolate(isolate);
+ MaybeHandle<WasmModuleObject> module = isolate->wasm_engine()->SyncCompile(
+ isolate, enabled_features, thrower, bytes);
DCHECK_EQ(thrower->error(), module.is_null());
if (module.is_null()) return {};
@@ -42,8 +43,10 @@ std::shared_ptr<WasmModule> DecodeWasmModuleForTesting(
const byte* module_end, ModuleOrigin origin, bool verify_functions) {
// Decode the module, but don't verify function bodies, since we'll
// be compiling them anyway.
- ModuleResult decoding_result = SyncDecodeWasmModule(
- isolate, module_start, module_end, verify_functions, origin);
+ auto enabled_features = WasmFeaturesFromIsolate(isolate);
+ ModuleResult decoding_result = DecodeWasmModule(
+ enabled_features, module_start, module_end, verify_functions, origin,
+ isolate->counters(), isolate->allocator());
if (decoding_result.failed()) {
// Module verification failed. throw.
diff --git a/deps/v8/test/debugger/debug/debug-liveedit-arrow-function-at-start.js b/deps/v8/test/debugger/debug/debug-liveedit-arrow-function-at-start.js
new file mode 100644
index 0000000000..ce0fe39ad0
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-liveedit-arrow-function-at-start.js
@@ -0,0 +1,13 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// ()=>42 will have the same start and end position as the top-level script.
+var foo = eval("()=>{ return 42 }");
+assertEquals(42, foo());
+
+%LiveEditPatchScript(foo, "()=>{ return 13 }");
+
+assertEquals(13, foo());
diff --git a/deps/v8/test/debugger/debug/wasm/frame-inspection.js b/deps/v8/test/debugger/debug/wasm/frame-inspection.js
index 45fa8a96f4..b91a466a10 100644
--- a/deps/v8/test/debugger/debug/wasm/frame-inspection.js
+++ b/deps/v8/test/debugger/debug/wasm/frame-inspection.js
@@ -25,7 +25,6 @@ function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
++break_count;
try {
- var break_id = exec_state.break_id;
var frame_count = exec_state.frameCount();
assertEquals(expected_frames.length, frame_count, 'frame count');
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index ab4ed47366..8500344fb5 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -18,8 +18,9 @@
'debug/es6/debug-promises/reject-with-invalid-reject': [FAIL],
# Issue 5651: Context mismatch in ScopeIterator::Type() for eval default
- # parameter value
- 'debug/es6/debug-scope-default-param-with-eval': [FAIL],
+ # parameter value (the test causes indexing a FixedArray out of bounds ->
+ # CRASH is also a reasonable outcome).
+ 'debug/es6/debug-scope-default-param-with-eval': [FAIL, CRASH],
# Slow tests
'debug/debug-scopes': [PASS, SLOW],
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 4f109228a5..0be812c8dd 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -19,6 +19,7 @@
#include "src/optimized-compilation-info.h"
#include "src/simulator.h"
#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -160,7 +161,8 @@ std::unique_ptr<wasm::NativeModule> AllocateNativeModule(i::Isolate* isolate,
// WasmCallDescriptor assumes that code is on the native heap and not
// within a code object.
return isolate->wasm_engine()->code_manager()->NewNativeModule(
- isolate, code_size, false, std::move(module), env);
+ isolate, i::wasm::kAllWasmFeatures, code_size, false, std::move(module),
+ env);
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
@@ -306,6 +308,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
&wrapper_info, i_isolate, wrapper_desc, caller.graph(),
AssemblerOptions::Default(i_isolate), caller.Export())
.ToHandleChecked();
+
auto fn = GeneratedCode<int32_t>::FromCode(*wrapper_code);
int result = fn.Call();
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index 495604b071..c4ff115d72 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -319,12 +319,12 @@ std::string GenerateSourceString(FuzzerArgs* args, const std::string& test) {
return ss.str();
}
-void PrintExceptionMessage(v8::TryCatch* try_catch) {
+void PrintExceptionMessage(v8::Isolate* isolate, v8::TryCatch* try_catch) {
CHECK(try_catch->HasCaught());
static const int kBufferLength = 256;
char buffer[kBufferLength + 1];
try_catch->Message()->Get()->WriteOneByte(
- reinterpret_cast<uint8_t*>(&buffer[0]), 0, kBufferLength);
+ isolate, reinterpret_cast<uint8_t*>(&buffer[0]), 0, kBufferLength);
fprintf(stderr, "%s\n", buffer);
}
@@ -337,9 +337,10 @@ bool ResultsAreIdentical(FuzzerArgs* args) {
"assertEquals(fast.re.lastIndex, slow.re.lastIndex);\n";
v8::Local<v8::Value> result;
- v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(args->isolate));
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(args->isolate);
+ v8::TryCatch try_catch(isolate);
if (!CompileRun(args->context, source.c_str()).ToLocal(&result)) {
- PrintExceptionMessage(&try_catch);
+ PrintExceptionMessage(isolate, &try_catch);
args->isolate->clear_pending_exception();
return false;
}
@@ -349,14 +350,15 @@ bool ResultsAreIdentical(FuzzerArgs* args) {
void CompileRunAndVerify(FuzzerArgs* args, const std::string& source) {
v8::Local<v8::Value> result;
- v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(args->isolate));
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(args->isolate);
+ v8::TryCatch try_catch(isolate);
if (!CompileRun(args->context, source.c_str()).ToLocal(&result)) {
args->isolate->clear_pending_exception();
// No need to verify result if an exception was thrown here, since that
// implies a syntax error somewhere in the pattern or string. We simply
// ignore those.
if (kVerbose) {
- PrintExceptionMessage(&try_catch);
+ PrintExceptionMessage(isolate, &try_catch);
fprintf(stderr, "Failed to run script:\n```\n%s\n```\n", source.c_str());
}
return;
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 5ceb8d8bf8..8e140b71f2 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -68,8 +68,10 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
testing::SetupIsolateForWasmModule(i_isolate);
bool done = false;
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
i_isolate->wasm_engine()->AsyncCompile(
- i_isolate, base::make_unique<AsyncFuzzerResolver>(i_isolate, &done),
+ i_isolate, enabled_features,
+ std::make_shared<AsyncFuzzerResolver>(i_isolate, &done),
ModuleWireBytes(data, data + size), false);
// Wait for the promise to resolve.
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 93c03a92db..a9f4382cd1 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -43,8 +43,10 @@ class DataRange {
// lead to OOM because the end might not be reached.
// Define move constructor and move assignment, disallow copy constructor and
// copy assignment (below).
- DataRange(DataRange&& other) : DataRange(other.data_) { other.data_ = {}; }
- DataRange& operator=(DataRange&& other) {
+ DataRange(DataRange&& other) V8_NOEXCEPT : DataRange(other.data_) {
+ other.data_ = {};
+ }
+ DataRange& operator=(DataRange&& other) V8_NOEXCEPT {
data_ = other.data_;
other.data_ = {};
return *this;
@@ -380,9 +382,9 @@ class WasmGenerator {
void set_global(DataRange& data) { global_op<kWasmStmt>(data); }
- template <ValueType T1, ValueType T2>
+ template <ValueType... Types>
void sequence(DataRange& data) {
- Generate<T1, T2>(data);
+ Generate<Types...>(data);
}
void current_memory(DataRange& data) {
@@ -472,6 +474,9 @@ void WasmGenerator::Generate<kWasmStmt>(DataRange& data) {
constexpr generate_fn alternates[] = {
&WasmGenerator::sequence<kWasmStmt, kWasmStmt>,
+ &WasmGenerator::sequence<kWasmStmt, kWasmStmt, kWasmStmt, kWasmStmt>,
+ &WasmGenerator::sequence<kWasmStmt, kWasmStmt, kWasmStmt, kWasmStmt,
+ kWasmStmt, kWasmStmt, kWasmStmt, kWasmStmt>,
&WasmGenerator::block<kWasmStmt>,
&WasmGenerator::loop<kWasmStmt>,
&WasmGenerator::if_<kWasmStmt, kIf>,
@@ -508,7 +513,9 @@ void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
}
constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmI32, kWasmStmt>,
&WasmGenerator::sequence<kWasmStmt, kWasmI32>,
+ &WasmGenerator::sequence<kWasmStmt, kWasmI32, kWasmStmt>,
&WasmGenerator::op<kExprI32Eqz, kWasmI32>,
&WasmGenerator::op<kExprI32Eq, kWasmI32, kWasmI32>,
@@ -597,7 +604,9 @@ void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
}
constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmI64, kWasmStmt>,
&WasmGenerator::sequence<kWasmStmt, kWasmI64>,
+ &WasmGenerator::sequence<kWasmStmt, kWasmI64, kWasmStmt>,
&WasmGenerator::op<kExprI64Add, kWasmI64, kWasmI64>,
&WasmGenerator::op<kExprI64Sub, kWasmI64, kWasmI64>,
@@ -652,7 +661,9 @@ void WasmGenerator::Generate<kWasmF32>(DataRange& data) {
}
constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmF32, kWasmStmt>,
&WasmGenerator::sequence<kWasmStmt, kWasmF32>,
+ &WasmGenerator::sequence<kWasmStmt, kWasmF32, kWasmStmt>,
&WasmGenerator::op<kExprF32Add, kWasmF32, kWasmF32>,
&WasmGenerator::op<kExprF32Sub, kWasmF32, kWasmF32>,
@@ -683,7 +694,9 @@ void WasmGenerator::Generate<kWasmF64>(DataRange& data) {
}
constexpr generate_fn alternates[] = {
+ &WasmGenerator::sequence<kWasmF64, kWasmStmt>,
&WasmGenerator::sequence<kWasmStmt, kWasmF64>,
+ &WasmGenerator::sequence<kWasmStmt, kWasmF64, kWasmStmt>,
&WasmGenerator::op<kExprF64Add, kWasmF64, kWasmF64>,
&WasmGenerator::op<kExprF64Sub, kWasmF64, kWasmF64>,
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index f84e700fc9..c253da9cb5 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -10,7 +10,7 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
#include "test/common/wasm/flag-utils.h"
@@ -153,9 +153,10 @@ std::ostream& operator<<(std::ostream& os, const PrintName& name) {
void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
bool compiles) {
constexpr bool kVerifyFunctions = false;
- ModuleResult module_res =
- SyncDecodeWasmModule(isolate, wire_bytes.start(), wire_bytes.end(),
- kVerifyFunctions, ModuleOrigin::kWasmOrigin);
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(isolate);
+ ModuleResult module_res = DecodeWasmModule(
+ enabled_features, wire_bytes.start(), wire_bytes.end(), kVerifyFunctions,
+ ModuleOrigin::kWasmOrigin, isolate->counters(), isolate->allocator());
CHECK(module_res.ok());
WasmModule* module = module_res.val.get();
CHECK_NOT_NULL(module);
@@ -181,7 +182,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
os << ", undefined";
}
os << ", " << (module->mem_export ? "true" : "false");
- if (FLAG_experimental_wasm_threads && module->has_shared_memory) {
+ if (module->has_shared_memory) {
os << ", shared";
}
os << ");\n";
@@ -208,7 +209,8 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
// Add locals.
BodyLocalDecls decls(&tmp_zone);
- DecodeLocalDecls(&decls, func_code.start(), func_code.end());
+ DecodeLocalDecls(enabled_features, &decls, func_code.start(),
+ func_code.end());
if (!decls.type_list.empty()) {
os << " ";
for (size_t pos = 0, count = 1, locals = decls.type_list.size();
@@ -284,6 +286,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
// Compile with Turbofan here. Liftoff will be tested later.
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
MaybeHandle<WasmModuleObject> compiled_module;
{
// Explicitly enable Liftoff, disable tiering and set the tier_mask. This
@@ -292,7 +295,7 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
FlagScope<bool> no_tier_up(&FLAG_wasm_tier_up, false);
FlagScope<int> tier_mask_scope(&FLAG_wasm_tier_mask_for_testing, tier_mask);
compiled_module = i_isolate->wasm_engine()->SyncCompile(
- i_isolate, &interpreter_thrower, wire_bytes);
+ i_isolate, enabled_features, &interpreter_thrower, wire_bytes);
}
bool compiles = !compiled_module.is_null();
@@ -300,8 +303,8 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
GenerateTestCase(i_isolate, wire_bytes, compiles);
}
- bool validates =
- i_isolate->wasm_engine()->SyncValidate(i_isolate, wire_bytes);
+ bool validates = i_isolate->wasm_engine()->SyncValidate(
+ i_isolate, enabled_features, wire_bytes);
CHECK_EQ(compiles, validates);
CHECK_IMPLIES(require_valid, validates);
@@ -332,6 +335,16 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
return 0;
}
+ // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
+ // This sign bit can make the difference between an infinite loop and
+ // terminating code. With possible non-determinism we cannot guarantee that
+ // the generated code will not go into an infinite loop and cause a timeout in
+ // Clusterfuzz. Therefore we do not execute the generated code if the result
+ // may be non-deterministic.
+ if (possible_nondeterminism) {
+ return 0;
+ }
+
bool expect_exception =
result_interpreter == static_cast<int32_t>(0xDEADBEEF);
@@ -349,20 +362,14 @@ int WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
"main", num_args, compiler_args.get());
}
- // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
- // This sign bit may cause result_interpreter to be different than
- // result_compiled. Therefore we do not check the equality of the results
- // if the execution may have produced a NaN at some point.
- if (!possible_nondeterminism) {
- if (expect_exception != i_isolate->has_pending_exception()) {
- const char* exception_text[] = {"no exception", "exception"};
- FATAL("interpreter: %s; compiled: %s", exception_text[expect_exception],
- exception_text[i_isolate->has_pending_exception()]);
- }
-
- if (!expect_exception) CHECK_EQ(result_interpreter, result_compiled);
+ if (expect_exception != i_isolate->has_pending_exception()) {
+ const char* exception_text[] = {"no exception", "exception"};
+ FATAL("interpreter: %s; compiled: %s", exception_text[expect_exception],
+ exception_text[i_isolate->has_pending_exception()]);
}
+ if (!expect_exception) CHECK_EQ(result_interpreter, result_compiled);
+
// Cleanup any pending exception.
i_isolate->clear_pending_exception();
return 0;
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 75a6dd9865..fb9135b0f1 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -42,9 +42,11 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
i::HandleScope scope(i_isolate);
i::wasm::ErrorThrower thrower(i_isolate, "wasm fuzzer");
i::Handle<i::WasmModuleObject> module_object;
- bool compiles = i_isolate->wasm_engine()
- ->SyncCompile(i_isolate, &thrower, wire_bytes)
- .ToHandle(&module_object);
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ bool compiles =
+ i_isolate->wasm_engine()
+ ->SyncCompile(i_isolate, enabled_features, &thrower, wire_bytes)
+ .ToHandle(&module_object);
if (i::FLAG_wasm_fuzzer_gen_test) {
i::wasm::fuzzer::GenerateTestCase(i_isolate, wire_bytes, compiles);
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
index 9173fe7e70..a24ba5c370 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -30,6 +30,11 @@ expression: Object(Symbol(42))
description : Symbol
overflow : false
properties : [
+ [0] : {
+ name : description
+ type : string
+ value : 42
+ }
]
type : object
}
diff --git a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
index 0ecd0b82ef..2e6e589b25 100644
--- a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
+++ b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
@@ -12,7 +12,7 @@ Check script with url:
isModule : false
length : 16
scriptId : <scriptId>
- sourceMapURL :
+ sourceMapURL :
startColumn : 0
startLine : 0
url : prefix://url
@@ -31,7 +31,7 @@ Check script with sourceURL comment:
isModule : false
length : 37
scriptId : <scriptId>
- sourceMapURL :
+ sourceMapURL :
startColumn : 0
startLine : 0
url : foo.js
@@ -49,7 +49,7 @@ Check script failed to parse:
isModule : false
length : 15
scriptId : <scriptId>
- sourceMapURL :
+ sourceMapURL :
startColumn : 0
startLine : 0
url : prefix://url
@@ -67,7 +67,7 @@ Check script failed to parse with sourceURL comment:
isModule : false
length : 36
scriptId : <scriptId>
- sourceMapURL :
+ sourceMapURL :
startColumn : 0
startLine : 0
url : foo.js
@@ -96,14 +96,14 @@ Test runtime stack trace:
}
[1] : {
columnNumber : 0
- functionName :
+ functionName :
lineNumber : 0
scriptId : <scriptId>
url : boo.js
}
[2] : {
columnNumber : 4
- functionName :
+ functionName :
lineNumber : 4
scriptId : <scriptId>
url : prefix://url
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 93a8b1d3f2..dbb4493e66 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -7,6 +7,8 @@
#endif // !defined(_WIN32) && !defined(_WIN64)
#include <locale.h>
+#include <string>
+#include <vector>
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
@@ -38,10 +40,10 @@ void Exit() {
Terminate();
}
-v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
- v8::internal::Vector<uint16_t> buffer =
- v8::internal::Vector<uint16_t>::New(str->Length());
- str->Write(buffer.start(), 0, str->Length());
+std::vector<uint16_t> ToVector(v8::Isolate* isolate,
+ v8::Local<v8::String> str) {
+ std::vector<uint16_t> buffer(str->Length());
+ str->Write(isolate, buffer.data(), 0, str->Length());
return buffer;
}
@@ -50,24 +52,24 @@ v8::Local<v8::String> ToV8String(v8::Isolate* isolate, const char* str) {
.ToLocalChecked();
}
-v8::Local<v8::String> ToV8String(v8::Isolate* isolate, const char* str,
- int length) {
- return v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kNormal,
- length)
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
+ const std::string& buffer) {
+ int length = static_cast<int>(buffer.size());
+ return v8::String::NewFromUtf8(isolate, buffer.data(),
+ v8::NewStringType::kNormal, length)
.ToLocalChecked();
}
v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
- const v8::internal::Vector<uint16_t>& buffer) {
- return v8::String::NewFromTwoByte(isolate, buffer.start(),
- v8::NewStringType::kNormal, buffer.length())
+ const std::vector<uint16_t>& buffer) {
+ int length = static_cast<int>(buffer.size());
+ return v8::String::NewFromTwoByte(isolate, buffer.data(),
+ v8::NewStringType::kNormal, length)
.ToLocalChecked();
}
-v8::internal::Vector<uint16_t> ToVector(
- const v8_inspector::StringView& string) {
- v8::internal::Vector<uint16_t> buffer =
- v8::internal::Vector<uint16_t>::New(static_cast<int>(string.length()));
+std::vector<uint16_t> ToVector(const v8_inspector::StringView& string) {
+ std::vector<uint16_t> buffer(string.length());
for (size_t i = 0; i < string.length(); i++) {
if (string.is8Bit())
buffer[i] = string.characters8()[i];
@@ -105,7 +107,7 @@ class FrontendChannelImpl : public v8_inspector::V8Inspector::Channel {
class SendMessageTask : public TaskRunner::Task {
public:
SendMessageTask(FrontendChannelImpl* channel,
- const v8::internal::Vector<uint16_t>& message)
+ const std::vector<uint16_t>& message)
: channel_(channel), message_(message) {}
virtual ~SendMessageTask() {}
bool is_priority_task() final { return false; }
@@ -124,7 +126,7 @@ class FrontendChannelImpl : public v8_inspector::V8Inspector::Channel {
->Call(context, context->Global(), 1, &message);
}
FrontendChannelImpl* channel_;
- v8::internal::Vector<uint16_t> message_;
+ std::vector<uint16_t> message_;
};
TaskRunner* task_runner_;
@@ -160,19 +162,18 @@ void RunSyncTask(TaskRunner* task_runner, T callback) {
class SendMessageToBackendTask : public TaskRunner::Task {
public:
- SendMessageToBackendTask(int session_id,
- const v8::internal::Vector<uint16_t>& message)
+ SendMessageToBackendTask(int session_id, const std::vector<uint16_t>& message)
: session_id_(session_id), message_(message) {}
bool is_priority_task() final { return true; }
private:
void Run(IsolateData* data) override {
- v8_inspector::StringView message_view(message_.start(), message_.length());
+ v8_inspector::StringView message_view(message_.data(), message_.size());
data->SendMessage(session_id_, message_view);
}
int session_id_;
- v8::internal::Vector<uint16_t> message_;
+ std::vector<uint16_t> message_;
};
void RunAsyncTask(TaskRunner* task_runner,
@@ -200,21 +201,23 @@ void RunAsyncTask(TaskRunner* task_runner,
class ExecuteStringTask : public TaskRunner::Task {
public:
- ExecuteStringTask(int context_group_id,
- const v8::internal::Vector<uint16_t>& expression,
+ ExecuteStringTask(v8::Isolate* isolate, int context_group_id,
+ const std::vector<uint16_t>& expression,
v8::Local<v8::String> name,
v8::Local<v8::Integer> line_offset,
v8::Local<v8::Integer> column_offset,
v8::Local<v8::Boolean> is_module)
: expression_(expression),
- name_(ToVector(name)),
+ name_(ToVector(isolate, name)),
line_offset_(line_offset.As<v8::Int32>()->Value()),
column_offset_(column_offset.As<v8::Int32>()->Value()),
is_module_(is_module->Value()),
context_group_id_(context_group_id) {}
- ExecuteStringTask(const v8::internal::Vector<const char>& expression,
- int context_group_id)
+ ExecuteStringTask(const std::string& expression, int context_group_id)
: expression_utf8_(expression), context_group_id_(context_group_id) {}
+
+ virtual ~ExecuteStringTask() {
+ }
bool is_priority_task() override { return false; }
void Run(IsolateData* data) override {
v8::MicrotasksScope microtasks_scope(data->isolate(),
@@ -233,11 +236,10 @@ class ExecuteStringTask : public TaskRunner::Task {
/* is_wasm */ v8::Local<v8::Boolean>(),
v8::Boolean::New(data->isolate(), is_module_));
v8::Local<v8::String> source;
- if (expression_.length())
+ if (expression_.size() != 0)
source = ToV8String(data->isolate(), expression_);
else
- source = ToV8String(data->isolate(), expression_utf8_.start(),
- expression_utf8_.length());
+ source = ToV8String(data->isolate(), expression_utf8_);
v8::ScriptCompiler::Source scriptSource(source, origin);
v8::Isolate::SafeForTerminationScope allowTermination(data->isolate());
@@ -248,14 +250,19 @@ class ExecuteStringTask : public TaskRunner::Task {
v8::MaybeLocal<v8::Value> result;
result = script->Run(context);
} else {
- data->RegisterModule(context, name_, &scriptSource);
+      // RegisterModule takes ownership of {buffer}, so we need to make a copy.
+ int length = static_cast<int>(name_.size());
+ v8::internal::Vector<uint16_t> buffer =
+ v8::internal::Vector<uint16_t>::New(length);
+ std::copy(name_.begin(), name_.end(), buffer.start());
+ data->RegisterModule(context, buffer, &scriptSource);
}
}
private:
- v8::internal::Vector<uint16_t> expression_;
- v8::internal::Vector<const char> expression_utf8_;
- v8::internal::Vector<uint16_t> name_;
+ std::vector<uint16_t> expression_;
+ std::string expression_utf8_;
+ std::vector<uint16_t> name_;
int32_t line_offset_ = 0;
int32_t column_offset_ = 0;
bool is_module_ = false;
@@ -372,7 +379,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
}
static bool ReadFile(v8::Isolate* isolate, v8::Local<v8::Value> name,
- v8::internal::Vector<const char>* chars) {
+ std::string* chars) {
v8::String::Utf8Value str(isolate, name);
bool exists = false;
std::string filename(*str, str.length());
@@ -389,10 +396,11 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
fprintf(stderr, "Internal error: read gets one string argument.");
Exit();
}
- v8::internal::Vector<const char> chars;
+ std::string chars;
v8::Isolate* isolate = args.GetIsolate();
- if (ReadFile(isolate, args[0], &chars))
- args.GetReturnValue().Set(ToV8String(isolate, chars.start()));
+ if (ReadFile(isolate, args[0], &chars)) {
+ args.GetReturnValue().Set(ToV8String(isolate, chars));
+ }
}
static void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -400,7 +408,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
fprintf(stderr, "Internal error: load gets one string argument.");
Exit();
}
- v8::internal::Vector<const char> chars;
+ std::string chars;
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
@@ -423,7 +431,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
}
backend_runner_->Append(new ExecuteStringTask(
- args[0].As<v8::Int32>()->Value(), ToVector(args[1].As<v8::String>()),
+ args.GetIsolate(), args[0].As<v8::Int32>()->Value(),
+ ToVector(args.GetIsolate(), args[1].As<v8::String>()),
args[2].As<v8::String>(), args[3].As<v8::Int32>(),
args[4].As<v8::Int32>(), args[5].As<v8::Boolean>()));
}
@@ -456,16 +465,18 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
"'reason', 'details').");
Exit();
}
- v8::internal::Vector<uint16_t> reason = ToVector(args[1].As<v8::String>());
- v8::internal::Vector<uint16_t> details = ToVector(args[2].As<v8::String>());
+ std::vector<uint16_t> reason =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
+ std::vector<uint16_t> details =
+ ToVector(args.GetIsolate(), args[2].As<v8::String>());
int context_group_id = args[0].As<v8::Int32>()->Value();
- RunSyncTask(backend_runner_, [&context_group_id, &reason,
- &details](IsolateData* data) {
- data->SchedulePauseOnNextStatement(
- context_group_id,
- v8_inspector::StringView(reason.start(), reason.length()),
- v8_inspector::StringView(details.start(), details.length()));
- });
+ RunSyncTask(backend_runner_,
+ [&context_group_id, &reason, &details](IsolateData* data) {
+ data->SchedulePauseOnNextStatement(
+ context_group_id,
+ v8_inspector::StringView(reason.data(), reason.size()),
+ v8_inspector::StringView(details.data(), details.size()));
+ });
}
static void CancelPauseOnNextStatement(
@@ -530,14 +541,15 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
IsolateData::FromContext(context)->GetContextGroupId(context),
args.GetIsolate(), args[2].As<v8::Function>());
- v8::internal::Vector<uint16_t> state = ToVector(args[1].As<v8::String>());
+ std::vector<uint16_t> state =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
int context_group_id = args[0].As<v8::Int32>()->Value();
int session_id = 0;
RunSyncTask(backend_runner_, [&context_group_id, &session_id, &channel,
&state](IsolateData* data) {
session_id = data->ConnectSession(
context_group_id,
- v8_inspector::StringView(state.start(), state.length()), channel);
+ v8_inspector::StringView(state.data(), state.size()), channel);
channel->set_session_id(session_id);
});
@@ -552,7 +564,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
Exit();
}
int session_id = args[0].As<v8::Int32>()->Value();
- v8::internal::Vector<uint16_t> state;
+ std::vector<uint16_t> state;
RunSyncTask(backend_runner_, [&session_id, &state](IsolateData* data) {
state = ToVector(data->DisconnectSession(session_id)->string());
});
@@ -568,7 +580,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
Exit();
}
backend_runner_->Append(new SendMessageToBackendTask(
- args[0].As<v8::Int32>()->Value(), ToVector(args[1].As<v8::String>())));
+ args[0].As<v8::Int32>()->Value(),
+ ToVector(args.GetIsolate(), args[1].As<v8::String>())));
}
static std::map<int, std::unique_ptr<FrontendChannelImpl>> channels_;
@@ -636,7 +649,8 @@ class SetTimeoutExtension : public IsolateData::SetupGlobalTask {
RunAsyncTask(
data->task_runner(), task_name_view,
new ExecuteStringTask(
- context_group_id, ToVector(args[0].As<v8::String>()),
+ isolate, context_group_id,
+ ToVector(isolate, args[0].As<v8::String>()),
v8::String::Empty(isolate), v8::Integer::New(isolate, 0),
v8::Integer::New(isolate, 0), v8::Boolean::New(isolate, false)));
}
@@ -778,10 +792,12 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
- v8::internal::Vector<uint16_t> reason = ToVector(args[0].As<v8::String>());
- v8_inspector::StringView reason_view(reason.start(), reason.length());
- v8::internal::Vector<uint16_t> details = ToVector(args[1].As<v8::String>());
- v8_inspector::StringView details_view(details.start(), details.length());
+ std::vector<uint16_t> reason =
+ ToVector(args.GetIsolate(), args[0].As<v8::String>());
+ v8_inspector::StringView reason_view(reason.data(), reason.size());
+ std::vector<uint16_t> details =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
+ v8_inspector::StringView details_view(details.data(), details.size());
data->BreakProgram(data->GetContextGroupId(context), reason_view,
details_view);
}
@@ -808,10 +824,12 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
"Internal error: callWithScheduledBreak('reason', 'details').");
Exit();
}
- v8::internal::Vector<uint16_t> reason = ToVector(args[1].As<v8::String>());
- v8_inspector::StringView reason_view(reason.start(), reason.length());
- v8::internal::Vector<uint16_t> details = ToVector(args[2].As<v8::String>());
- v8_inspector::StringView details_view(details.start(), details.length());
+ std::vector<uint16_t> reason =
+ ToVector(args.GetIsolate(), args[1].As<v8::String>());
+ v8_inspector::StringView reason_view(reason.data(), reason.size());
+ std::vector<uint16_t> details =
+ ToVector(args.GetIsolate(), args[2].As<v8::String>());
+ v8_inspector::StringView details_view(details.data(), details.size());
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
@@ -898,10 +916,10 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
IsolateData* data = IsolateData::FromContext(context);
- v8::internal::Vector<uint16_t> description =
- ToVector(args[0].As<v8::String>());
- v8_inspector::StringView description_view(description.start(),
- description.length());
+ std::vector<uint16_t> description =
+ ToVector(isolate, args[0].As<v8::String>());
+ v8_inspector::StringView description_view(description.data(),
+ description.size());
v8_inspector::V8StackTraceId id =
data->StoreCurrentStackTrace(description_view);
v8::Local<v8::ArrayBuffer> buffer =
@@ -955,10 +973,9 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
bool with_empty_stack = args[2].As<v8::Boolean>()->Value();
if (with_empty_stack) context->Exit();
- v8::internal::Vector<uint16_t> task_name =
- ToVector(args[1].As<v8::String>());
- v8_inspector::StringView task_name_view(task_name.start(),
- task_name.length());
+ std::vector<uint16_t> task_name =
+ ToVector(isolate, args[1].As<v8::String>());
+ v8_inspector::StringView task_name_view(task_name.data(), task_name.size());
RunAsyncTask(data->task_runner(), task_name_view,
new SetTimeoutTask(context_group_id, isolate,
@@ -1085,8 +1102,7 @@ int main(int argc, char* argv[]) {
if (argv[i] == nullptr || argv[i][0] == '-') continue;
bool exists = false;
- v8::internal::Vector<const char> chars =
- v8::internal::ReadFile(argv[i], &exists, true);
+ std::string chars = v8::internal::ReadFile(argv[i], &exists, true);
if (!exists) {
fprintf(stderr, "Internal error: script file doesn't exists: %s\n",
argv[i]);
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index a669cc41a1..57b9af57c2 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -12,10 +12,11 @@ namespace {
const int kIsolateDataIndex = 2;
const int kContextGroupIdIndex = 3;
-v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
+v8::internal::Vector<uint16_t> ToVector(v8::Isolate* isolate,
+ v8::Local<v8::String> str) {
v8::internal::Vector<uint16_t> buffer =
v8::internal::Vector<uint16_t>::New(str->Length());
- str->Write(buffer.start(), 0, str->Length());
+ str->Write(isolate, buffer.start(), 0, str->Length());
return buffer;
}
@@ -137,7 +138,8 @@ v8::MaybeLocal<v8::Module> IsolateData::ModuleResolveCallback(
v8::Local<v8::Module> referrer) {
IsolateData* data = IsolateData::FromContext(context);
std::string str = *v8::String::Utf8Value(data->isolate(), specifier);
- return data->modules_[ToVector(specifier)].Get(data->isolate());
+ return data->modules_[ToVector(data->isolate(), specifier)].Get(
+ data->isolate());
}
int IsolateData::ConnectSession(int context_group_id,
@@ -249,7 +251,7 @@ int IsolateData::HandleMessage(v8::Local<v8::Message> message,
int script_id =
static_cast<int>(message->GetScriptOrigin().ScriptID()->Value());
if (!stack.IsEmpty() && stack->GetFrameCount() > 0) {
- int top_script_id = stack->GetFrame(0)->GetScriptId();
+ int top_script_id = stack->GetFrame(isolate, 0)->GetScriptId();
if (top_script_id == script_id) script_id = 0;
}
int line_number = message->GetLineNumber(context).FromMaybe(0);
@@ -258,13 +260,14 @@ int IsolateData::HandleMessage(v8::Local<v8::Message> message,
column_number = message->GetStartColumn(context).FromJust() + 1;
v8_inspector::StringView detailed_message;
- v8::internal::Vector<uint16_t> message_text_string = ToVector(message->Get());
+ v8::internal::Vector<uint16_t> message_text_string =
+ ToVector(isolate, message->Get());
v8_inspector::StringView message_text(message_text_string.start(),
message_text_string.length());
v8::internal::Vector<uint16_t> url_string;
if (message->GetScriptOrigin().ResourceName()->IsString()) {
- url_string =
- ToVector(message->GetScriptOrigin().ResourceName().As<v8::String>());
+ url_string = ToVector(
+ isolate, message->GetScriptOrigin().ResourceName().As<v8::String>());
}
v8_inspector::StringView url(url_string.start(), url_string.length());
@@ -432,7 +435,7 @@ namespace {
class StringBufferImpl : public v8_inspector::StringBuffer {
public:
StringBufferImpl(v8::Isolate* isolate, v8::Local<v8::String> string)
- : data_(ToVector(string)),
+ : data_(ToVector(isolate, string)),
view_(data_.start(), data_.length()) {}
const v8_inspector::StringView& string() override { return view_; }
@@ -449,6 +452,6 @@ std::unique_ptr<v8_inspector::StringBuffer> IsolateData::resourceNameToUrl(
v8::HandleScope handle_scope(isolate);
v8::Local<v8::String> name = ToString(isolate, resourceName);
v8::Local<v8::String> prefix = resource_name_prefix_.Get(isolate);
- v8::Local<v8::String> url = v8::String::Concat(prefix, name);
+ v8::Local<v8::String> url = v8::String::Concat(isolate, prefix, name);
return std::unique_ptr<StringBufferImpl>(new StringBufferImpl(isolate, url));
}
diff --git a/deps/v8/test/intl/collator/default-locale.js b/deps/v8/test/intl/collator/default-locale.js
index 5fc6ff4665..fd964f0620 100644
--- a/deps/v8/test/intl/collator/default-locale.js
+++ b/deps/v8/test/intl/collator/default-locale.js
@@ -48,8 +48,6 @@ var collatorBraket = new Intl.Collator({});
assertEquals(options.locale, collatorBraket.resolvedOptions().locale);
var collatorWithOptions = new Intl.Collator(undefined, {usage: 'search'});
-assertLanguageTag(%GetDefaultICULocale(),
- collatorWithOptions.resolvedOptions().locale);
-assertNotNull(
- %regexp_internal_match(/-u(-[a-zA-Z]+-[a-zA-Z]+)*-co-search/,
- collatorWithOptions.resolvedOptions().locale));
+var locale = collatorWithOptions.resolvedOptions().locale;
+assertLanguageTag(%GetDefaultICULocale(), locale);
+assertEquals(locale.indexOf('-co-search'), -1);
diff --git a/deps/v8/test/intl/collator/property-override.js b/deps/v8/test/intl/collator/property-override.js
index bed4d7773d..1e17b1e741 100644
--- a/deps/v8/test/intl/collator/property-override.js
+++ b/deps/v8/test/intl/collator/property-override.js
@@ -61,5 +61,3 @@ properties.forEach(function(prop) {
});
taintProperties(properties);
-
-var locale = Intl.Collator().resolvedOptions().locale;
diff --git a/deps/v8/test/intl/date-format/timezone.js b/deps/v8/test/intl/date-format/timezone.js
index af363711c7..57044d48d8 100644
--- a/deps/v8/test/intl/date-format/timezone.js
+++ b/deps/v8/test/intl/date-format/timezone.js
@@ -31,11 +31,15 @@
// var df = Intl.DateTimeFormat();
// assertEquals(getDefaultTimeZone(), df.resolvedOptions().timeZone);
-df = Intl.DateTimeFormat(undefined, {timeZone: 'UtC'});
-assertEquals('UTC', df.resolvedOptions().timeZone);
+[
+ 'UtC', 'gmt', 'Etc/UTC', 'Etc/GMT', 'Etc/GMT0', 'Etc/GMT+0',
+ 'etc/gmt-0', 'etc/zulu', 'Etc/universal', 'etc/greenwich'
+].forEach((timezone) => {
+ const df = Intl.DateTimeFormat(undefined, {timeZone: timezone});
+ assertEquals('UTC', df.resolvedOptions().timeZone);
+})
-df = Intl.DateTimeFormat(undefined, {timeZone: 'gmt'});
-assertEquals('UTC', df.resolvedOptions().timeZone);
+// See test/mjsunit/regress/regress-crbug-364374.js for additional tests.
df = Intl.DateTimeFormat(undefined, {timeZone: 'America/Los_Angeles'});
assertEquals('America/Los_Angeles', df.resolvedOptions().timeZone);
@@ -43,22 +47,29 @@ assertEquals('America/Los_Angeles', df.resolvedOptions().timeZone);
df = Intl.DateTimeFormat(undefined, {timeZone: 'Europe/Belgrade'});
assertEquals('Europe/Belgrade', df.resolvedOptions().timeZone);
-// Check Etc/XXX variants. They should work too.
-df = Intl.DateTimeFormat(undefined, {timeZone: 'Etc/UTC'});
-assertEquals('UTC', df.resolvedOptions().timeZone);
-
-df = Intl.DateTimeFormat(undefined, {timeZone: 'Etc/GMT'});
-assertEquals('UTC', df.resolvedOptions().timeZone);
-
df = Intl.DateTimeFormat(undefined, {timeZone: 'euRope/beLGRade'});
assertEquals('Europe/Belgrade', df.resolvedOptions().timeZone);
+// Etc/GMT-14 to Etc/GMT+12 are valid.
+df = Intl.DateTimeFormat(undefined, {timeZone: 'etc/gmt+12'});
+assertEquals('Etc/GMT+12', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'etc/gmt+9'});
+assertEquals('Etc/GMT+9', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'etc/gmt-9'});
+assertEquals('Etc/GMT-9', df.resolvedOptions().timeZone);
+
+df = Intl.DateTimeFormat(undefined, {timeZone: 'etc/gmt-14'});
+assertEquals('Etc/GMT-14', df.resolvedOptions().timeZone);
+
+assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'Etc/GMT+13\'})');
+
// : + - are not allowed, only / _ are.
assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT+07:00\'})');
assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT+0700\'})');
assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT-05:00\'})');
assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'GMT-0500\'})');
-assertThrows('Intl.DateTimeFormat(undefined, {timeZone: \'Etc/GMT+0\'})');
assertThrows('Intl.DateTimeFormat(undefined, ' +
'{timeZone: \'America/Los-Angeles\'})');
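A note on the Etc/GMT cases exercised above: those identifiers follow the POSIX sign convention, so the sign is inverted relative to the UTC offset (Etc/GMT+12 lies 12 hours west of Greenwich, i.e. UTC-12), which is why the accepted range runs from Etc/GMT-14 through Etc/GMT+12. A minimal sketch of that mapping, reusing the df and assertEquals helpers from the test above with a hypothetical zone not covered by the diff:

// Hypothetical illustration: 'etc/gmt+8' canonicalizes to 'Etc/GMT+8', which means UTC-08:00.
df = Intl.DateTimeFormat(undefined, {timeZone: 'etc/gmt+8'});
assertEquals('Etc/GMT+8', df.resolvedOptions().timeZone);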
diff --git a/deps/v8/test/intl/general/getCanonicalLocales.js b/deps/v8/test/intl/general/getCanonicalLocales.js
index dd01363c4f..0df6846ce6 100644
--- a/deps/v8/test/intl/general/getCanonicalLocales.js
+++ b/deps/v8/test/intl/general/getCanonicalLocales.js
@@ -2,24 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var locales = ['en-US', 'fr'];
-var result = Intl.getCanonicalLocales(locales);
-var len = result.length
+// Ignore the first tag when checking for duplicate subtags.
+assertDoesNotThrow(() => Intl.getCanonicalLocales("foobar-foobar"));
-// TODO(jshin): Remove the following when
-// https://github.com/tc39/test262/issues/745 is resolved and
-// test262 in v8 is updated.
+// Ignore duplicate subtags in different namespaces; e.g., 'a' vs. 'u'.
+assertDoesNotThrow(() => Intl.getCanonicalLocales("en-a-ca-Chinese-u-ca-Chinese"));
-assertEquals(Object.getPrototypeOf(result), Array.prototype);
-assertEquals(result.constructor, Array);
-
-for (var key in result) {
- var desc = Object.getOwnPropertyDescriptor(result, key);
- assertTrue(desc.writable);
- assertTrue(desc.configurable);
- assertTrue(desc.enumerable);
-}
-
-var desc = Object.getOwnPropertyDescriptor(result, 'length');
-assertTrue(desc.writable);
-assertEquals(result.push('de'), desc.value + 1);
+// Check duplicate subtags (after the first tag) are detected.
+assertThrows(() => Intl.getCanonicalLocales("en-foobar-foobar"), RangeError);
+assertThrows(() => Intl.getCanonicalLocales("en-u-ca-gregory-ca-chinese"), RangeError);
diff --git a/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js b/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
new file mode 100644
index 0000000000..b4d529652f
--- /dev/null
+++ b/deps/v8/test/intl/general/grandfathered_tags_without_preferred_value.js
@@ -0,0 +1,27 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[
+ // Grandfathered tags without a preferred value in the IANA language
+ // tag registry. Nonetheless, ICU cooks up a value when canonicalizing.
+ // v8 works around that ICU issue.
+ // See https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry .
+ ["cel-gaulish", "cel-gaulish"],
+ ["i-default", "i-default"],
+ ["i-mingo", "i-mingo"],
+ ["i-enochian", "i-enochian"],
+ ["zh-min", "zh-min"],
+
+ // Matching should be case-insensitive.
+ ["I-default", "i-default"],
+ ["i-DEFAULT", "i-default"],
+ ["I-DEFAULT", "i-default"],
+ ["i-DEfauLT", "i-default"],
+ ["zh-Min", "zh-min"],
+ ["Zh-min", "zh-min"],
+].forEach(([inputLocale, expectedLocale]) => {
+ const canonicalLocales = Intl.getCanonicalLocales(inputLocale);
+ assertEquals(canonicalLocales.length, 1);
+ assertEquals(canonicalLocales[0], expectedLocale);
+})
diff --git a/deps/v8/test/intl/general/language_tags_with_preferred_values.js b/deps/v8/test/intl/general/language_tags_with_preferred_values.js
index 391db53a98..073a6c9aff 100644
--- a/deps/v8/test/intl/general/language_tags_with_preferred_values.js
+++ b/deps/v8/test/intl/general/language_tags_with_preferred_values.js
@@ -7,6 +7,11 @@
["sgn-de", "gsg"],
["sgn-de-u-co-phonebk", "gsg-u-co-phonebk"],
+ // Matching should be case-insensitive.
+ ["sgn-De", "gsg"],
+ ["sgn-BE-FR", "sfb"],
+ ["Sgn-bE-Fr", "sfb"],
+
// deprecated region tag
["und-Latn-dd", "und-Latn-DE"],
["und-dd-u-co-phonebk", "und-DE-u-co-phonebk"],
@@ -22,8 +27,8 @@
["jw", "jv"],
["aam", "aas"],
["aam-u-ca-gregory", "aas-u-ca-gregory"],
-].forEach(function (entry) {
- const canonicalLocales = Intl.getCanonicalLocales(entry[0]);
+].forEach(([inputLocale, expectedLocale]) => {
+ const canonicalLocales = Intl.getCanonicalLocales(inputLocale);
assertEquals(canonicalLocales.length, 1);
- assertEquals(canonicalLocales[0], entry[1]);
+ assertEquals(canonicalLocales[0], expectedLocale);
})
diff --git a/deps/v8/test/intl/list-format/constructor.js b/deps/v8/test/intl/list-format/constructor.js
new file mode 100644
index 0000000000..33a85fd79f
--- /dev/null
+++ b/deps/v8/test/intl/list-format/constructor.js
@@ -0,0 +1,108 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+// ListFormat constructor can't be called as function.
+assertThrows(() => Intl.ListFormat(['sr']), TypeError);
+
+// Non-string locale.
+// assertThrows(() => new Intl.ListFormat(5), TypeError);
+
+// Invalid locale string.
+assertThrows(() => new Intl.ListFormat(['abcdefghi']), RangeError);
+
+assertDoesNotThrow(() => new Intl.ListFormat(['sr'], {}), TypeError);
+
+assertDoesNotThrow(() => new Intl.ListFormat([], {}));
+
+assertDoesNotThrow(() => new Intl.ListFormat(['fr', 'ar'], {}));
+
+assertDoesNotThrow(() => new Intl.ListFormat({0: 'ja', 1:'fr'}, {}));
+
+assertDoesNotThrow(() => new Intl.ListFormat({1: 'ja', 2:'fr'}, {}));
+
+assertDoesNotThrow(() => new Intl.ListFormat(['sr']));
+
+assertDoesNotThrow(() => new Intl.ListFormat());
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(
+ ['sr'], {
+ style: 'short',
+ type: 'unit'
+ }));
+
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'conjunction'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'disjunction'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'unit'}));
+
+assertThrows(
+ () => new Intl.ListFormat(['sr'], {type: 'standard'}),
+ RangeError);
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {style: 'long'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {style: 'short'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {style: 'narrow'}));
+
+assertThrows(
+ () => new Intl.ListFormat(['sr'], {style: 'giant'}),
+ RangeError);
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'conjunction', style: 'long'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'conjunction', style: 'short'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'conjunction', style: 'narrow'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'disjunction', style: 'long'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'disjunction', style: 'short'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'disjunction', style: 'narrow'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'unit', style: 'long'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'unit', style: 'short'}));
+
+assertDoesNotThrow(
+ () => new Intl.ListFormat(['sr'], {type: 'unit', style: 'narrow'}));
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+let style = -1;
+let type = -1;
+
+new Intl.ListFormat(['en-US'], {
+ get style() {
+ style = ++getCount;
+ },
+ get type() {
+ type = ++getCount;
+ }
+});
+
+assertEquals(1, type);
+assertEquals(2, style);
diff --git a/deps/v8/test/intl/list-format/format-en.js b/deps/v8/test/intl/list-format/format-en.js
new file mode 100644
index 0000000000..21eb99d06d
--- /dev/null
+++ b/deps/v8/test/intl/list-format/format-en.js
@@ -0,0 +1,119 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+// The following tests are not part of the conformance suite. They just provide
+// some output in English to verify that the format returns something reasonable.
+// It may be changed when we update the CLDR data.
+// NOTE: This is UNSPECIFIED behavior in
+// http://tc39.github.io/proposal-intl-list-time/
+
+let enLongConjunction = new Intl.ListFormat(
+ ["en"], {style: "long", type: 'conjunction'});
+
+assertEquals('', enLongConjunction.format());
+assertEquals('', enLongConjunction.format([]));
+assertEquals('a', enLongConjunction.format(['a']));
+assertEquals('b', enLongConjunction.format(['b']));
+assertEquals('a and b', enLongConjunction.format(['a', 'b']));
+assertEquals('a, b, and c', enLongConjunction.format(['a', 'b', 'c']));
+assertEquals('a, b, c, and d', enLongConjunction.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, and and', enLongConjunction.format(['a', 'b', 'c', 'd', 'and']));
+
+let enLongDisjunction = new Intl.ListFormat(
+ ["en"], {style: "long", type: 'disjunction'});
+
+assertEquals('', enLongDisjunction.format());
+assertEquals('', enLongDisjunction.format([]));
+assertEquals('a', enLongDisjunction.format(['a']));
+assertEquals('b', enLongDisjunction.format(['b']));
+assertEquals('a or b', enLongDisjunction.format(['a', 'b']));
+assertEquals('a, b, or c', enLongDisjunction.format(['a', 'b', 'c']));
+assertEquals('a, b, c, or d', enLongDisjunction.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, or or', enLongDisjunction.format(['a', 'b', 'c', 'd', 'or']));
+
+let enLongUnit = new Intl.ListFormat(
+ ["en"], {style: "long", type: 'unit'});
+
+assertEquals('', enLongUnit.format());
+assertEquals('', enLongUnit.format([]));
+assertEquals('a', enLongUnit.format(['a']));
+assertEquals('b', enLongUnit.format(['b']));
+assertEquals('a, b', enLongUnit.format(['a', 'b']));
+assertEquals('a, b, c', enLongUnit.format(['a', 'b', 'c']));
+assertEquals('a, b, c, d', enLongUnit.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, or', enLongUnit.format(['a', 'b', 'c', 'd', 'or']));
+
+let enShortConjunction = new Intl.ListFormat(
+ ["en"], {style: "short", type: 'conjunction'});
+
+assertEquals('', enShortConjunction.format());
+assertEquals('', enShortConjunction.format([]));
+assertEquals('a', enShortConjunction.format(['a']));
+assertEquals('b', enShortConjunction.format(['b']));
+assertEquals('a and b', enShortConjunction.format(['a', 'b']));
+assertEquals('a, b, and c', enShortConjunction.format(['a', 'b', 'c']));
+assertEquals('a, b, c, and d', enShortConjunction.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, and and', enShortConjunction.format(['a', 'b', 'c', 'd', 'and']));
+
+let enShortDisjunction = new Intl.ListFormat(
+ ["en"], {style: "short", type: 'disjunction'});
+
+assertEquals('', enShortDisjunction.format());
+assertEquals('', enShortDisjunction.format([]));
+assertEquals('a', enShortDisjunction.format(['a']));
+assertEquals('b', enShortDisjunction.format(['b']));
+assertEquals('a or b', enShortDisjunction.format(['a', 'b']));
+assertEquals('a, b, or c', enShortDisjunction.format(['a', 'b', 'c']));
+assertEquals('a, b, c, or d', enShortDisjunction.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, or or', enShortDisjunction.format(['a', 'b', 'c', 'd', 'or']));
+
+let enShortUnit = new Intl.ListFormat(
+ ["en"], {style: "short", type: 'unit'});
+
+assertEquals('', enShortUnit.format());
+assertEquals('', enShortUnit.format([]));
+assertEquals('a', enShortUnit.format(['a']));
+assertEquals('b', enShortUnit.format(['b']));
+assertEquals('a, b', enShortUnit.format(['a', 'b']));
+assertEquals('a, b, c', enShortUnit.format(['a', 'b', 'c']));
+assertEquals('a, b, c, d', enShortUnit.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, or', enShortUnit.format(['a', 'b', 'c', 'd', 'or']));
+
+let enNarrowConjunction = new Intl.ListFormat(
+ ["en"], {style: "narrow", type: 'conjunction'});
+
+assertEquals('', enNarrowConjunction.format());
+assertEquals('', enNarrowConjunction.format([]));
+assertEquals('a', enNarrowConjunction.format(['a']));
+assertEquals('b', enNarrowConjunction.format(['b']));
+assertEquals('a and b', enNarrowConjunction.format(['a', 'b']));
+assertEquals('a, b, and c', enNarrowConjunction.format(['a', 'b', 'c']));
+assertEquals('a, b, c, and d', enNarrowConjunction.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, and and', enNarrowConjunction.format(['a', 'b', 'c', 'd', 'and']));
+
+let enNarrowDisjunction = new Intl.ListFormat(
+ ["en"], {style: "narrow", type: 'disjunction'});
+
+assertEquals('', enNarrowDisjunction.format());
+assertEquals('', enNarrowDisjunction.format([]));
+assertEquals('a', enNarrowDisjunction.format(['a']));
+assertEquals('b', enNarrowDisjunction.format(['b']));
+assertEquals('a or b', enNarrowDisjunction.format(['a', 'b']));
+assertEquals('a, b, or c', enNarrowDisjunction.format(['a', 'b', 'c']));
+assertEquals('a, b, c, or d', enNarrowDisjunction.format(['a', 'b', 'c', 'd']));
+assertEquals('a, b, c, d, or or', enNarrowDisjunction.format(['a', 'b', 'c', 'd', 'or']));
+
+let enNarrowUnit = new Intl.ListFormat(
+ ["en"], {style: "narrow", type: 'unit'});
+
+assertEquals('', enNarrowUnit.format());
+assertEquals('', enNarrowUnit.format([]));
+assertEquals('a', enNarrowUnit.format(['a']));
+assertEquals('b', enNarrowUnit.format(['b']));
+assertEquals('a b', enNarrowUnit.format(['a', 'b']));
+assertEquals('a b c', enNarrowUnit.format(['a', 'b', 'c']));
+assertEquals('a b c d', enNarrowUnit.format(['a', 'b', 'c', 'd']));
+assertEquals('a b c d or', enNarrowUnit.format(['a', 'b', 'c', 'd', 'or']));
diff --git a/deps/v8/test/intl/list-format/format-to-parts.js b/deps/v8/test/intl/list-format/format-to-parts.js
new file mode 100644
index 0000000000..83473b6d0b
--- /dev/null
+++ b/deps/v8/test/intl/list-format/format-to-parts.js
@@ -0,0 +1,92 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+function assertListFormat(listFormat, input) {
+ var result;
+ try {
+ result = listFormat.formatToParts(input);
+ } catch (e) {
+ fail('should not throw exception ' + e);
+ }
+ assertTrue(Array.isArray(result));
+ if (input) {
+ assertTrue(result.length >= input.length * 2 - 1);
+ for (var i = 0, j = 0; i < result.length; i++) {
+ assertEquals('string', typeof result[i].value);
+ assertEquals('string', typeof result[i].type);
+ assertTrue(result[i].type == 'literal' || result[i].type == 'element');
+ if (result[i].type == 'element') {
+ assertEquals(String(input[j++]), result[i].value);
+ if (i - 1 >= 0) {
+ assertEquals('literal', result[i - 1].type);
+ }
+ if (i + 1 < result.length) {
+ assertEquals('literal', result[i + 1].type);
+ }
+ }
+ if (result[i].type == 'literal') {
+ assertTrue(result[i].value.length > 0);
+ if (i - 1 >= 0) {
+ assertEquals('element', result[i - 1].type);
+ }
+ if (i + 1 < result.length) {
+ assertEquals('element', result[i + 1].type);
+ }
+ }
+ }
+ }
+}
+
+function testFormatter(listFormat) {
+
+ assertListFormat(listFormat, []);
+ assertListFormat(listFormat, undefined);
+ assertListFormat(listFormat, ['1']);
+ assertListFormat(listFormat, ['a']);
+ assertListFormat(listFormat, ['1', 'b']);
+ assertListFormat(listFormat, ['1', 'b', '3']);
+ assertListFormat(listFormat, ['a', 'b']);
+ assertListFormat(listFormat, ['a', 'b', 'c']);
+ assertListFormat(listFormat, ['a', 'b', 'c', 'd']);
+ assertListFormat(listFormat, ['作者', '譚永鋒', '1', (new Date()).toString()]);
+ assertListFormat(listFormat, ['作者', '譚永鋒', '1', 'b', '3']);
+ // Tricky cases
+ assertListFormat(listFormat, [' ', 'b', 'c', 'and']);
+ assertListFormat(listFormat, [' ', 'b', 'c', 'or']);
+ assertListFormat(listFormat, ['and']);
+ assertListFormat(listFormat, ['or']);
+
+ assertThrows(() => listFormat.formatToParts(null), TypeError);
+ assertThrows(() => listFormat.formatToParts([new Date()]), TypeError);
+ assertThrows(() => listFormat.formatToParts([1]), TypeError);
+ assertThrows(() => listFormat.formatToParts([1, 'b']), TypeError);
+ assertThrows(() => listFormat.formatToParts([1, 'b', 3]), TypeError);
+ assertThrows(() => listFormat.formatToParts([[3, 4]]), TypeError);
+ assertThrows(() => listFormat.formatToParts([undefined, 'world']), TypeError);
+ assertThrows(() => listFormat.formatToParts(['hello', undefined]), TypeError);
+ assertThrows(() => listFormat.formatToParts([undefined]), TypeError);
+ assertThrows(() => listFormat.formatToParts([null, 'world']), TypeError);
+ assertThrows(() => listFormat.formatToParts(['hello', null]), TypeError);
+ assertThrows(() => listFormat.formatToParts([null]), TypeError);
+
+}
+testFormatter(new Intl.ListFormat());
+testFormatter(new Intl.ListFormat(["en"]));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow'}));
+testFormatter(new Intl.ListFormat(["en"], {type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {type: 'unit'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'unit'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'unit'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'unit'}));
diff --git a/deps/v8/test/intl/list-format/format.js b/deps/v8/test/intl/list-format/format.js
new file mode 100644
index 0000000000..677cb22496
--- /dev/null
+++ b/deps/v8/test/intl/list-format/format.js
@@ -0,0 +1,63 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+function assertListFormat(listFormat, input) {
+ try {
+ let result = listFormat.format(input);
+ assertEquals('string', typeof result);
+ if (input) {
+ for (var i = 0; i < input.length; i++) {
+ assertTrue(result.indexOf(input[i]) >= 0);
+ }
+ }
+ } catch (e) {
+ fail('should not throw exception ' + e);
+ }
+}
+
+function testFormatter(listFormat) {
+ assertListFormat(listFormat, []);
+ assertListFormat(listFormat, undefined);
+ assertListFormat(listFormat, ['1']);
+ assertListFormat(listFormat, ['a']);
+ assertListFormat(listFormat, ['1', 'b']);
+ assertListFormat(listFormat, ['1', 'b', '3']);
+ assertListFormat(listFormat, ['a', 'b']);
+ assertListFormat(listFormat, ['a', 'b', 'c']);
+ assertListFormat(listFormat, ['a', 'b', 'c', 'd']);
+ assertListFormat(listFormat, ['作者', '譚永鋒', '1', (new Date()).toString()]);
+ assertListFormat(listFormat, ['作者', '譚永鋒', '1', 'b', '3']);
+
+ assertThrows(() => listFormat.format(null), TypeError);
+ assertThrows(() => listFormat.format([new Date()]), TypeError);
+ assertThrows(() => listFormat.format([1]), TypeError);
+ assertThrows(() => listFormat.format([1, 'b']), TypeError);
+ assertThrows(() => listFormat.format([1, 'b', 3]), TypeError);
+ assertThrows(() => listFormat.format([[3, 4]]), TypeError);
+ assertThrows(() => listFormat.format([undefined, 'world']), TypeError);
+ assertThrows(() => listFormat.format(['hello', undefined]), TypeError);
+ assertThrows(() => listFormat.format([undefined]), TypeError);
+ assertThrows(() => listFormat.format([null, 'world']), TypeError);
+ assertThrows(() => listFormat.format(['hello', null]), TypeError);
+ assertThrows(() => listFormat.format([null]), TypeError);
+}
+testFormatter(new Intl.ListFormat());
+testFormatter(new Intl.ListFormat(["en"]));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow'}));
+testFormatter(new Intl.ListFormat(["en"], {type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {type: 'unit'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'conjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'disjunction'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'long', type: 'unit'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'short', type: 'unit'}));
+testFormatter(new Intl.ListFormat(["en"], {style: 'narrow', type: 'unit'}));
diff --git a/deps/v8/test/intl/list-format/formatToParts-zh.js b/deps/v8/test/intl/list-format/formatToParts-zh.js
new file mode 100644
index 0000000000..a7204b0b29
--- /dev/null
+++ b/deps/v8/test/intl/list-format/formatToParts-zh.js
@@ -0,0 +1,157 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+// The following tests are not part of the conformance suite. They just provide
+// some output in Chinese to verify that the format returns something reasonable.
+// It may be changed when we update the CLDR data.
+// NOTE: This is UNSPECIFIED behavior in
+// http://tc39.github.io/proposal-intl-list-time/
+
+let zhLongConjunction = new Intl.ListFormat(
+ ["zh"], {style: "long", type: 'conjunction'});
+
+var parts;
+parts = zhLongConjunction.formatToParts();
+assertEquals(0, parts.length);
+
+parts = zhLongConjunction.formatToParts([]);
+assertEquals(0, parts.length);
+
+parts = zhLongConjunction.formatToParts(['譚永鋒']);
+assertEquals(1, parts.length);
+assertEquals('譚永鋒', parts[0].value);
+assertEquals('element', parts[0].type);
+
+parts = zhLongConjunction.formatToParts(['譚永鋒', '劉新宇']);
+assertEquals(3, parts.length);
+assertEquals('譚永鋒', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('和', parts[1].value);
+assertEquals('literal', parts[1].type);
+assertEquals('劉新宇', parts[2].value);
+assertEquals('element', parts[2].type);
+
+parts = zhLongConjunction.formatToParts(['黄子容', '譚永鋒', '劉新宇']);
+assertEquals(5, parts.length);
+assertEquals('黄子容', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('、', parts[1].value);
+assertEquals('literal', parts[1].type);
+assertEquals('譚永鋒', parts[2].value);
+assertEquals('element', parts[2].type);
+assertEquals('和', parts[3].value);
+assertEquals('literal', parts[3].type);
+assertEquals('劉新宇', parts[4].value);
+assertEquals('element', parts[4].type);
+
+parts = zhLongConjunction.formatToParts(['黄子容', '譚永鋒', '劉新宇', '朱君毅']);
+assertEquals(7, parts.length);
+assertEquals('黄子容', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('、', parts[1].value);
+assertEquals('literal', parts[1].type);
+assertEquals('譚永鋒', parts[2].value);
+assertEquals('element', parts[2].type);
+assertEquals('、', parts[3].value);
+assertEquals('literal', parts[3].type);
+assertEquals('劉新宇', parts[4].value);
+assertEquals('element', parts[4].type);
+assertEquals('和', parts[5].value);
+assertEquals('literal', parts[5].type);
+assertEquals('朱君毅', parts[6].value);
+assertEquals('element', parts[6].type);
+
+let zhShortDisjunction = new Intl.ListFormat(
+ ["zh"], {style: "short", type: 'disjunction'});
+parts = zhShortDisjunction.formatToParts();
+assertEquals(0, parts.length);
+
+parts = zhShortDisjunction.formatToParts([]);
+assertEquals(0, parts.length);
+
+parts = zhShortDisjunction.formatToParts(['譚永鋒']);
+assertEquals(1, parts.length);
+assertEquals('譚永鋒', parts[0].value);
+assertEquals('element', parts[0].type);
+
+parts = zhShortDisjunction.formatToParts(['譚永鋒', '劉新宇']);
+assertEquals(3, parts.length);
+assertEquals('譚永鋒', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('或', parts[1].value);
+assertEquals('literal', parts[1].type);
+assertEquals('劉新宇', parts[2].value);
+assertEquals('element', parts[2].type);
+
+parts = zhShortDisjunction.formatToParts(['黄子容', '譚永鋒', '劉新宇']);
+assertEquals(5, parts.length);
+assertEquals('黄子容', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('、', parts[1].value);
+assertEquals('literal', parts[1].type);
+assertEquals('譚永鋒', parts[2].value);
+assertEquals('element', parts[2].type);
+assertEquals('或', parts[3].value);
+assertEquals('literal', parts[3].type);
+assertEquals('劉新宇', parts[4].value);
+assertEquals('element', parts[4].type);
+
+parts = zhShortDisjunction.formatToParts(['黄子容', '譚永鋒', '劉新宇', '朱君毅']);
+assertEquals(7, parts.length);
+assertEquals('黄子容', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('、', parts[1].value);
+assertEquals('literal', parts[1].type);
+assertEquals('譚永鋒', parts[2].value);
+assertEquals('element', parts[2].type);
+assertEquals('、', parts[3].value);
+assertEquals('literal', parts[3].type);
+assertEquals('劉新宇', parts[4].value);
+assertEquals('element', parts[4].type);
+assertEquals('或', parts[5].value);
+assertEquals('literal', parts[5].type);
+assertEquals('朱君毅', parts[6].value);
+
+let zhNarrowUnit = new Intl.ListFormat(
+ ["zh"], {style: "narrow", type: 'unit'});
+
+parts = zhNarrowUnit.formatToParts();
+assertEquals(0, parts.length);
+
+parts = zhNarrowUnit.formatToParts([]);
+assertEquals(0, parts.length);
+
+parts = zhNarrowUnit.formatToParts(['3英哩']);
+assertEquals(1, parts.length);
+assertEquals('3英哩', parts[0].value);
+assertEquals('element', parts[0].type);
+
+parts = zhNarrowUnit.formatToParts(['3英哩', '4碼']);
+assertEquals(2, parts.length);
+assertEquals('3英哩', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('4碼', parts[1].value);
+assertEquals('element', parts[1].type);
+
+parts = zhNarrowUnit.formatToParts(['3英哩', '4碼', '5英尺']);
+assertEquals(3, parts.length);
+assertEquals('3英哩', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('4碼', parts[1].value);
+assertEquals('element', parts[1].type);
+assertEquals('5英尺', parts[2].value);
+assertEquals('element', parts[2].type);
+
+parts = zhNarrowUnit.formatToParts(['3英哩', '4碼', '5英尺','7英吋']);
+assertEquals(4, parts.length);
+assertEquals('3英哩', parts[0].value);
+assertEquals('element', parts[0].type);
+assertEquals('4碼', parts[1].value);
+assertEquals('element', parts[1].type);
+assertEquals('5英尺', parts[2].value);
+assertEquals('element', parts[2].type);
+assertEquals('7英吋', parts[3].value);
+assertEquals('element', parts[3].type);
diff --git a/deps/v8/test/intl/list-format/resolved-options.js b/deps/v8/test/intl/list-format/resolved-options.js
new file mode 100644
index 0000000000..270eb33e45
--- /dev/null
+++ b/deps/v8/test/intl/list-format/resolved-options.js
@@ -0,0 +1,155 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+let listFormat = new Intl.ListFormat();
+// The default style is 'long'
+assertEquals('long', listFormat.resolvedOptions().style);
+
+// The default type is 'conjunction'
+assertEquals('conjunction', listFormat.resolvedOptions().type);
+
+assertEquals(
+ 'short',
+ (new Intl.ListFormat(['sr'], {style: 'short'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'conjunction',
+ (new Intl.ListFormat(['sr'], {style: 'short'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'narrow',
+ (new Intl.ListFormat(['sr'], {style: 'narrow'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'conjunction',
+ (new Intl.ListFormat(['sr'], {style: 'narrow'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'long',
+ (new Intl.ListFormat(['sr'], {style: 'long'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'conjunction',
+ (new Intl.ListFormat(['sr'], {style: 'long'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'conjunction',
+ (new Intl.ListFormat(['sr'], {type: 'conjunction'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'long',
+ (new Intl.ListFormat(['sr'], {type: 'conjunction'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'disjunction',
+ (new Intl.ListFormat(['sr'], {type: 'disjunction'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'long',
+ (new Intl.ListFormat(['sr'], {type: 'disjunction'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'unit',
+ (new Intl.ListFormat(['sr'], {type: 'unit'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'long',
+ (new Intl.ListFormat(['sr'], {type: 'unit'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'disjunction',
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'disjunction'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'long',
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'disjunction'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'disjunction',
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'disjunction'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'short',
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'disjunction'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'disjunction',
+ (new Intl.ListFormat(['sr'], {style: 'narrow', type: 'disjunction'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'narrow',
+ (new Intl.ListFormat(['sr'], {style: 'narrow', type: 'disjunction'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'unit',
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'unit'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'long',
+ (new Intl.ListFormat(['sr'], {style: 'long', type: 'unit'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'unit',
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'unit'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'short',
+ (new Intl.ListFormat(['sr'], {style: 'short', type: 'unit'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'unit',
+ (new Intl.ListFormat(['sr'], {style: 'narrow', type: 'unit'}))
+ .resolvedOptions().type);
+
+assertEquals(
+ 'narrow',
+ (new Intl.ListFormat(['sr'], {style: 'narrow', type: 'unit'}))
+ .resolvedOptions().style);
+
+assertEquals(
+ 'ar',
+ (new Intl.ListFormat(['ar'])).resolvedOptions().locale);
+
+assertEquals(
+ 'ar',
+ (new Intl.ListFormat(['ar', 'en'])).resolvedOptions().locale);
+
+assertEquals(
+ 'fr',
+ (new Intl.ListFormat(['fr', 'en'])).resolvedOptions().locale);
+
+assertEquals(
+ 'ar',
+ (new Intl.ListFormat(['xyz', 'ar'])).resolvedOptions().locale);
+
+// The following is not working yet because it depends on the getAvailableLocales
+// work in another patchset.
+// TODO(ftang): uncomment the following once that patchset is checked in.
+// assertEquals(
+// 'ar',
+// (new Intl.ListFormat(['i-default', 'ar'])).resolvedOptions().locale);
diff --git a/deps/v8/test/intl/locale/locale-constructor.js b/deps/v8/test/intl/locale/locale-constructor.js
index 170cc81c36..3da9e291be 100644
--- a/deps/v8/test/intl/locale/locale-constructor.js
+++ b/deps/v8/test/intl/locale/locale-constructor.js
@@ -9,6 +9,11 @@ assertThrows(() => Intl.Locale('sr'), TypeError);
// Non-string locale.
assertThrows(() => new Intl.Locale(5), TypeError);
+assertThrows(() => new Intl.Locale(Symbol()), TypeError);
+assertThrows(() => new Intl.Locale(null), TypeError);
+assertThrows(() => new Intl.Locale(undefined), TypeError);
+assertThrows(() => new Intl.Locale(false), TypeError);
+assertThrows(() => new Intl.Locale(true), TypeError);
// Invalid locale string.
assertThrows(() => new Intl.Locale('abcdefghi'), RangeError);
diff --git a/deps/v8/test/intl/locale/maximize_minimize.js b/deps/v8/test/intl/locale/maximize_minimize.js
new file mode 100644
index 0000000000..823a6670e3
--- /dev/null
+++ b/deps/v8/test/intl/locale/maximize_minimize.js
@@ -0,0 +1,138 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-locale
+
+// Make sure that maximize and minimize of all locales work reasonably.
+
+assertEquals(new Intl.Locale("zh-TW").maximize().toString(), "zh-Hant-TW",
+ "zh-TW should maximize to zh-Hant-TW");
+assertEquals(new Intl.Locale("zh-Hant-TW").minimize().toString(), "zh-TW",
+ "zh-Hant-TW should minimize to zh-TW");
+assertEquals(new Intl.Locale("zh-Hans-CN").minimize().toString(), "zh",
+ "zh-Hans-CN should minimize to zh");
+assertEquals(new Intl.Locale("zh-CN").minimize().toString(), "zh",
+ "zh-CN should minimize to zh");
+assertEquals(new Intl.Locale("zh-Hans").minimize().toString(), "zh",
+ "zh-Hans should minimize to zh");
+
+function assertExpandRoundTrip(loc) {
+ assertEquals(
+ loc.toString(), loc.maximize().minimize().toString(), loc.toString());
+ assertEquals(
+ loc.toString(), loc.minimize().toString(), loc.toString());
+ assertTrue(
+ loc.maximize().toString().length > loc.toString().length, loc.toString());
+}
+
+var simpleLocales = [
+ "af", "agq", "ak", "am", "ar", "asa", "ast", "as", "az", "bas", "bem", "be",
+ "bez", "bg", "bm", "bn", "bo", "br", "brx", "bs", "ca", "ccp", "ce", "cgg",
+ "chr", "ckb", "cs", "cu", "cy", "dav", "da", "de", "dje", "dsb", "dua", "dyo",
+ "dz", "ebu", "ee", "el", "en", "eo", "es", "et", "eu", "ewo", "fa", "ff",
+ "fil", "fi", "fo", "fr", "fur", "fy", "ga", "gd", "gl", "gsw", "gu", "guz",
+ "gv", "haw", "ha", "he", "hi", "hr", "hsb", "hu", "hy", "id", "ig", "ii",
+ "is", "it", "ja", "jgo", "jmc", "kab", "kam", "ka", "kde", "kea", "khq", "ki",
+ "kkj", "kk", "kln", "kl", "km", "kn", "kok", "ko", "ksb", "ksf", "ksh", "ks",
+ "kw", "ky", "lag", "lb", "lg", "lkt", "ln", "lo", "lrc", "lt", "luo", "lu",
+ "luy", "lv", "mas", "mer", "mfe", "mgh", "mgo", "mg", "mk", "ml", "mn", "mr",
+ "ms", "mt", "mua", "my", "mzn", "naq", "nb", "nds", "nd", "ne", "nl", "nmg",
+ "nnh", "nn", "nus", "nyn", "om", "or", "os", "pa", "pl", "prg", "ps", "pt",
+ "qu", "rm", "rn", "rof", "ro", "ru", "rwk", "rw", "sah", "saq", "sbp", "sd",
+ "seh", "ses", "se", "sg", "shi", "si", "sk", "sl", "smn", "sn", "so", "sq",
+ "sr", "sv", "sw", "ta", "teo", "te", "tg", "th", "ti", "tk", "to", "tr", "tt",
+ "twq", "tzm", "ug", "uk", "ur", "uz", "vai", "vi", "vo", "vun", "wae", "wo",
+ "xog", "yav", "yi", "yo", "yue", "zgh", "zh", "zu"];
+for (var i = 0; i < simpleLocales.length; i++) {
+ assertExpandRoundTrip(new Intl.Locale(simpleLocales[i]));
+}
+
+function assertReduceRoundTrip(loc) {
+ assertEquals(
+ loc.minimize().toString(), loc.maximize().minimize().toString(),
+ loc.toString());
+ assertEquals(
+ loc.maximize().toString(), loc.minimize().maximize().toString(),
+ loc.toString());
+ assertTrue(
+ loc.maximize().toString().length >= loc.toString().length, loc.toString());
+ assertTrue(
+ loc.minimize().toString().length <= loc.toString().length, loc.toString());
+}
+
+var complexLocales = [
+ "af-NA", "af-ZA", "agq-CM", "ak-GH", "am-ET", "ar-001", "ar-AE", "ar-BH",
+ "ar-DJ", "ar-DZ", "ar-EG", "ar-EH", "ar-ER", "ar-IL", "ar-IQ", "ar-JO",
+ "ar-KM", "ar-KW", "ar-LB", "ar-LY", "ar-MA", "ar-MR", "ar-OM", "ar-PS",
+ "ar-QA", "ar-SA", "ar-SD", "ar-SO", "ar-SS", "ar-SY", "ar-TD", "ar-TN",
+ "ar-YE", "asa-TZ", "as-IN", "ast-ES", "az-Cyrl-AZ", "az-Cyrl", "az-Latn-AZ",
+ "az-Latn", "bas-CM", "be-BY", "bem-ZM", "bez-TZ", "bg-BG", "bm-ML", "bn-BD",
+ "bn-IN", "bo-CN", "bo-IN", "br-FR", "brx-IN", "bs-Cyrl-BA", "bs-Cyrl",
+ "bs-Latn-BA", "bs-Latn", "ca-AD", "ca-ES", "ca-FR", "ca-IT",
+ "ccp-BD", "ccp-IN", "ce-RU", "cgg-UG", "chr-US", "ckb-Arab-IQ", "ckb-Arab-IR",
+ "ckb-Arab", "ckb-IQ", "ckb-IR", "ckb-Latn-IQ", "ckb-Latn", "cs-CZ", "cu-RU",
+ "cy-GB", "da-DK", "da-GL", "dav-KE", "de-AT", "de-BE", "de-CH", "de-DE",
+ "de-IT", "de-LI", "de-LU", "dje-NE", "dsb-DE", "dua-CM", "dyo-SN", "dz-BT",
+ "ebu-KE", "ee-GH", "ee-TG", "el-CY", "el-GR", "en-001", "en-150", "en-AG",
+ "en-AI", "en-AS", "en-AT", "en-AU", "en-BB", "en-BE", "en-BI", "en-BM",
+ "en-BS", "en-BW", "en-BZ", "en-CA", "en-CC", "en-CH", "en-CK", "en-CM",
+ "en-CX", "en-CY", "en-DE", "en-DG", "en-DK", "en-DM", "en-ER", "en-FI",
+ "en-FJ", "en-FK", "en-FM", "en-GB", "en-GD", "en-GG", "en-GH", "en-GI",
+ "en-GM", "en-GU", "en-GY", "en-HK", "en-IE", "en-IL", "en-IM", "en-IN",
+ "en-IO", "en-JE", "en-JM", "en-KE", "en-KI", "en-KN", "en-KY", "en-LC",
+ "en-LR", "en-LS", "en-MG", "en-MH", "en-MO", "en-MP", "en-MS", "en-MT",
+ "en-MU", "en-MW", "en-MY", "en-NA", "en-NF", "en-NG", "en-NL", "en-NR",
+ "en-NU", "en-NZ", "en-PG", "en-PH", "en-PK", "en-PN", "en-PR", "en-PW",
+ "en-RW", "en-SB", "en-SC", "en-SD", "en-SE", "en-SG", "en-SH", "en-SI",
+ "en-SL", "en-SS", "en-SX", "en-SZ", "en-TC", "en-TK", "en-TO", "en-TT",
+ "en-TV", "en-TZ", "en-UG", "en-UM", "en-US", "en-VC",
+ "en-VG", "en-VI", "en-VU", "en-WS", "en-ZA", "en-ZM", "en-ZW", "eo-001",
+ "es-419", "es-AR", "es-BO", "es-BR", "es-BZ", "es-CL", "es-CO", "es-CR",
+ "es-CU", "es-DO", "es-EA", "es-EC", "es-ES", "es-GQ", "es-GT", "es-HN",
+ "es-IC", "es-MX", "es-NI", "es-PA", "es-PE", "es-PH", "es-PR", "es-PY",
+ "es-SV", "es-US", "es-UY", "es-VE", "et-EE", "eu-ES", "ewo-CM", "fa-AF",
+ "fa-IR", "ff-CM", "ff-GN", "ff-MR", "ff-SN", "fi-FI", "fil-PH", "fo-DK",
+ "fo-FO", "fr-BE", "fr-BF", "fr-BI", "fr-BJ", "fr-BL", "fr-CA", "fr-CD",
+ "fr-CF", "fr-CG", "fr-CH", "fr-CI", "fr-CM", "fr-DJ", "fr-DZ", "fr-FR",
+ "fr-GA", "fr-GF", "fr-GN", "fr-GP", "fr-GQ", "fr-HT", "fr-KM", "fr-LU",
+ "fr-MA", "fr-MC", "fr-MF", "fr-MG", "fr-ML", "fr-MQ", "fr-MR", "fr-MU",
+ "fr-NC", "fr-NE", "fr-PF", "fr-PM", "fr-RE", "fr-RW", "fr-SC", "fr-SN",
+ "fr-SY", "fr-TD", "fr-TG", "fr-TN", "fr-VU", "fr-WF", "fr-YT", "fur-IT",
+ "fy-NL", "ga-IE", "gd-GB", "gl-ES", "gsw-CH", "gsw-FR", "gsw-LI", "gu-IN",
+ "guz-KE", "gv-IM", "ha-GH", "ha-NE", "ha-NG", "haw-US", "he-IL", "hi-IN",
+ "hr-BA", "hr-HR", "hsb-DE", "hu-HU", "hy-AM", "id-ID", "ig-NG", "ii-CN",
+ "is-IS", "it-CH", "it-IT", "it-SM", "it-VA", "ja-JP", "jgo-CM", "jmc-TZ",
+ "kab-DZ", "ka-GE", "kam-KE", "kde-TZ", "kea-CV", "khq-ML", "ki-KE",
+ "kkj-CM", "kk-KZ", "kl-GL", "kln-KE", "km-KH", "kn-IN", "kok-IN", "ko-KP",
+ "ko-KR", "ksb-TZ", "ksf-CM", "ksh-DE", "ks-IN", "kw-GB", "ky-KG", "lag-TZ",
+ "lb-LU", "lg-UG", "lkt-US", "ln-AO", "ln-CD", "ln-CF", "ln-CG", "lo-LA",
+ "lrc-IQ", "lrc-IR", "lt-LT", "lu-CD", "luo-KE", "luy-KE", "lv-LV", "mas-KE",
+ "mas-TZ", "mer-KE", "mfe-MU", "mgh-MZ", "mg-MG", "mgo-CM", "mk-MK", "ml-IN",
+ "mn-MN", "mr-IN", "ms-BN", "ms-MY", "ms-SG", "mt-MT", "mua-CM", "my-MM",
+ "mzn-IR", "naq-NA", "nb-NO", "nb-SJ", "nds-DE", "nds-NL", "nd-ZW", "ne-IN",
+ "ne-NP", "nl-AW", "nl-BE", "nl-BQ", "nl-CW", "nl-NL", "nl-SR", "nl-SX",
+ "nmg-CM", "nnh-CM", "nn-NO", "nus-SS", "nyn-UG", "om-ET", "om-KE",
+ "or-IN", "os-GE", "os-RU", "pa-Arab-PK", "pa-Guru-IN", "pa-Guru",
+ "pl-PL", "prg-001", "ps-AF", "pt-AO", "pt-BR", "pt-CH", "pt-CV", "pt-GQ",
+ "pt-GW", "pt-LU", "pt-MO", "pt-MZ", "pt-PT", "pt-ST", "pt-TL", "qu-BO",
+ "qu-EC", "qu-PE", "rm-CH", "rn-BI", "rof-TZ", "ro-MD", "ro-RO", "ru-BY",
+ "ru-KG", "ru-KZ", "ru-MD", "ru-RU", "ru-UA", "rwk-TZ", "rw-RW", "sah-RU",
+ "saq-KE", "sbp-TZ", "sd-PK", "se-FI", "seh-MZ", "se-NO", "se-SE", "ses-ML",
+ "sg-CF", "shi-Latn-MA", "shi-Latn", "shi-Tfng-MA", "shi-Tfng", "si-LK",
+ "sk-SK", "sl-SI", "smn-FI", "sn-ZW", "so-DJ", "so-ET", "so-KE", "so-SO",
+ "sq-AL", "sq-MK", "sq-XK", "sr-Cyrl-BA", "sr-Cyrl-ME", "sr-Cyrl-RS",
+ "sr-Cyrl-XK", "sr-Cyrl", "sr-Latn-BA", "sr-Latn-ME", "sr-Latn-RS",
+ "sr-Latn-XK", "sr-Latn", "sv-AX", "sv-FI", "sv-SE", "sw-CD", "sw-KE",
+ "sw-TZ", "sw-UG", "ta-IN", "ta-LK", "ta-MY", "ta-SG", "te-IN", "teo-KE",
+ "teo-UG", "tg-TJ", "th-TH", "ti-ER", "ti-ET", "tk-TM", "to-TO", "tr-CY",
+ "tr-TR", "tt-RU", "twq-NE", "tzm-MA", "ug-CN", "uk-UA", "ur-IN", "ur-PK",
+ "uz-Arab-AF", "uz-Cyrl-UZ", "uz-Cyrl", "uz-Latn-UZ", "uz-Latn",
+ "vai-Latn-LR", "vai-Latn", "vai-Vaii-LR", "vai-Vaii", "vi-VN", "vo-001",
+ "vun-TZ", "wae-CH", "wo-SN", "xog-UG", "yav-CM", "yi-001", "yo-BJ", "yo-NG",
+ "yue-Hans-CN", "yue-Hant-HK", "yue-Hant", "zgh-MA", "zh-Hans-CN",
+ "zh-Hans-HK", "zh-Hans-MO", "zh-Hans-SG", "zh-Hans", "zh-Hant-HK",
+ "zh-Hant-MO", "zh-Hant-TW", "zu-ZA"];
+for (var i = 0; i < complexLocales.length; i++) {
+ assertReduceRoundTrip(new Intl.Locale(complexLocales[i]));
+}
diff --git a/deps/v8/src/torque/TorqueBaseListener.cpp b/deps/v8/test/intl/locale/regress-8032.js
index 259494041a..b8219b1b50 100644
--- a/deps/v8/src/torque/TorqueBaseListener.cpp
+++ b/deps/v8/test/intl/locale/regress-8032.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Generated from Torque.g4 by ANTLR 4.7.1
+// Flags: --harmony-locale
-#include "TorqueBaseListener.h"
+assertThrows(() => new Intl.Locale(''), RangeError);
diff --git a/deps/v8/test/intl/number-format/options.js b/deps/v8/test/intl/number-format/options.js
new file mode 100644
index 0000000000..80ddc025d1
--- /dev/null
+++ b/deps/v8/test/intl/number-format/options.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => new Intl.NumberFormat('en', null));
+assertDoesNotThrow(() => new Intl.NumberFormat('en', undefined));
+
+for (let key of [false, true, "foo", Symbol, 1]) {
+ assertDoesNotThrow(() => new Intl.NumberFormat('en', key));
+}
+
+assertDoesNotThrow(() => new Intl.NumberFormat('en', {}));
+assertDoesNotThrow(() => new Intl.NumberFormat('en', new Proxy({}, {})));
diff --git a/deps/v8/test/intl/regress-8030.js b/deps/v8/test/intl/regress-8030.js
new file mode 100644
index 0000000000..eac6b84f81
--- /dev/null
+++ b/deps/v8/test/intl/regress-8030.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+
+var locales = ["tlh", "id", "en"];
+var referenceRelativeTimeFormat = new Intl.RelativeTimeFormat(locales);
+var referenceFormatted = referenceRelativeTimeFormat.format(3, "day");
+
+class MyFormat extends Intl.RelativeTimeFormat {
+ constructor(locales, options) {
+ super(locales, options);
+    // could initialize MyFormat properties
+  }
+  // could add methods to MyFormat.prototype
+}
+
+var format = new MyFormat(locales);
+var actual = format.format(3, "day");
+assertEquals(actual, referenceFormatted);
diff --git a/deps/v8/test/intl/regress-8031.js b/deps/v8/test/intl/regress-8031.js
new file mode 100644
index 0000000000..0898026d99
--- /dev/null
+++ b/deps/v8/test/intl/regress-8031.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-list-format
+
+var locales = ["tlh", "id", "en"];
+var input = ["a", "b", "c"];
+var referenceListFormat = new Intl.ListFormat(locales);
+var referenceFormatted = referenceListFormat.format(input);
+
+class MyFormat extends Intl.ListFormat {
+ constructor(locales, options) {
+ super(locales, options);
+    // could initialize MyFormat properties
+  }
+  // could add methods to MyFormat.prototype
+}
+
+var format = new MyFormat(locales);
+var actual = format.format(input);
+assertEquals(actual, referenceFormatted);
diff --git a/deps/v8/test/intl/regress-8725514.js b/deps/v8/test/intl/regress-8725514.js
new file mode 100644
index 0000000000..82f884a093
--- /dev/null
+++ b/deps/v8/test/intl/regress-8725514.js
@@ -0,0 +1,10 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.prototype.__defineGetter__('x', function () {
+ return -2147483648;
+ });
+
+var f = ["x-u-foo"];
+Intl.NumberFormat(f);
diff --git a/deps/v8/src/torque/TorqueVisitor.cpp b/deps/v8/test/intl/regress-875643.js
index 15104d7a61..2625c8110f 100644
--- a/deps/v8/src/torque/TorqueVisitor.cpp
+++ b/deps/v8/test/intl/regress-875643.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Generated from Torque.g4 by ANTLR 4.7.1
-
-#include "TorqueVisitor.h"
+new Intl.NumberFormat(undefined, false)
diff --git a/deps/v8/test/intl/relative-time-format/format-en.js b/deps/v8/test/intl/relative-time-format/format-en.js
new file mode 100644
index 0000000000..cd58d65355
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/format-en.js
@@ -0,0 +1,502 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+
+// The following tests are not part of the conformance suite. They just produce
+// some English output to verify that the format returns something reasonable.
+// The expected values may change when we update the CLDR data.
+// NOTE: This is UNSPECIFIED behavior in
+// http://tc39.github.io/proposal-intl-relative-time/
+
+let longAuto = new Intl.RelativeTimeFormat(
+ "en", {style: "long", localeMatcher: 'lookup', numeric: 'auto'});
+
+assertEquals('3 seconds ago', longAuto.format(-3, 'second'));
+assertEquals('2 seconds ago', longAuto.format(-2, 'second'));
+assertEquals('1 second ago', longAuto.format(-1, 'second'));
+assertEquals('now', longAuto.format(0, 'second'));
+assertEquals('now', longAuto.format(-0, 'second'));
+assertEquals('in 1 second', longAuto.format(1, 'second'));
+assertEquals('in 2 seconds', longAuto.format(2, 'second'));
+assertEquals('in 345 seconds', longAuto.format(345, 'second'));
+
+assertEquals('3 minutes ago', longAuto.format(-3, 'minute'));
+assertEquals('2 minutes ago', longAuto.format(-2, 'minute'));
+assertEquals('1 minute ago', longAuto.format(-1, 'minute'));
+assertEquals('in 0 minutes', longAuto.format(0, 'minute'));
+assertEquals('0 minutes ago', longAuto.format(-0, 'minute'));
+assertEquals('in 1 minute', longAuto.format(1, 'minute'));
+assertEquals('in 2 minutes', longAuto.format(2, 'minute'));
+assertEquals('in 345 minutes', longAuto.format(345, 'minute'));
+
+assertEquals('3 hours ago', longAuto.format(-3, 'hour'));
+assertEquals('2 hours ago', longAuto.format(-2, 'hour'));
+assertEquals('1 hour ago', longAuto.format(-1, 'hour'));
+assertEquals('in 0 hours', longAuto.format(0, 'hour'));
+assertEquals('0 hours ago', longAuto.format(-0, 'hour'));
+assertEquals('in 1 hour', longAuto.format(1, 'hour'));
+assertEquals('in 2 hours', longAuto.format(2, 'hour'));
+assertEquals('in 345 hours', longAuto.format(345, 'hour'));
+
+assertEquals('3 days ago', longAuto.format(-3, 'day'));
+assertEquals('2 days ago', longAuto.format(-2, 'day'));
+assertEquals('yesterday', longAuto.format(-1, 'day'));
+assertEquals('today', longAuto.format(0, 'day'));
+assertEquals('today', longAuto.format(-0, 'day'));
+assertEquals('tomorrow', longAuto.format(1, 'day'));
+assertEquals('in 2 days', longAuto.format(2, 'day'));
+assertEquals('in 345 days', longAuto.format(345, 'day'));
+
+assertEquals('3 weeks ago', longAuto.format(-3, 'week'));
+assertEquals('2 weeks ago', longAuto.format(-2, 'week'));
+assertEquals('last week', longAuto.format(-1, 'week'));
+assertEquals('this week', longAuto.format(0, 'week'));
+assertEquals('this week', longAuto.format(-0, 'week'));
+assertEquals('next week', longAuto.format(1, 'week'));
+assertEquals('in 2 weeks', longAuto.format(2, 'week'));
+assertEquals('in 345 weeks', longAuto.format(345, 'week'));
+
+assertEquals('3 months ago', longAuto.format(-3, 'month'));
+assertEquals('2 months ago', longAuto.format(-2, 'month'));
+assertEquals('last month', longAuto.format(-1, 'month'));
+assertEquals('this month', longAuto.format(0, 'month'));
+assertEquals('this month', longAuto.format(-0, 'month'));
+assertEquals('next month', longAuto.format(1, 'month'));
+assertEquals('in 2 months', longAuto.format(2, 'month'));
+assertEquals('in 345 months', longAuto.format(345, 'month'));
+
+// "quarter" is not working in ICU now
+// Tracked by ICU bug in http://bugs.icu-project.org/trac/ticket/12171
+/*
+assertEquals('3 quarters ago', longAuto.format(-3, 'quarter'));
+assertEquals('2 quarters ago', longAuto.format(-2, 'quarter'));
+assertEquals('last quarter', longAuto.format(-1, 'quarter'));
+assertEquals('this quarter', longAuto.format(0, 'quarter'));
+assertEquals('this quarter', longAuto.format(-0, 'quarter'));
+assertEquals('next quarter', longAuto.format(1, 'quarter'));
+assertEquals('in 2 quarters', longAuto.format(2, 'quarter'));
+assertEquals('in 345 quarters', longAuto.format(345, 'quarter'));
+*/
+
+assertEquals('3 years ago', longAuto.format(-3, 'year'));
+assertEquals('2 years ago', longAuto.format(-2, 'year'));
+assertEquals('last year', longAuto.format(-1, 'year'));
+assertEquals('this year', longAuto.format(0, 'year'));
+assertEquals('this year', longAuto.format(-0, 'year'));
+assertEquals('next year', longAuto.format(1, 'year'));
+assertEquals('in 2 years', longAuto.format(2, 'year'));
+assertEquals('in 345 years', longAuto.format(345, 'year'));
+
+let shortAuto = new Intl.RelativeTimeFormat(
+ "en", {style: "short", localeMatcher: 'lookup', numeric: 'auto'});
+
+assertEquals('3 sec. ago', shortAuto.format(-3, 'second'));
+assertEquals('2 sec. ago', shortAuto.format(-2, 'second'));
+assertEquals('1 sec. ago', shortAuto.format(-1, 'second'));
+assertEquals('now', shortAuto.format(0, 'second'));
+assertEquals('now', shortAuto.format(-0, 'second'));
+assertEquals('in 1 sec.', shortAuto.format(1, 'second'));
+assertEquals('in 2 sec.', shortAuto.format(2, 'second'));
+assertEquals('in 345 sec.', shortAuto.format(345, 'second'));
+
+assertEquals('3 min. ago', shortAuto.format(-3, 'minute'));
+assertEquals('2 min. ago', shortAuto.format(-2, 'minute'));
+assertEquals('1 min. ago', shortAuto.format(-1, 'minute'));
+assertEquals('in 0 min.', shortAuto.format(0, 'minute'));
+assertEquals('0 min. ago', shortAuto.format(-0, 'minute'));
+assertEquals('in 1 min.', shortAuto.format(1, 'minute'));
+assertEquals('in 2 min.', shortAuto.format(2, 'minute'));
+assertEquals('in 345 min.', shortAuto.format(345, 'minute'));
+
+assertEquals('3 hr. ago', shortAuto.format(-3, 'hour'));
+assertEquals('2 hr. ago', shortAuto.format(-2, 'hour'));
+assertEquals('1 hr. ago', shortAuto.format(-1, 'hour'));
+assertEquals('in 0 hr.', shortAuto.format(0, 'hour'));
+assertEquals('0 hr. ago', shortAuto.format(-0, 'hour'));
+assertEquals('in 1 hr.', shortAuto.format(1, 'hour'));
+assertEquals('in 2 hr.', shortAuto.format(2, 'hour'));
+assertEquals('in 345 hr.', shortAuto.format(345, 'hour'));
+
+assertEquals('3 days ago', shortAuto.format(-3, 'day'));
+assertEquals('2 days ago', shortAuto.format(-2, 'day'));
+assertEquals('yesterday', shortAuto.format(-1, 'day'));
+assertEquals('today', shortAuto.format(0, 'day'));
+assertEquals('today', shortAuto.format(-0, 'day'));
+assertEquals('tomorrow', shortAuto.format(1, 'day'));
+assertEquals('in 2 days', shortAuto.format(2, 'day'));
+assertEquals('in 345 days', shortAuto.format(345, 'day'));
+
+assertEquals('3 wk. ago', shortAuto.format(-3, 'week'));
+assertEquals('2 wk. ago', shortAuto.format(-2, 'week'));
+assertEquals('last wk.', shortAuto.format(-1, 'week'));
+assertEquals('this wk.', shortAuto.format(0, 'week'));
+assertEquals('this wk.', shortAuto.format(-0, 'week'));
+assertEquals('next wk.', shortAuto.format(1, 'week'));
+assertEquals('in 2 wk.', shortAuto.format(2, 'week'));
+assertEquals('in 345 wk.', shortAuto.format(345, 'week'));
+
+assertEquals('3 mo. ago', shortAuto.format(-3, 'month'));
+assertEquals('2 mo. ago', shortAuto.format(-2, 'month'));
+assertEquals('last mo.', shortAuto.format(-1, 'month'));
+assertEquals('this mo.', shortAuto.format(0, 'month'));
+assertEquals('this mo.', shortAuto.format(-0, 'month'));
+assertEquals('next mo.', shortAuto.format(1, 'month'));
+assertEquals('in 2 mo.', shortAuto.format(2, 'month'));
+assertEquals('in 345 mo.', shortAuto.format(345, 'month'));
+
+// "quarter" is not working in ICU now
+/*
+assertEquals('3 qtrs. ago', shortAuto.format(-3, 'quarter'));
+assertEquals('2 qtrs. ago', shortAuto.format(-2, 'quarter'));
+assertEquals('last qtr.', shortAuto.format(-1, 'quarter'));
+assertEquals('this qtr.', shortAuto.format(0, 'quarter'));
+assertEquals('this qtr.', shortAuto.format(-0, 'quarter'));
+assertEquals('next qtr.', shortAuto.format(1, 'quarter'));
+assertEquals('in 2 qtrs.', shortAuto.format(2, 'quarter'));
+assertEquals('in 345 qtrs.', shortAuto.format(345, 'quarter'));
+*/
+
+assertEquals('3 yr. ago', shortAuto.format(-3, 'year'));
+assertEquals('2 yr. ago', shortAuto.format(-2, 'year'));
+assertEquals('last yr.', shortAuto.format(-1, 'year'));
+assertEquals('this yr.', shortAuto.format(0, 'year'));
+assertEquals('this yr.', shortAuto.format(-0, 'year'));
+assertEquals('next yr.', shortAuto.format(1, 'year'));
+assertEquals('in 2 yr.', shortAuto.format(2, 'year'));
+assertEquals('in 345 yr.', shortAuto.format(345, 'year'));
+
+// Note: the 'en' locale has no separate values for the narrow style.
+let narrowAuto = new Intl.RelativeTimeFormat(
+ "en", {style: "narrow", localeMatcher: 'lookup', numeric: 'auto'});
+
+assertEquals('3 sec. ago', narrowAuto.format(-3, 'second'));
+assertEquals('2 sec. ago', narrowAuto.format(-2, 'second'));
+assertEquals('1 sec. ago', narrowAuto.format(-1, 'second'));
+assertEquals('now', narrowAuto.format(0, 'second'));
+assertEquals('now', narrowAuto.format(-0, 'second'));
+assertEquals('in 1 sec.', narrowAuto.format(1, 'second'));
+assertEquals('in 2 sec.', narrowAuto.format(2, 'second'));
+assertEquals('in 345 sec.', narrowAuto.format(345, 'second'));
+
+assertEquals('3 min. ago', narrowAuto.format(-3, 'minute'));
+assertEquals('2 min. ago', narrowAuto.format(-2, 'minute'));
+assertEquals('1 min. ago', narrowAuto.format(-1, 'minute'));
+assertEquals('in 0 min.', narrowAuto.format(0, 'minute'));
+assertEquals('0 min. ago', narrowAuto.format(-0, 'minute'));
+assertEquals('in 1 min.', narrowAuto.format(1, 'minute'));
+assertEquals('in 2 min.', narrowAuto.format(2, 'minute'));
+assertEquals('in 345 min.', narrowAuto.format(345, 'minute'));
+
+assertEquals('3 hr. ago', narrowAuto.format(-3, 'hour'));
+assertEquals('2 hr. ago', narrowAuto.format(-2, 'hour'));
+assertEquals('1 hr. ago', narrowAuto.format(-1, 'hour'));
+assertEquals('in 0 hr.', narrowAuto.format(0, 'hour'));
+assertEquals('0 hr. ago', narrowAuto.format(-0, 'hour'));
+assertEquals('in 1 hr.', narrowAuto.format(1, 'hour'));
+assertEquals('in 2 hr.', narrowAuto.format(2, 'hour'));
+assertEquals('in 345 hr.', narrowAuto.format(345, 'hour'));
+
+assertEquals('3 days ago', narrowAuto.format(-3, 'day'));
+assertEquals('2 days ago', narrowAuto.format(-2, 'day'));
+assertEquals('yesterday', narrowAuto.format(-1, 'day'));
+assertEquals('today', narrowAuto.format(0, 'day'));
+assertEquals('today', narrowAuto.format(-0, 'day'));
+assertEquals('tomorrow', narrowAuto.format(1, 'day'));
+assertEquals('in 2 days', narrowAuto.format(2, 'day'));
+assertEquals('in 345 days', narrowAuto.format(345, 'day'));
+
+assertEquals('3 wk. ago', narrowAuto.format(-3, 'week'));
+assertEquals('2 wk. ago', narrowAuto.format(-2, 'week'));
+assertEquals('last wk.', narrowAuto.format(-1, 'week'));
+assertEquals('this wk.', narrowAuto.format(0, 'week'));
+assertEquals('this wk.', narrowAuto.format(-0, 'week'));
+assertEquals('next wk.', narrowAuto.format(1, 'week'));
+assertEquals('in 2 wk.', narrowAuto.format(2, 'week'));
+assertEquals('in 345 wk.', narrowAuto.format(345, 'week'));
+
+assertEquals('3 mo. ago', narrowAuto.format(-3, 'month'));
+assertEquals('2 mo. ago', narrowAuto.format(-2, 'month'));
+assertEquals('last mo.', narrowAuto.format(-1, 'month'));
+assertEquals('this mo.', narrowAuto.format(0, 'month'));
+assertEquals('this mo.', narrowAuto.format(-0, 'month'));
+assertEquals('next mo.', narrowAuto.format(1, 'month'));
+assertEquals('in 2 mo.', narrowAuto.format(2, 'month'));
+assertEquals('in 345 mo.', narrowAuto.format(345, 'month'));
+
+// "quarter" is not working in ICU now
+/*
+assertEquals('3 qtrs. ago', narrowAuto.format(-3, 'quarter'));
+assertEquals('2 qtrs. ago', narrowAuto.format(-2, 'quarter'));
+assertEquals('last qtr.', narrowAuto.format(-1, 'quarter'));
+assertEquals('this qtr.', narrowAuto.format(0, 'quarter'));
+assertEquals('this qtr.', narrowAuto.format(-0, 'quarter'));
+assertEquals('next qtr.', narrowAuto.format(1, 'quarter'));
+assertEquals('in 2 qtrs.', narrowAuto.format(2, 'quarter'));
+assertEquals('in 345 qtrs.', narrowAuto.format(345, 'quarter'));
+*/
+
+assertEquals('3 yr. ago', narrowAuto.format(-3, 'year'));
+assertEquals('2 yr. ago', narrowAuto.format(-2, 'year'));
+assertEquals('last yr.', narrowAuto.format(-1, 'year'));
+assertEquals('this yr.', narrowAuto.format(0, 'year'));
+assertEquals('this yr.', narrowAuto.format(-0, 'year'));
+assertEquals('next yr.', narrowAuto.format(1, 'year'));
+assertEquals('in 2 yr.', narrowAuto.format(2, 'year'));
+assertEquals('in 345 yr.', narrowAuto.format(345, 'year'));
+
+let longAlways = new Intl.RelativeTimeFormat(
+ "en", {style: "long", localeMatcher: 'lookup', numeric: 'always'});
+
+assertEquals('3 seconds ago', longAlways.format(-3, 'second'));
+assertEquals('2 seconds ago', longAlways.format(-2, 'second'));
+assertEquals('1 second ago', longAlways.format(-1, 'second'));
+assertEquals('in 0 seconds', longAlways.format(0, 'second'));
+assertEquals('0 seconds ago', longAlways.format(-0, 'second'));
+assertEquals('in 1 second', longAlways.format(1, 'second'));
+assertEquals('in 2 seconds', longAlways.format(2, 'second'));
+assertEquals('in 345 seconds', longAlways.format(345, 'second'));
+
+assertEquals('3 minutes ago', longAlways.format(-3, 'minute'));
+assertEquals('2 minutes ago', longAlways.format(-2, 'minute'));
+assertEquals('1 minute ago', longAlways.format(-1, 'minute'));
+assertEquals('in 0 minutes', longAlways.format(0, 'minute'));
+assertEquals('0 minutes ago', longAlways.format(-0, 'minute'));
+assertEquals('in 1 minute', longAlways.format(1, 'minute'));
+assertEquals('in 2 minutes', longAlways.format(2, 'minute'));
+assertEquals('in 345 minutes', longAlways.format(345, 'minute'));
+
+assertEquals('3 hours ago', longAlways.format(-3, 'hour'));
+assertEquals('2 hours ago', longAlways.format(-2, 'hour'));
+assertEquals('1 hour ago', longAlways.format(-1, 'hour'));
+assertEquals('in 0 hours', longAlways.format(0, 'hour'));
+assertEquals('0 hours ago', longAlways.format(-0, 'hour'));
+assertEquals('in 1 hour', longAlways.format(1, 'hour'));
+assertEquals('in 2 hours', longAlways.format(2, 'hour'));
+assertEquals('in 345 hours', longAlways.format(345, 'hour'));
+
+assertEquals('3 days ago', longAlways.format(-3, 'day'));
+assertEquals('2 days ago', longAlways.format(-2, 'day'));
+assertEquals('1 day ago', longAlways.format(-1, 'day'));
+assertEquals('in 0 days', longAlways.format(0, 'day'));
+assertEquals('0 days ago', longAlways.format(-0, 'day'));
+assertEquals('in 1 day', longAlways.format(1, 'day'));
+assertEquals('in 2 days', longAlways.format(2, 'day'));
+assertEquals('in 345 days', longAlways.format(345, 'day'));
+
+assertEquals('3 weeks ago', longAlways.format(-3, 'week'));
+assertEquals('2 weeks ago', longAlways.format(-2, 'week'));
+assertEquals('1 week ago', longAlways.format(-1, 'week'));
+assertEquals('in 0 weeks', longAlways.format(0, 'week'));
+assertEquals('0 weeks ago', longAlways.format(-0, 'week'));
+assertEquals('in 1 week', longAlways.format(1, 'week'));
+assertEquals('in 2 weeks', longAlways.format(2, 'week'));
+assertEquals('in 345 weeks', longAlways.format(345, 'week'));
+
+assertEquals('3 months ago', longAlways.format(-3, 'month'));
+assertEquals('2 months ago', longAlways.format(-2, 'month'));
+assertEquals('1 month ago', longAlways.format(-1, 'month'));
+assertEquals('in 0 months', longAlways.format(0, 'month'));
+assertEquals('0 months ago', longAlways.format(-0, 'month'));
+assertEquals('in 1 month', longAlways.format(1, 'month'));
+assertEquals('in 2 months', longAlways.format(2, 'month'));
+assertEquals('in 345 months', longAlways.format(345, 'month'));
+
+// "quarter" is not working in ICU now
+/*
+assertEquals('3 quarters ago', longAlways.format(-3, 'quarter'));
+assertEquals('2 quarters ago', longAlways.format(-2, 'quarter'));
+assertEquals('1 quarter ago', longAlways.format(-1, 'quarter'));
+assertEquals('in 0 quarters', longAlways.format(0, 'quarter'));
+assertEquals('0 quarters ago', longAlways.format(-0, 'quarter'));
+assertEquals('in 1 quarter', longAlways.format(1, 'quarter'));
+assertEquals('in 2 quarters', longAlways.format(2, 'quarter'));
+assertEquals('in 345 quarters', longAlways.format(345, 'quarter'));
+*/
+
+assertEquals('3 years ago', longAlways.format(-3, 'year'));
+assertEquals('2 years ago', longAlways.format(-2, 'year'));
+assertEquals('1 year ago', longAlways.format(-1, 'year'));
+assertEquals('in 0 years', longAlways.format(0, 'year'));
+assertEquals('0 years ago', longAlways.format(-0, 'year'));
+assertEquals('in 1 year', longAlways.format(1, 'year'));
+assertEquals('in 2 years', longAlways.format(2, 'year'));
+assertEquals('in 345 years', longAlways.format(345, 'year'));
+
+let shortAlways = new Intl.RelativeTimeFormat(
+ "en", {style: "short", localeMatcher: 'lookup', numeric: 'always'});
+
+assertEquals('3 sec. ago', shortAlways.format(-3, 'second'));
+assertEquals('2 sec. ago', shortAlways.format(-2, 'second'));
+assertEquals('1 sec. ago', shortAlways.format(-1, 'second'));
+assertEquals('in 0 sec.', shortAlways.format(0, 'second'));
+assertEquals('0 sec. ago', shortAlways.format(-0, 'second'));
+assertEquals('in 1 sec.', shortAlways.format(1, 'second'));
+assertEquals('in 2 sec.', shortAlways.format(2, 'second'));
+assertEquals('in 345 sec.', shortAlways.format(345, 'second'));
+
+assertEquals('3 min. ago', shortAlways.format(-3, 'minute'));
+assertEquals('2 min. ago', shortAlways.format(-2, 'minute'));
+assertEquals('1 min. ago', shortAlways.format(-1, 'minute'));
+assertEquals('in 0 min.', shortAlways.format(0, 'minute'));
+assertEquals('0 min. ago', shortAlways.format(-0, 'minute'));
+assertEquals('in 1 min.', shortAlways.format(1, 'minute'));
+assertEquals('in 2 min.', shortAlways.format(2, 'minute'));
+assertEquals('in 345 min.', shortAlways.format(345, 'minute'));
+
+assertEquals('3 hr. ago', shortAlways.format(-3, 'hour'));
+assertEquals('2 hr. ago', shortAlways.format(-2, 'hour'));
+assertEquals('1 hr. ago', shortAlways.format(-1, 'hour'));
+assertEquals('in 0 hr.', shortAlways.format(0, 'hour'));
+assertEquals('0 hr. ago', shortAlways.format(-0, 'hour'));
+assertEquals('in 1 hr.', shortAlways.format(1, 'hour'));
+assertEquals('in 2 hr.', shortAlways.format(2, 'hour'));
+assertEquals('in 345 hr.', shortAlways.format(345, 'hour'));
+
+assertEquals('3 days ago', shortAlways.format(-3, 'day'));
+assertEquals('2 days ago', shortAlways.format(-2, 'day'));
+assertEquals('1 day ago', shortAlways.format(-1, 'day'));
+assertEquals('in 0 days', shortAlways.format(0, 'day'));
+assertEquals('0 days ago', shortAlways.format(-0, 'day'));
+assertEquals('in 1 day', shortAlways.format(1, 'day'));
+assertEquals('in 2 days', shortAlways.format(2, 'day'));
+assertEquals('in 345 days', shortAlways.format(345, 'day'));
+
+assertEquals('3 wk. ago', shortAlways.format(-3, 'week'));
+assertEquals('2 wk. ago', shortAlways.format(-2, 'week'));
+assertEquals('1 wk. ago', shortAlways.format(-1, 'week'));
+assertEquals('in 0 wk.', shortAlways.format(0, 'week'));
+assertEquals('0 wk. ago', shortAlways.format(-0, 'week'));
+assertEquals('in 1 wk.', shortAlways.format(1, 'week'));
+assertEquals('in 2 wk.', shortAlways.format(2, 'week'));
+assertEquals('in 345 wk.', shortAlways.format(345, 'week'));
+
+assertEquals('3 mo. ago', shortAlways.format(-3, 'month'));
+assertEquals('2 mo. ago', shortAlways.format(-2, 'month'));
+assertEquals('1 mo. ago', shortAlways.format(-1, 'month'));
+assertEquals('in 0 mo.', shortAlways.format(0, 'month'));
+assertEquals('0 mo. ago', shortAlways.format(-0, 'month'));
+assertEquals('in 1 mo.', shortAlways.format(1, 'month'));
+assertEquals('in 2 mo.', shortAlways.format(2, 'month'));
+assertEquals('in 345 mo.', shortAlways.format(345, 'month'));
+
+// "quarter" is not working in ICU now
+/*
+assertEquals('3 qtrs. ago', shortAlways.format(-3, 'quarter'));
+assertEquals('2 qtrs. ago', shortAlways.format(-2, 'quarter'));
+assertEquals('1 qtr. ago', shortAlways.format(-1, 'quarter'));
+assertEquals('in 0 qtrs.', shortAlways.format(0, 'quarter'));
+assertEquals('0 qtr. ago', shortAlways.format(-0, 'quarter'));
+assertEquals('in 1 qtr.', shortAlways.format(1, 'quarter'));
+assertEquals('in 2 qtrs.', shortAlways.format(2, 'quarter'));
+assertEquals('in 345 qtrs.', shortAlways.format(345, 'quarter'));
+*/
+
+assertEquals('3 yr. ago', shortAlways.format(-3, 'year'));
+assertEquals('2 yr. ago', shortAlways.format(-2, 'year'));
+assertEquals('1 yr. ago', shortAlways.format(-1, 'year'));
+assertEquals('in 0 yr.', shortAlways.format(0, 'year'));
+assertEquals('0 yr. ago', shortAlways.format(-0, 'year'));
+assertEquals('in 1 yr.', shortAlways.format(1, 'year'));
+assertEquals('in 2 yr.', shortAlways.format(2, 'year'));
+assertEquals('in 345 yr.', shortAlways.format(345, 'year'));
+
+// Note: the 'en' locale has no separate values for the narrow style.
+let narrowAlways = new Intl.RelativeTimeFormat(
+ "en", {style: "narrow", localeMatcher: 'lookup', numeric: 'always'});
+
+assertEquals('3 sec. ago', narrowAlways.format(-3, 'second'));
+assertEquals('2 sec. ago', narrowAlways.format(-2, 'second'));
+assertEquals('1 sec. ago', narrowAlways.format(-1, 'second'));
+assertEquals('in 0 sec.', narrowAlways.format(0, 'second'));
+assertEquals('0 sec. ago', narrowAlways.format(-0, 'second'));
+assertEquals('in 1 sec.', narrowAlways.format(1, 'second'));
+assertEquals('in 2 sec.', narrowAlways.format(2, 'second'));
+assertEquals('in 345 sec.', narrowAlways.format(345, 'second'));
+
+assertEquals('3 min. ago', narrowAlways.format(-3, 'minute'));
+assertEquals('2 min. ago', narrowAlways.format(-2, 'minute'));
+assertEquals('1 min. ago', narrowAlways.format(-1, 'minute'));
+assertEquals('in 0 min.', narrowAlways.format(0, 'minute'));
+assertEquals('0 min. ago', narrowAlways.format(-0, 'minute'));
+assertEquals('in 1 min.', narrowAlways.format(1, 'minute'));
+assertEquals('in 2 min.', narrowAlways.format(2, 'minute'));
+assertEquals('in 345 min.', narrowAlways.format(345, 'minute'));
+
+assertEquals('3 hr. ago', narrowAlways.format(-3, 'hour'));
+assertEquals('2 hr. ago', narrowAlways.format(-2, 'hour'));
+assertEquals('1 hr. ago', narrowAlways.format(-1, 'hour'));
+assertEquals('in 0 hr.', narrowAlways.format(0, 'hour'));
+assertEquals('0 hr. ago', narrowAlways.format(-0, 'hour'));
+assertEquals('in 1 hr.', narrowAlways.format(1, 'hour'));
+assertEquals('in 2 hr.', narrowAlways.format(2, 'hour'));
+assertEquals('in 345 hr.', narrowAlways.format(345, 'hour'));
+
+assertEquals('3 days ago', narrowAlways.format(-3, 'day'));
+assertEquals('2 days ago', narrowAlways.format(-2, 'day'));
+assertEquals('1 day ago', narrowAlways.format(-1, 'day'));
+assertEquals('in 0 days', narrowAlways.format(0, 'day'));
+assertEquals('0 days ago', narrowAlways.format(-0, 'day'));
+assertEquals('in 1 day', narrowAlways.format(1, 'day'));
+assertEquals('in 2 days', narrowAlways.format(2, 'day'));
+assertEquals('in 345 days', narrowAlways.format(345, 'day'));
+
+assertEquals('3 wk. ago', narrowAlways.format(-3, 'week'));
+assertEquals('2 wk. ago', narrowAlways.format(-2, 'week'));
+assertEquals('1 wk. ago', narrowAlways.format(-1, 'week'));
+assertEquals('in 0 wk.', narrowAlways.format(0, 'week'));
+assertEquals('0 wk. ago', narrowAlways.format(-0, 'week'));
+assertEquals('in 1 wk.', narrowAlways.format(1, 'week'));
+assertEquals('in 2 wk.', narrowAlways.format(2, 'week'));
+assertEquals('in 345 wk.', narrowAlways.format(345, 'week'));
+
+assertEquals('3 mo. ago', narrowAlways.format(-3, 'month'));
+assertEquals('2 mo. ago', narrowAlways.format(-2, 'month'));
+assertEquals('1 mo. ago', narrowAlways.format(-1, 'month'));
+assertEquals('in 0 mo.', narrowAlways.format(0, 'month'));
+assertEquals('0 mo. ago', narrowAlways.format(-0, 'month'));
+assertEquals('in 1 mo.', narrowAlways.format(1, 'month'));
+assertEquals('in 2 mo.', narrowAlways.format(2, 'month'));
+assertEquals('in 345 mo.', narrowAlways.format(345, 'month'));
+
+// "quarter" is not working in ICU now
+/*
+assertEquals('3 qtrs. ago', narrowAlways.format(-3, 'quarter'));
+assertEquals('2 qtrs. ago', narrowAlways.format(-2, 'quarter'));
+assertEquals('1 qtr. ago', narrowAlways.format(-1, 'quarter'));
+assertEquals('in 0 qtrs.', narrowAlways.format(0, 'quarter'));
+assertEquals('0 qtr. ago', narrowAlways.format(-0, 'quarter'));
+assertEquals('in 1 qtr.', narrowAlways.format(1, 'quarter'));
+assertEquals('in 2 qtrs.', narrowAlways.format(2, 'quarter'));
+assertEquals('in 345 qtrs.', narrowAlways.format(345, 'quarter'));
+*/
+
+assertEquals('3 yr. ago', narrowAlways.format(-3, 'year'));
+assertEquals('2 yr. ago', narrowAlways.format(-2, 'year'));
+assertEquals('1 yr. ago', narrowAlways.format(-1, 'year'));
+assertEquals('in 0 yr.', narrowAlways.format(0, 'year'));
+assertEquals('0 yr. ago', narrowAlways.format(-0, 'year'));
+assertEquals('in 1 yr.', narrowAlways.format(1, 'year'));
+assertEquals('in 2 yr.', narrowAlways.format(2, 'year'));
+assertEquals('in 345 yr.', narrowAlways.format(345, 'year'));
+
+var styleNumericCombinations = [
+ longAuto, shortAuto, narrowAuto, longAlways,
+ shortAlways, narrowAlways ];
+var validUnits = [
+ 'second', 'minute', 'hour', 'day', 'week', 'month', 'quarter', 'year'];
+
+// Test that these all throw a RangeError.
+for (var i = 0; i < styleNumericCombinations.length; i++) {
+ for (var j = 0; j < validUnits.length; j++) {
+ assertThrows(() => styleNumericCombinations[i].format(NaN, validUnits[j]),
+ RangeError);
+ assertThrows(() => styleNumericCombinations[i].format(NaN, validUnits[j] + 's'),
+ RangeError);
+ assertThrows(() => styleNumericCombinations[i].format(NaN, validUnits[j]),
+ RangeError);
+ assertThrows(() => styleNumericCombinations[i].format(NaN, validUnits[j] + 's'),
+ RangeError);
+ }
+}
diff --git a/deps/v8/test/intl/relative-time-format/format-to-parts-en.js b/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
new file mode 100644
index 0000000000..52a0b885d7
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/format-to-parts-en.js
@@ -0,0 +1,68 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+
+// The following tests are not part of the conformance suite. They just produce
+// some English output to verify that the format returns something reasonable.
+// The expected values may change when we update the CLDR data.
+// NOTE: This is UNSPECIFIED behavior in
+// http://tc39.github.io/proposal-intl-relative-time/
+
+// From the sample code in https://github.com/tc39/proposal-intl-relative-time#intlrelativetimeformatprototypeformattopartsvalue-unit
+// // Format relative time using the day unit.
+// rtf.formatToParts(-1, "day");
+// // > [{ type: "literal", value: "yesterday"}]
+let longAuto = new Intl.RelativeTimeFormat(
+ "en", {style: "long", localeMatcher: 'lookup', numeric: 'auto'});
+var parts = longAuto.formatToParts(-1, "day");
+assertEquals(1, parts.length);
+assertEquals(2, Object.getOwnPropertyNames(parts[0]).length);
+assertEquals('literal', parts[0].type);
+assertEquals('yesterday', parts[0].value);
+
+// From the sample code in https://github.com/tc39/proposal-intl-relative-time#intlrelativetimeformatprototypeformattopartsvalue-unit
+// rtf.formatToParts(100, "day");
+// // > [{ type: "literal", value: "in " }, { type: "integer", value: "100", unit: "day" }, { type: "literal", value: " days" }]
+let longAlways = new Intl.RelativeTimeFormat(
+ "en", {style: "long", localeMatcher: 'lookup', numeric: 'always'});
+
+parts = longAlways.formatToParts(100, "day");
+assertEquals(3, parts.length);
+
+assertEquals(2, Object.getOwnPropertyNames(parts[0]).length);
+assertEquals('literal', parts[0].type);
+assertEquals('in ', parts[0].value);
+
+assertEquals(3, Object.getOwnPropertyNames(parts[1]).length);
+assertEquals('integer', parts[1].type);
+assertEquals('100', parts[1].value);
+assertEquals('day', parts[1].unit);
+
+assertEquals(2, Object.getOwnPropertyNames(parts[2]).length);
+assertEquals('literal', parts[2].type);
+assertEquals(' days', parts[2].value);
+
+assertThrows(() => longAlways.format(NaN, 'second'), RangeError);
+assertThrows(() => longAuto.format(NaN, 'second'), RangeError);
+
+parts = longAlways.formatToParts(-10, "day");
+assertEquals(2, parts.length);
+assertEquals(3, Object.getOwnPropertyNames(parts[0]).length);
+assertEquals('integer', parts[0].type);
+assertEquals('10', parts[0].value);
+assertEquals('day', parts[0].unit);
+assertEquals(2, Object.getOwnPropertyNames(parts[1]).length);
+assertEquals('literal', parts[1].type);
+assertEquals(' days ago', parts[1].value);
+
+parts = longAlways.formatToParts(-0, "day");
+assertEquals(2, parts.length);
+assertEquals(3, Object.getOwnPropertyNames(parts[0]).length);
+assertEquals('integer', parts[0].type);
+assertEquals('0', parts[0].value);
+assertEquals('day', parts[0].unit);
+assertEquals(2, Object.getOwnPropertyNames(parts[1]).length);
+assertEquals('literal', parts[1].type);
+assertEquals(' days ago', parts[1].value);
diff --git a/deps/v8/test/intl/relative-time-format/format-to-parts.js b/deps/v8/test/intl/relative-time-format/format-to-parts.js
new file mode 100644
index 0000000000..071c4468c0
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/format-to-parts.js
@@ -0,0 +1,82 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+
+// Make sure that RelativeTimeFormat exposes all required properties. Those not
+// specified should have the value undefined.
+// http://tc39.github.io/proposal-intl-relative-time/
+
+let rtf = new Intl.RelativeTimeFormat();
+
+// Test 1.4.4 Intl.RelativeTimeFormat.prototype.formatToParts( value, unit )
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'seconds')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'second')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'minutes')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'minute')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'hours')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'hour')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'days')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'day')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'weeks')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'week')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'months')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'month')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'quarters')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'quarter')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'years')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-1, 'year')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'seconds')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'second')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'minutes')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'minute')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'hours')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'hour')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'days')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'day')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'weeks')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'week')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'months')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'month')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'quarters')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'quarter')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'years')));
+assertEquals(true, Array.isArray(rtf.formatToParts(-0, 'year')));
+
+assertThrows(() => rtf.formatToParts(-1, 'decades'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'decade'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'centuries'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'century'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'milliseconds'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'millisecond'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'microseconds'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'microsecond'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'nanoseconds'), RangeError);
+assertThrows(() => rtf.formatToParts(-1, 'nanosecond'), RangeError);
+
+assertThrows(() => rtf.formatToParts(NaN, 'seconds'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'second'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'minutes'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'minute'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'hours'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'hour'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'days'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'day'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'weeks'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'week'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'months'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'month'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'years'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'year'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'quarters'), RangeError);
+assertThrows(() => rtf.formatToParts(NaN, 'quarter'), RangeError);
+
+assertEquals(true, Array.isArray(rtf.formatToParts(100, 'day')));
+rtf.formatToParts(100, 'day').forEach(function(part) {
+ assertEquals(true, part.type == 'literal' || part.type == 'integer');
+ assertEquals('string', typeof part.value);
+ if (part.type == 'integer') {
+ assertEquals('string', typeof part.unit);
+ }
+});
diff --git a/deps/v8/test/intl/relative-time-format/format.js b/deps/v8/test/intl/relative-time-format/format.js
new file mode 100644
index 0000000000..769358423d
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/format.js
@@ -0,0 +1,82 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-relative-time-format
+
+// Make sure that RelativeTimeFormat exposes all required properties. Those not
+// specified should have the value undefined.
+// http://tc39.github.io/proposal-intl-relative-time/
+
+let rtf = new Intl.RelativeTimeFormat();
+
+// Test 1.4.3 Intl.RelativeTimeFormat.prototype.format( value, unit )
+assertEquals('string', typeof rtf.format(-1, 'seconds'));
+assertEquals('string', typeof rtf.format(-1, 'second'));
+assertEquals('string', typeof rtf.format(-1, 'minutes'));
+assertEquals('string', typeof rtf.format(-1, 'minute'));
+assertEquals('string', typeof rtf.format(-1, 'hours'));
+assertEquals('string', typeof rtf.format(-1, 'hour'));
+assertEquals('string', typeof rtf.format(-1, 'days'));
+assertEquals('string', typeof rtf.format(-1, 'day'));
+assertEquals('string', typeof rtf.format(-1, 'weeks'));
+assertEquals('string', typeof rtf.format(-1, 'week'));
+assertEquals('string', typeof rtf.format(-1, 'months'));
+assertEquals('string', typeof rtf.format(-1, 'month'));
+assertEquals('string', typeof rtf.format(-1, 'years'));
+assertEquals('string', typeof rtf.format(-1, 'year'));
+assertEquals('string', typeof rtf.format(-1, 'quarter'));
+assertEquals('string', typeof rtf.format(-1, 'quarters'));
+
+assertEquals('string', typeof rtf.format(-0, 'seconds'));
+assertEquals('string', typeof rtf.format(-0, 'second'));
+assertEquals('string', typeof rtf.format(-0, 'minutes'));
+assertEquals('string', typeof rtf.format(-0, 'minute'));
+assertEquals('string', typeof rtf.format(-0, 'hours'));
+assertEquals('string', typeof rtf.format(-0, 'hour'));
+assertEquals('string', typeof rtf.format(-0, 'days'));
+assertEquals('string', typeof rtf.format(-0, 'day'));
+assertEquals('string', typeof rtf.format(-0, 'weeks'));
+assertEquals('string', typeof rtf.format(-0, 'week'));
+assertEquals('string', typeof rtf.format(-0, 'months'));
+assertEquals('string', typeof rtf.format(-0, 'month'));
+assertEquals('string', typeof rtf.format(-0, 'years'));
+assertEquals('string', typeof rtf.format(-0, 'year'));
+assertEquals('string', typeof rtf.format(-0, 'quarter'));
+assertEquals('string', typeof rtf.format(-0, 'quarters'));
+
+assertThrows(() => rtf.format(NaN, 'seconds'), RangeError);
+assertThrows(() => rtf.format(NaN, 'second'), RangeError);
+assertThrows(() => rtf.format(NaN, 'minutes'), RangeError);
+assertThrows(() => rtf.format(NaN, 'minute'), RangeError);
+assertThrows(() => rtf.format(NaN, 'hours'), RangeError);
+assertThrows(() => rtf.format(NaN, 'hour'), RangeError);
+assertThrows(() => rtf.format(NaN, 'days'), RangeError);
+assertThrows(() => rtf.format(NaN, 'day'), RangeError);
+assertThrows(() => rtf.format(NaN, 'weeks'), RangeError);
+assertThrows(() => rtf.format(NaN, 'week'), RangeError);
+assertThrows(() => rtf.format(NaN, 'months'), RangeError);
+assertThrows(() => rtf.format(NaN, 'month'), RangeError);
+assertThrows(() => rtf.format(NaN, 'years'), RangeError);
+assertThrows(() => rtf.format(NaN, 'year'), RangeError);
+assertThrows(() => rtf.format(NaN, 'quarters'), RangeError);
+assertThrows(() => rtf.format(NaN, 'quarter'), RangeError);
+
+assertThrows(() => rtf.format(-1, 'decades'), RangeError);
+assertThrows(() => rtf.format(-1, 'decade'), RangeError);
+assertThrows(() => rtf.format(-1, 'centuries'), RangeError);
+assertThrows(() => rtf.format(-1, 'century'), RangeError);
+assertThrows(() => rtf.format(-1, 'milliseconds'), RangeError);
+assertThrows(() => rtf.format(-1, 'millisecond'), RangeError);
+assertThrows(() => rtf.format(-1, 'microseconds'), RangeError);
+assertThrows(() => rtf.format(-1, 'microsecond'), RangeError);
+assertThrows(() => rtf.format(-1, 'nanoseconds'), RangeError);
+assertThrows(() => rtf.format(-1, 'nanosecond'), RangeError);
+
+assertEquals('string', typeof rtf.format(5, 'day'));
+assertEquals('string', typeof rtf.format('5', 'day'));
+assertEquals('string', typeof rtf.format('-5', 'day'));
+assertEquals('string', typeof rtf.format('534', 'day'));
+assertEquals('string', typeof rtf.format('-534', 'day'));
+
+//assertThrows(() => rtf.format('xyz', 'day'), RangeError);
diff --git a/deps/v8/test/js-perf-test/Array/copy-within.js b/deps/v8/test/js-perf-test/Array/copy-within.js
new file mode 100644
index 0000000000..c3cf33b481
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Array/copy-within.js
@@ -0,0 +1,43 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+(() => {
+
+const kArraySize = 1000;
+const kQuarterSize = kArraySize / 4;
+
+let array = [];
+
+// Copy a quarter of the elements from the middle to the front.
+function CopyWithin() {
+ return new Function(
+ 'array.copyWithin(0, kQuarterSize * 2, kQuarterSize * 3);');
+}
+
+createSuite('SmiCopyWithin', 1000, CopyWithin, SmiCopyWithinSetup);
+createSuite('StringCopyWithin', 1000, CopyWithin, StringCopyWithinSetup);
+createSuite('SparseSmiCopyWithin', 1000, CopyWithin, SparseSmiCopyWithinSetup);
+createSuite(
+ 'SparseStringCopyWithin', 1000, CopyWithin, SparseStringCopyWithinSetup);
+
+function SmiCopyWithinSetup() {
+ array = [];
+ for (let i = 0; i < kArraySize; ++i) array[i] = i;
+}
+
+function StringCopyWithinSetup() {
+ array = [];
+ for (let i = 0; i < kArraySize; ++i) array[i] = `Item no. ${i}`;
+}
+
+function SparseSmiCopyWithinSetup() {
+ array = [];
+ for (let i = 0; i < kArraySize; i += 10) array[i] = i;
+}
+
+function SparseStringCopyWithinSetup() {
+ array = [];
+ for (let i = 0; i < kArraySize; i += 10) array[i] = `Item no. ${i}`;
+}
+
+})();
diff --git a/deps/v8/test/js-perf-test/Array/every.js b/deps/v8/test/js-perf-test/Array/every.js
index 5a29f44e41..6e9425543a 100644
--- a/deps/v8/test/js-perf-test/Array/every.js
+++ b/deps/v8/test/js-perf-test/Array/every.js
@@ -27,11 +27,11 @@ function OptUnreliableEvery() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "DoubleEvery", mc("every"), DoubleSetup, v => v > 0.0,
- "SmiEvery", mc("every"), SmiSetup, v => v != 34343,
- "FastEvery", mc("every"), FastSetup, v => v !== 'hi',
- "OptFastEvery", OptFastEvery, FastSetup, v => true,
- "OptUnreliableEvery", OptUnreliableEvery, FastSetup, v => true
+ ['DoubleEvery', newClosure('every'), DoubleSetup, v => v > 0.0],
+ ['SmiEvery', newClosure('every'), SmiSetup, v => v != 34343],
+ ['FastEvery', newClosure('every'), FastSetup, v => v !== 'hi'],
+ ['OptFastEvery', OptFastEvery, FastSetup, v => true],
+ ['OptUnreliableEvery', OptUnreliableEvery, FastSetup, v => true]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/filter.js b/deps/v8/test/js-perf-test/Array/filter.js
index e0d4327dd6..4ceaf5cce2 100644
--- a/deps/v8/test/js-perf-test/Array/filter.js
+++ b/deps/v8/test/js-perf-test/Array/filter.js
@@ -54,13 +54,19 @@ function OptUnreliableFilter() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "NaiveFilterReplacement", NaiveFilter, NaiveFilterSetup, v => true,
- "DoubleFilter", mc("filter"), DoubleSetup, v => Math.floor(v) % 2 === 0,
- "SmiFilter", mc("filter"), SmiSetup, v => v % 2 === 0,
- "FastFilter", mc("filter"), FastSetup, (_, i) => i % 2 === 0,
- "GenericFilter", mc("filter", true), ObjectSetup, (_, i) => i % 2 === 0,
- "OptFastFilter", OptFastFilter, FastSetup, undefined,
- "OptUnreliableFilter", OptUnreliableFilter, FastSetup, v => true
+ ['NaiveFilterReplacement', NaiveFilter, NaiveFilterSetup, v => true],
+ [
+ 'DoubleFilter', newClosure('filter'), DoubleSetup,
+ v => Math.floor(v) % 2 === 0
+ ],
+ ['SmiFilter', newClosure('filter'), SmiSetup, v => v % 2 === 0],
+ ['FastFilter', newClosure('filter'), FastSetup, (_, i) => i % 2 === 0],
+ [
+ 'GenericFilter', newClosure('filter', true), ObjectSetup,
+ (_, i) => i % 2 === 0
+ ],
+ ['OptFastFilter', OptFastFilter, FastSetup, undefined],
+ ['OptUnreliableFilter', OptUnreliableFilter, FastSetup, v => true]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/find-index.js b/deps/v8/test/js-perf-test/Array/find-index.js
index 716aa710bb..1029b26124 100644
--- a/deps/v8/test/js-perf-test/Array/find-index.js
+++ b/deps/v8/test/js-perf-test/Array/find-index.js
@@ -51,13 +51,22 @@ function NaiveSetup() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "NaiveFindIndexReplacement", Naive, NaiveSetup, v => v === max_index,
- "DoubleFindIndex", mc("findIndex"), DoubleSetup, v => v === max_index + 0.5,
- "SmiFindIndex", mc("findIndex"), SmiSetup, v => v === max_index,
- "FastFindIndex", mc("findIndex"), FastSetup, v => v === `value ${max_index}`,
- "GenericFindIndex", mc("findIndex", true), ObjectSetup, v => v === max_index,
- "OptFastFindIndex", OptFast, FastSetup, undefined,
- "OptUnreliableFindIndex", OptUnreliable, FastSetup, v => v === max_index
+ ['NaiveFindIndexReplacement', Naive, NaiveSetup, v => v === max_index],
+ [
+ 'DoubleFindIndex', newClosure('findIndex'), DoubleSetup,
+ v => v === max_index + 0.5
+ ],
+ ['SmiFindIndex', newClosure('findIndex'), SmiSetup, v => v === max_index],
+ [
+ 'FastFindIndex', newClosure('findIndex'), FastSetup,
+ v => v === `value ${max_index}`
+ ],
+ [
+ 'GenericFindIndex', newClosure('findIndex', true), ObjectSetup,
+ v => v === max_index
+ ],
+ ['OptFastFindIndex', OptFast, FastSetup, undefined],
+ ['OptUnreliableFindIndex', OptUnreliable, FastSetup, v => v === max_index]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/find.js b/deps/v8/test/js-perf-test/Array/find.js
index 9b9a19f1c4..580d646a30 100644
--- a/deps/v8/test/js-perf-test/Array/find.js
+++ b/deps/v8/test/js-perf-test/Array/find.js
@@ -51,13 +51,13 @@ function NaiveSetup() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "NaiveFindReplacement", Naive, NaiveSetup, v => v === max_index,
- "DoubleFind", mc("find"), DoubleSetup, v => v === max_index + 0.5,
- "SmiFind", mc("find"), SmiSetup, v => v === max_index,
- "FastFind", mc("find"), FastSetup, v => v === `value ${max_index}`,
- "GenericFind", mc("find", true), ObjectSetup, v => v === max_index,
- "OptFastFind", OptFast, FastSetup, undefined,
- "OptUnreliableFind", OptUnreliable, FastSetup, v => v === max_index
+ ['NaiveFindReplacement', Naive, NaiveSetup, v => v === max_index],
+ ['DoubleFind', newClosure('find'), DoubleSetup, v => v === max_index + 0.5],
+ ['SmiFind', newClosure('find'), SmiSetup, v => v === max_index],
+ ['FastFind', newClosure('find'), FastSetup, v => v === `value ${max_index}`],
+ ['GenericFind', newClosure('find', true), ObjectSetup, v => v === max_index],
+ ['OptFastFind', OptFast, FastSetup, undefined],
+ ['OptUnreliableFind', OptUnreliable, FastSetup, v => v === max_index]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/for-each.js b/deps/v8/test/js-perf-test/Array/for-each.js
index 79d279894b..c87d5406e0 100644
--- a/deps/v8/test/js-perf-test/Array/for-each.js
+++ b/deps/v8/test/js-perf-test/Array/for-each.js
@@ -50,13 +50,25 @@ function OptUnreliable() {
}
DefineHigherOrderTests([
- "NaiveForEachReplacement", Naive, NaiveSetup, v => v === max_index,
- "DoubleForEach", mc("forEach"), DoubleSetup, v => v === max_index + 0.5,
- "SmiForEach", mc("forEach"), SmiSetup, v => v === max_index,
- "FastForEach", mc("forEach"), FastSetup, v => v === `value ${max_index}`,
- "GenericForEach", mc("forEach", true), ObjectSetup, v => v === max_index,
- "OptFastForEach", OptFast, FastSetup, undefined,
- "OptUnreliableForEach", OptUnreliable, FastSetup, v => v === `value ${max_index}`
+ ['NaiveForEachReplacement', Naive, NaiveSetup, v => v === max_index],
+ [
+ 'DoubleForEach', newClosure('forEach'), DoubleSetup,
+ v => v === max_index + 0.5
+ ],
+ ['SmiForEach', newClosure('forEach'), SmiSetup, v => v === max_index],
+ [
+ 'FastForEach', newClosure('forEach'), FastSetup,
+ v => v === `value ${max_index}`
+ ],
+ [
+ 'GenericForEach', newClosure('forEach', true), ObjectSetup,
+ v => v === max_index
+ ],
+ ['OptFastForEach', OptFast, FastSetup, undefined],
+ [
+ 'OptUnreliableForEach', OptUnreliable, FastSetup,
+ v => v === `value ${max_index}`
+ ]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/map.js b/deps/v8/test/js-perf-test/Array/map.js
index 9179aa3c88..4b278b8882 100644
--- a/deps/v8/test/js-perf-test/Array/map.js
+++ b/deps/v8/test/js-perf-test/Array/map.js
@@ -49,15 +49,15 @@ function OptUnreliableMap() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "NaiveMapReplacement", NaiveMap, NaiveMapSetup, v => v,
- "SmiMap", mc("map"), SmiSetup, v => v,
- "DoubleMap", mc("map"), DoubleSetup, v => v,
- "FastMap", mc("map"), FastSetup, v => v,
- "SmallSmiToDoubleMap", mc("map"), SmiSetup, v => v + 0.5,
- "SmallSmiToFastMap", mc("map"), SmiSetup, v => "hi" + v,
- "GenericMap", mc("map", true), ObjectSetup, v => v,
- "OptFastMap", OptFastMap, FastSetup, undefined,
- "OptUnreliableMap", OptUnreliableMap, FastSetup, v => v
+ ['NaiveMapReplacement', NaiveMap, NaiveMapSetup, v => v],
+ ['SmiMap', newClosure('map'), SmiSetup, v => v],
+ ['DoubleMap', newClosure('map'), DoubleSetup, v => v],
+ ['FastMap', newClosure('map'), FastSetup, v => v],
+ ['SmallSmiToDoubleMap', newClosure('map'), SmiSetup, v => v + 0.5],
+ ['SmallSmiToFastMap', newClosure('map'), SmiSetup, v => 'hi' + v],
+ ['GenericMap', newClosure('map', true), ObjectSetup, v => v],
+ ['OptFastMap', OptFastMap, FastSetup, undefined],
+ ['OptUnreliableMap', OptUnreliableMap, FastSetup, v => v]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/reduce-right.js b/deps/v8/test/js-perf-test/Array/reduce-right.js
index ed00f5ac27..c643c2b383 100644
--- a/deps/v8/test/js-perf-test/Array/reduce-right.js
+++ b/deps/v8/test/js-perf-test/Array/reduce-right.js
@@ -27,12 +27,22 @@ function OptUnreliableReduceRight() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "DoubleReduceRight", mc("reduceRight"), DoubleSetup, (p, v, i, o) => p + v,
- "SmiReduceRight", mc("reduceRight"), SmiSetup, (p, v, i, a) => p + 1,
- "FastReduceRight", mc("reduceRight"), FastSetup, (p, v, i, a) => p + v,
- "OptFastReduceRight", OptFastReduceRight, FastSetup, undefined,
- "OptUnreliableReduceRight", OptUnreliableReduceRight, FastSetup,
- (p, v, i, a) => p + v
+ [
+ 'DoubleReduceRight', newClosure('reduceRight'), DoubleSetup,
+ (p, v, i, o) => p + v
+ ],
+ [
+ 'SmiReduceRight', newClosure('reduceRight'), SmiSetup, (p, v, i, a) => p + 1
+ ],
+ [
+ 'FastReduceRight', newClosure('reduceRight'), FastSetup,
+ (p, v, i, a) => p + v
+ ],
+ ['OptFastReduceRight', OptFastReduceRight, FastSetup, undefined],
+ [
+ 'OptUnreliableReduceRight', OptUnreliableReduceRight, FastSetup,
+ (p, v, i, a) => p + v
+ ]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/reduce.js b/deps/v8/test/js-perf-test/Array/reduce.js
index 02d689f7c4..2b9a28f098 100644
--- a/deps/v8/test/js-perf-test/Array/reduce.js
+++ b/deps/v8/test/js-perf-test/Array/reduce.js
@@ -27,12 +27,13 @@ function OptUnreliableReduce() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "DoubleReduce", mc("reduce"), DoubleSetup, (p, v, i, o) => p + v,
- "SmiReduce", mc("reduce"), SmiSetup, (p, v, i, a) => p + 1,
- "FastReduce", mc("reduce"), FastSetup, (p, v, i, a) => p + v,
- "OptFastReduce", OptFastReduce, FastSetup, undefined,
- "OptUnreliableReduce", OptUnreliableReduce, FastSetup,
- (p, v, i, a) => p = v
+ ['DoubleReduce', newClosure('reduce'), DoubleSetup, (p, v, i, o) => p + v],
+ ['SmiReduce', newClosure('reduce'), SmiSetup, (p, v, i, a) => p + 1],
+ ['FastReduce', newClosure('reduce'), FastSetup, (p, v, i, a) => p + v],
+ ['OptFastReduce', OptFastReduce, FastSetup, undefined],
+ [
+ 'OptUnreliableReduce', OptUnreliableReduce, FastSetup, (p, v, i, a) => p = v
+ ]
]);
})();
diff --git a/deps/v8/test/js-perf-test/Array/run.js b/deps/v8/test/js-perf-test/Array/run.js
index 52de9c3809..e8b6ef0024 100644
--- a/deps/v8/test/js-perf-test/Array/run.js
+++ b/deps/v8/test/js-perf-test/Array/run.js
@@ -13,10 +13,10 @@ let result;
const array_size = 100;
const max_index = array_size - 1;
-// mc stands for "Make Closure," it's a handy function to get a fresh
+// newClosure is a handy function to get a fresh
// closure unpolluted by IC feedback for a 2nd-order array builtin
// test.
-function mc(name, generic = false) {
+function newClosure(name, generic = false) {
if (generic) {
return new Function(
`result = Array.prototype.${name}.call(array, func, this_arg);`);
@@ -24,16 +24,55 @@ function mc(name, generic = false) {
return new Function(`result = array.${name}(func, this_arg);`);
}
+function MakeHoley(array) {
+  for (let i = 0; i < array.length; i += 2) {
+ delete array[i];
+ }
+ assert(%HasHoleyElements(array));
+}
+
function SmiSetup() {
array = Array.from({ length: array_size }, (_, i) => i);
+ assert(%HasSmiElements(array));
+}
+
+function HoleySmiSetup() {
+ SmiSetup();
+ MakeHoley(array);
+ assert(%HasSmiElements(array));
}
function DoubleSetup() {
array = Array.from({ length: array_size }, (_, i) => i + 0.5);
+ assert(%HasDoubleElements(array));
+}
+
+function HoleyDoubleSetup() {
+ DoubleSetup();
+ MakeHoley(array);
+ assert(%HasDoubleElements(array));
}
function FastSetup() {
array = Array.from({ length: array_size }, (_, i) => `value ${i}`);
+ assert(%HasObjectElements(array));
+}
+
+function HoleyFastSetup() {
+ FastSetup();
+ MakeHoley(array);
+ assert(%HasObjectElements(array));
+}
+
+function DictionarySetup() {
+ array = [];
+ // Add a large index to force dictionary elements.
+ array[2**30] = 10;
+ // Spread out {array_size} elements.
+ for (var i = 0; i < array_size-1; i++) {
+ array[i*101] = i;
+ }
+ assert(%HasDictionaryElements(array));
}
function ObjectSetup() {
@@ -41,15 +80,25 @@ function ObjectSetup() {
for (var i = 0; i < array_size; i++) {
array[i] = i;
}
+ assert(%HasObjectElements(array));
+ assert(%HasHoleyElements(array));
+}
+
+
+const ARRAY_SETUP = {
+ PACKED_SMI: SmiSetup,
+ HOLEY_SMI: HoleySmiSetup,
+ PACKED_DOUBLE: DoubleSetup,
+ HOLEY_DOUBLE: HoleyDoubleSetup,
+ PACKED: FastSetup,
+ HOLEY: HoleyFastSetup,
+ DICTIONARY: DictionarySetup,
}
function DefineHigherOrderTests(tests) {
let i = 0;
while (i < tests.length) {
- const name = tests[i++];
- const testFunc = tests[i++];
- const setupFunc = tests[i++];
- const callback = tests[i++];
+ const [name, testFunc, setupFunc, callback] = tests[i++];
let setupFuncWrapper = () => {
func = callback;
@@ -77,6 +126,7 @@ load('of.js');
load('join.js');
load('to-string.js');
load('slice.js');
+load('copy-within.js');
var success = true;
diff --git a/deps/v8/test/js-perf-test/Array/slice.js b/deps/v8/test/js-perf-test/Array/slice.js
index c9e60930f3..af99c092b1 100644
--- a/deps/v8/test/js-perf-test/Array/slice.js
+++ b/deps/v8/test/js-perf-test/Array/slice.js
@@ -43,13 +43,6 @@
})();
(() => {
-
- function assert(condition, message) {
- if (!condition) {
- throw Error(message);
- }
- }
-
const A = new Array(1000);
for (let i = 0; i < A.length; i++) {
diff --git a/deps/v8/test/js-perf-test/Array/some.js b/deps/v8/test/js-perf-test/Array/some.js
index ea820e9801..d7d5efa908 100644
--- a/deps/v8/test/js-perf-test/Array/some.js
+++ b/deps/v8/test/js-perf-test/Array/some.js
@@ -27,11 +27,11 @@ function OptUnreliableSome() {
DefineHigherOrderTests([
// name, test function, setup function, user callback
- "DoubleSome", mc("some"), DoubleSetup, v => v < 0.0,
- "SmiSome", mc("some"), SmiSetup, v => v === 34343,
- "FastSome", mc("some"), FastSetup, v => v === 'hi',
- "OptFastSome", OptFastSome, FastSetup, undefined,
- "OptUnreliableSome", OptUnreliableSome, FastSetup, v => v === 'hi'
+ ['DoubleSome', newClosure('some'), DoubleSetup, v => v < 0.0],
+ ['SmiSome', newClosure('some'), SmiSetup, v => v === 34343],
+ ['FastSome', newClosure('some'), FastSetup, v => v === 'hi'],
+ ['OptFastSome', OptFastSome, FastSetup, undefined],
+ ['OptUnreliableSome', OptUnreliableSome, FastSetup, v => v === 'hi']
]);
})();
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLarge/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLarge/run.js
new file mode 100644
index 0000000000..a018735fc9
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLarge/run.js
@@ -0,0 +1,153 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for large packed arrays.
+
+const largeHoleyArray = new Array(100000);
+const largeArray = Array.from(largeHoleyArray.keys());
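+// Array.from over the keys iterator materializes every index 0..99999, so
+// largeArray is packed; largeHoleyArray itself stays holey.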
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadLarge() {
+ var newArr = [...largeArray];
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthLarge() {
+ var newArr = new Array(largeArray.length);
+ for (let i = 0; i < largeArray.length; i++) {
+ newArr[i] = largeArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptyLarge() {
+ var newArr = [];
+ for (let i = 0; i < largeArray.length; i++) {
+ newArr[i] = largeArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceLarge() {
+ var newArr = largeArray.slice();
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0Large() {
+ var newArr = largeArray.slice(0);
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveLarge() {
+ var newArr = largeArray.concat();
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgLarge() {
+ var newArr = [].concat(largeArray);
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushLarge() {
+ var newArr = [];
+ for (let x of largeArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdLarge() {
+ var newArr = largeArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != largeArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadLarge(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+// Run the benchmark for (20 x 100) fixed iterations instead of running for 1 second.
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 20, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadLarge);
+CreateBenchmark('ForLength', ForLengthLarge);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptyLarge);
+CreateBenchmark('Slice', SliceLarge);
+CreateBenchmark('Slice0', Slice0Large);
+CreateBenchmark('ConcatReceive', ConcatReceiveLarge);
+CreateBenchmark('ConcatArg', ConcatArgLarge);
+CreateBenchmark('ForOfPush', ForOfPushLarge);
+CreateBenchmark('MapId', MapIdLarge);
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = true;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js
new file mode 100644
index 0000000000..b56efe9836
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadLargeHoley/run.js
@@ -0,0 +1,161 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for large holey arrays.
+
+const largeHoleyArray = new Array(100000);
+
+for (var i = 0; i < 100; i++) {
+ largeHoleyArray[i] = i;
+}
+
+for (var i = 5000; i < 5500; i++) {
+ largeHoleyArray[i] = i;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadLargeHoley() {
+ var newArr = [...largeHoleyArray];
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthLargeHoley() {
+ var newArr = new Array(largeHoleyArray.length);
+ for (let i = 0; i < largeHoleyArray.length; i++) {
+ newArr[i] = largeHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptyLargeHoley() {
+ var newArr = [];
+ for (let i = 0; i < largeHoleyArray.length; i++) {
+ newArr[i] = largeHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceLargeHoley() {
+ var newArr = largeHoleyArray.slice();
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0LargeHoley() {
+ var newArr = largeHoleyArray.slice(0);
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveLargeHoley() {
+ var newArr = largeHoleyArray.concat();
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgLargeHoley() {
+ var newArr = [].concat(largeHoleyArray);
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushLargeHoley() {
+ var newArr = [];
+ for (let x of largeHoleyArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdLargeHoley() {
+ var newArr = largeHoleyArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != largeHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadLargeHoley(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+// Run the benchmark for (20 x 100) fixed iterations instead of running for 1 second.
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 20, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadLargeHoley);
+CreateBenchmark('ForLength', ForLengthLargeHoley);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptyLargeHoley);
+CreateBenchmark('Slice', SliceLargeHoley);
+CreateBenchmark('Slice0', Slice0LargeHoley);
+CreateBenchmark('ConcatReceive', ConcatReceiveLargeHoley);
+CreateBenchmark('ConcatArg', ConcatArgLargeHoley);
+CreateBenchmark('ForOfPush', ForOfPushLargeHoley);
+CreateBenchmark('MapId', MapIdLargeHoley);
+
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = true;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmall/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmall/run.js
new file mode 100644
index 0000000000..5c8b8d1ac4
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmall/run.js
@@ -0,0 +1,154 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for small packed arrays.
+
+const smallHoleyArray = Array(100);
+const smallArray = Array.from(Array(100).keys());
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadSmall() {
+ var newArr = [...smallArray];
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthSmall() {
+ var newArr = new Array(smallArray.length);
+ for (let i = 0; i < smallArray.length; i++) {
+ newArr[i] = smallArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptySmall() {
+ var newArr = [];
+ for (let i = 0; i < smallArray.length; i++) {
+ newArr[i] = smallArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceSmall() {
+ var newArr = smallArray.slice();
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0Small() {
+ var newArr = smallArray.slice(0);
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveSmall() {
+ var newArr = smallArray.concat();
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgSmall() {
+ var newArr = [].concat(smallArray);
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushSmall() {
+ var newArr = [];
+ for (let x of smallArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdSmall() {
+ var newArr = smallArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != smallArray.length) throw 666;
+ return newArr;
+}
+
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadSmall(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadSmall);
+CreateBenchmark('ForLength', ForLengthSmall);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptySmall);
+CreateBenchmark('Slice', SliceSmall);
+CreateBenchmark('Slice0', Slice0Small);
+CreateBenchmark('ConcatReceive', ConcatReceiveSmall);
+CreateBenchmark('ConcatArg', ConcatArgSmall);
+CreateBenchmark('ForOfPush', ForOfPushSmall);
+CreateBenchmark('MapId', MapIdSmall);
+
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallHoley/run.js b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallHoley/run.js
new file mode 100644
index 0000000000..9e7aea2f19
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ArrayLiteralInitialSpreadSmallHoley/run.js
@@ -0,0 +1,159 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Comparing different copy schemes against spread initial literals.
+// Benchmarks for small holey arrays.
+
+const smallHoleyArray = Array(100);
+
+for (var i = 0; i < 10; i++) {
+ smallHoleyArray[i] = i;
+}
+for (var i = 90; i < 99; i++) {
+ smallHoleyArray[i] = i;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Spread
+// ----------------------------------------------------------------------------
+
+function SpreadSmallHoley() {
+ var newArr = [...smallHoleyArray];
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLength
+// ----------------------------------------------------------------------------
+
+function ForLengthSmallHoley() {
+ var newArr = new Array(smallHoleyArray.length);
+ for (let i = 0; i < smallHoleyArray.length; i++) {
+ newArr[i] = smallHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForLengthEmpty
+// ----------------------------------------------------------------------------
+
+function ForLengthEmptySmallHoley() {
+ var newArr = [];
+ for (let i = 0; i < smallHoleyArray.length; i++) {
+ newArr[i] = smallHoleyArray[i];
+ }
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice
+// ----------------------------------------------------------------------------
+
+function SliceSmallHoley() {
+ var newArr = smallHoleyArray.slice();
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Slice0
+// ----------------------------------------------------------------------------
+
+function Slice0SmallHoley() {
+ var newArr = smallHoleyArray.slice(0);
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatReceive
+// ----------------------------------------------------------------------------
+
+function ConcatReceiveSmallHoley() {
+ var newArr = smallHoleyArray.concat();
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ConcatArg
+// ----------------------------------------------------------------------------
+
+function ConcatArgSmallHoley() {
+ var newArr = [].concat(smallHoleyArray);
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ForOfPush
+// ----------------------------------------------------------------------------
+
+function ForOfPushSmallHoley() {
+ var newArr = [];
+ for (let x of smallHoleyArray) {
+ newArr.push(x)
+ }
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: MapId
+// ----------------------------------------------------------------------------
+
+function MapIdSmallHoley() {
+ var newArr = smallHoleyArray.map(x => x);
+ // basic sanity check
+ if (newArr.length != smallHoleyArray.length) throw 666;
+ return newArr;
+}
+
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+load('../base.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ArrayLiteralInitialSpreadSmallHoley(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult('Error: ' + name, error);
+ success = false;
+}
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [1000], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('Spread', SpreadSmallHoley);
+CreateBenchmark('ForLength', ForLengthSmallHoley);
+CreateBenchmark('ForLengthEmpty', ForLengthEmptySmallHoley);
+CreateBenchmark('Slice', SliceSmallHoley);
+CreateBenchmark('Slice0', Slice0SmallHoley);
+CreateBenchmark('ConcatReceive', ConcatReceiveSmallHoley);
+CreateBenchmark('ConcatArg', ConcatArgSmallHoley);
+CreateBenchmark('ForOfPush', ForOfPushSmallHoley);
+CreateBenchmark('MapId', MapIdSmallHoley);
+
+BenchmarkSuite.config.doWarmup = true;
+BenchmarkSuite.config.doDeterministic = false;
+BenchmarkSuite.RunSuites({NotifyResult: PrintResult, NotifyError: PrintError});
diff --git a/deps/v8/test/js-perf-test/ArraySort/sort-base.js b/deps/v8/test/js-perf-test/ArraySort/sort-base.js
index a3301752b2..c888972191 100644
--- a/deps/v8/test/js-perf-test/ArraySort/sort-base.js
+++ b/deps/v8/test/js-perf-test/ArraySort/sort-base.js
@@ -11,12 +11,6 @@ for (let i = 0; i < kArraySize; ++i) {
let array_to_sort = [];
-function assert(condition, message) {
- if (!condition) {
- throw Error(message);
- }
-}
-
function AssertPackedSmiElements() {
assert(%HasFastPackedElements(array_to_sort) &&
%HasSmiElements(array_to_sort),
diff --git a/deps/v8/test/js-perf-test/ArraySort/sort-lengths.js b/deps/v8/test/js-perf-test/ArraySort/sort-lengths.js
index aa681295c1..f27805a0ef 100644
--- a/deps/v8/test/js-perf-test/ArraySort/sort-lengths.js
+++ b/deps/v8/test/js-perf-test/ArraySort/sort-lengths.js
@@ -9,28 +9,24 @@ function SortAsc() {
}
function Random(length) {
+ array_to_sort = [];
for (let i = 0; i < length; ++i) {
- array_to_sort.push(Math.floor(Math.random()) * length);
+ array_to_sort.push(Math.floor(Math.random() * length));
}
AssertPackedSmiElements();
}
function Sorted(length) {
+ array_to_sort = [];
for (let i = 0; i < length; ++i) {
array_to_sort.push(i);
}
AssertPackedSmiElements();
}
-function TearDown() {
- array_to_sort = [];
-}
-
function CreateSortSuitesForLength(length) {
- createSortSuite(
- 'Random' + length, 1000, SortAsc, () => Random(length), TearDown);
- createSortSuite(
- 'Sorted' + length, 1000, SortAsc, () => Sorted(length), TearDown);
+ createSortSuite('Random' + length, 1000, SortAsc, () => Random(length));
+ createSortSuite('Sorted' + length, 1000, SortAsc, () => Sorted(length));
}
CreateSortSuitesForLength(10);
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index ce7e102b62..3793e2c9a8 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -60,6 +60,51 @@
"results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
},
{
+ "name": "ArrayLiteralInitialSpread",
+ "path": ["ArrayLiteralInitialSpread"],
+ "main": "run.js",
+ "resources": [],
+ "results_regexp": "^%s\\-ArrayLiteralInitialSpread\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread-Small"},
+ {"name": "ForLength-Small"},
+ {"name": "ForLengthEmpty-Small"},
+ {"name": "Slice-Small"},
+ {"name": "Slice0-Small"},
+ {"name": "ConcatReceive-Small"},
+ {"name": "ConcatArg-Small"},
+ {"name": "ForOfPush-Small"},
+ {"name": "MapId-Small"},
+ {"name": "Spread-Large"},
+ {"name": "ForLength-Large"},
+ {"name": "ForLengthEmpty-Large"},
+ {"name": "Slice-Large"},
+ {"name": "Slice0-Large"},
+ {"name": "ConcatReceive-Large"},
+ {"name": "ConcatArg-Large"},
+ {"name": "ForOfPush-Large"},
+ {"name": "MapId-Large"},
+ {"name": "Spread-SmallHoley"},
+ {"name": "ForLength-SmallHoley"},
+ {"name": "ForLengthEmpty-SmallHoley"},
+ {"name": "Slice-SmallHoley"},
+ {"name": "Slice0-SmallHoley"},
+ {"name": "ConcatReceive-SmallHoley"},
+ {"name": "ConcatArg-SmallHoley"},
+ {"name": "ForOfPush-SmallHoley"},
+ {"name": "MapId-SmallHoley"},
+ {"name": "Spread-LargeHoley"},
+ {"name": "ForLength-LargeHoley"},
+ {"name": "ForLengthEmpty-LargeHoley"},
+ {"name": "Slice-LargeHoley"},
+ {"name": "Slice0-LargeHoley"},
+ {"name": "ConcatReceive-LargeHoley"},
+ {"name": "ConcatArg-LargeHoley"},
+ {"name": "ForOfPush-LargeHoley"},
+ {"name": "MapId-LargeHoley"}
+ ]
+ },
+ {
"name": "ArrayLiteralSpread",
"path": ["ArrayLiteralSpread"],
"main": "run.js",
@@ -141,7 +186,7 @@
"path": ["Closures"],
"main": "run.js",
"resources": ["closures.js"],
- "flags": ["--mark_shared_functions_for_tier_up"],
+ "flags": [],
"results_regexp": "^%s\\-Closures\\(Score\\): (.+)$",
"tests": [
{"name": "Closures"}
@@ -614,9 +659,9 @@
"path": ["Array"],
"main": "run.js",
"resources": [
- "filter.js", "map.js", "every.js", "join.js", "some.js",
- "reduce.js", "reduce-right.js", "to-string.js", "find.js",
- "find-index.js", "from.js", "of.js", "for-each.js", "slice.js"
+ "filter.js", "map.js", "every.js", "join.js", "some.js", "reduce.js",
+ "reduce-right.js", "to-string.js", "find.js", "find-index.js",
+ "from.js", "of.js", "for-each.js", "slice.js", "copy-within.js"
],
"flags": [
"--allow-natives-syntax"
@@ -711,7 +756,11 @@
{"name": "Array.slice(500,999)-dict"},
{"name": "Array.slice(200,700)-dict"},
{"name": "Array.slice(200,-300)-dict"},
- {"name": "Array.slice(4,1)-dict"}
+ {"name": "Array.slice(4,1)-dict"},
+ {"name": "SmiCopyWithin"},
+ {"name": "StringCopyWithin"},
+ {"name": "SparseSmiCopyWithin"},
+ {"name": "SparseStringCopyWithin"}
]
},
{
@@ -1032,12 +1081,45 @@
"path": ["Parsing"],
"main": "run.js",
"flags": ["--no-compilation-cache", "--allow-natives-syntax"],
- "resources": [ "comments.js"],
+ "resources": [ "comments.js", "strings.js", "arrowfunctions.js"],
"results_regexp": "^%s\\-Parsing\\(Score\\): (.+)$",
"tests": [
{"name": "OneLineComment"},
{"name": "OneLineComments"},
- {"name": "MultiLineComment"}
+ {"name": "MultiLineComment"},
+ {"name": "SingleLineString"},
+ {"name": "SingleLineStrings"},
+ {"name": "MultiLineString"},
+ {"name": "ArrowFunctionShort"},
+ {"name": "ArrowFunctionLong"},
+ {"name": "CommaSepExpressionListShort"},
+ {"name": "CommaSepExpressionListLong"},
+ {"name": "CommaSepExpressionListLate"},
+ {"name": "FakeArrowFunction"}
+ ]
+ },
+ {
+ "name": "Numbers",
+ "path": ["Numbers"],
+ "main": "run.js",
+ "flags": ["--allow-natives-syntax"],
+ "resources": [ "toNumber.js"],
+ "results_regexp": "^%s\\-Numbers\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Constructor"},
+ {"name": "UnaryPlus"},
+ {"name": "ParseFloat"}
+ ]
+ },
+ {
+ "name": "TurboFan",
+ "path": ["TurboFan"],
+ "main": "run.js",
+ "flags": [],
+ "resources": [ "typedLowering.js"],
+ "results_regexp": "^%s\\-TurboFan\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "NumberToString"}
]
}
]
diff --git a/deps/v8/test/js-perf-test/Numbers/run.js b/deps/v8/test/js-perf-test/Numbers/run.js
new file mode 100644
index 0000000000..cdfbf25a70
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Numbers/run.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+load('../base.js');
+load('toNumber.js');
+
+function PrintResult(name, result) {
+  console.log(name + '-Numbers(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/Numbers/toNumber.js b/deps/v8/test/js-perf-test/Numbers/toNumber.js
new file mode 100644
index 0000000000..44382f092b
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Numbers/toNumber.js
@@ -0,0 +1,25 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+const A = [undefined, 12, "123"];
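+// Each helper converts all three values and feeds the result into
+// Number.isNaN, presumably so the conversion is observed and cannot be
+// optimized away.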
+
+function NumberConstructor() {
+ Number.isNaN(Number(A[0]))
+ Number.isNaN(Number(A[1]))
+ Number.isNaN(Number(A[2]))
+}
+createSuite('Constructor', 1000, NumberConstructor, ()=>{});
+
+function NumberPlus() {
+ Number.isNaN(+(A[0]))
+ Number.isNaN(+(A[1]))
+ Number.isNaN(+(A[2]))
+}
+createSuite('UnaryPlus', 1000, NumberPlus, ()=>{});
+
+function NumberParseFloat() {
+ Number.isNaN(parseFloat(A[0]))
+ Number.isNaN(parseFloat(A[1]))
+ Number.isNaN(parseFloat(A[2]))
+}
+createSuite('ParseFloat', 1000, NumberParseFloat, ()=>{});
diff --git a/deps/v8/test/js-perf-test/Parsing/arrowfunctions.js b/deps/v8/test/js-perf-test/Parsing/arrowfunctions.js
new file mode 100644
index 0000000000..bee4ef8b30
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Parsing/arrowfunctions.js
@@ -0,0 +1,60 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite("ArrowFunctionShort", [1000], [
+ new Benchmark("ArrowFunctionShort", false, true, iterations, Run, ArrowFunctionShortSetup)
+]);
+
+new BenchmarkSuite("ArrowFunctionLong", [1000], [
+ new Benchmark("ArrowFunctionLong", false, true, iterations, Run, ArrowFunctionLongSetup)
+]);
+
+new BenchmarkSuite("CommaSepExpressionListShort", [1000], [
+ new Benchmark("CommaSepExpressionListShort", false, true, iterations, Run, CommaSepExpressionListShortSetup)
+]);
+
+new BenchmarkSuite("CommaSepExpressionListLong", [1000], [
+ new Benchmark("CommaSepExpressionListLong", false, true, iterations, Run, CommaSepExpressionListLongSetup)
+]);
+
+new BenchmarkSuite("CommaSepExpressionListLate", [1000], [
+ new Benchmark("CommaSepExpressionListLate", false, true, iterations, Run, CommaSepExpressionListLateSetup)
+]);
+
+new BenchmarkSuite("FakeArrowFunction", [1000], [
+ new Benchmark("FakeArrowFunction", false, true, iterations, Run, FakeArrowFunctionSetup)
+]);
+
+function ArrowFunctionShortSetup() {
+ code = "let a;\n" + "a = (a,b) => { return a+b; }\n".repeat(100)
+}
+
+function ArrowFunctionLongSetup() {
+ code = "let a;\n" + "a = (a,b,c,d,e,f,g,h,i,j) => { return a+b; }\n".repeat(100)
+}
+
+function CommaSepExpressionListShortSetup() {
+ code = "let a;\n" + "a = (a,1)\n".repeat(100)
+}
+
+function CommaSepExpressionListLongSetup() {
+ code = "let a; let b; let c;\n" + "a = (a,2,3,4,5,b,c,1,7,1)\n".repeat(100)
+}
+
+function CommaSepExpressionListLateSetup() {
+ code = "let a; let b; let c; let d; let e; let f; let g; let h; let i;\n"
+ + "a = (a,b,c,d,e,f,g,h,i,1)\n".repeat(100)
+}
+
+function FakeArrowFunctionSetup() {
+ code = "let a; let b; let c; let d; let e; let f; let g; let h; let i; let j;\n"
+ + "a = (a,b,c,d,e,f,g,h,i,j)\n".repeat(100)
+}
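+// "Fake" arrow functions: parenthesized lists that look like arrow-function
+// parameter lists but are never followed by `=>`, so the parser ends up
+// treating them as plain comma expressions.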
+
+function Run() {
+ if (code == undefined) {
+ throw new Error("No test data");
+ }
+ eval(code);
+}
diff --git a/deps/v8/test/js-perf-test/Parsing/comments.js b/deps/v8/test/js-perf-test/Parsing/comments.js
index e5dbab1c75..5d6b70f085 100644
--- a/deps/v8/test/js-perf-test/Parsing/comments.js
+++ b/deps/v8/test/js-perf-test/Parsing/comments.js
@@ -2,22 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-const iterations = 100;
-
-new BenchmarkSuite('OneLineComment', [1000], [
- new Benchmark('OneLineComment', false, true, iterations, Run, OneLineCommentSetup)
+new BenchmarkSuite("OneLineComment", [1000], [
+ new Benchmark("OneLineComment", false, true, iterations, Run, OneLineCommentSetup)
]);
-new BenchmarkSuite('OneLineComments', [1000], [
- new Benchmark('OneLineComments', false, true, iterations, Run, OneLineCommentsSetup)
+new BenchmarkSuite("OneLineComments", [1000], [
+ new Benchmark("OneLineComments", false, true, iterations, Run, OneLineCommentsSetup)
]);
-new BenchmarkSuite('MultiLineComment', [1000], [
- new Benchmark('MultiLineComment', false, true, iterations, Run, MultiLineCommentSetup)
+new BenchmarkSuite("MultiLineComment", [1000], [
+ new Benchmark("MultiLineComment", false, true, iterations, Run, MultiLineCommentSetup)
]);
-let code;
-
function OneLineCommentSetup() {
code = "//" + " This is a comment... ".repeat(600);
%FlattenString(code);
diff --git a/deps/v8/test/js-perf-test/Parsing/run.js b/deps/v8/test/js-perf-test/Parsing/run.js
index e6531af5bb..d699af2eff 100644
--- a/deps/v8/test/js-perf-test/Parsing/run.js
+++ b/deps/v8/test/js-perf-test/Parsing/run.js
@@ -3,14 +3,19 @@
// found in the LICENSE file.
-load('../base.js');
+load("../base.js");
-load('comments.js');
+const iterations = 100;
+let code;
+
+load("comments.js");
+load("strings.js");
+load("arrowfunctions.js")
var success = true;
function PrintResult(name, result) {
- print(name + '-Parsing(Score): ' + result);
+ print(name + "-Parsing(Score): " + result);
}
diff --git a/deps/v8/test/js-perf-test/Parsing/strings.js b/deps/v8/test/js-perf-test/Parsing/strings.js
new file mode 100644
index 0000000000..63eef38701
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Parsing/strings.js
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite("SingleLineString", [1000], [
+ new Benchmark("SingleLineString", false, true, iterations, Run, SingleLineStringSetup)
+]);
+
+new BenchmarkSuite("SingleLineStrings", [3000], [
+ new Benchmark("SingleLineStrings", false, true, iterations, Run, SingleLineStringsSetup)
+]);
+
+new BenchmarkSuite("MultiLineString", [1000], [
+ new Benchmark("MultiLineString", false, true, iterations, Run, MultiLineStringSetup)
+]);
+
+function SingleLineStringSetup() {
+ code = "\"" + "This is a string".repeat(600) + "\"";
+ %FlattenString(code);
+}
+
+function SingleLineStringsSetup() {
+ code = "\"This is a string\"\n".repeat(600);
+ %FlattenString(code);
+}
+
+function MultiLineStringSetup() {
+ code = "\"" + "This is a string \\\n".repeat(600) + "\"";
+ %FlattenString(code);
+}
+
+function Run() {
+ if (code == undefined) {
+ throw new Error("No test data");
+ }
+ eval(code);
+}
diff --git a/deps/v8/test/js-perf-test/StringIterators/string-iterator.js b/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
index c55925415b..8a7e323b42 100644
--- a/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
+++ b/deps/v8/test/js-perf-test/StringIterators/string-iterator.js
@@ -2,37 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-function assert(expression, message) {
- if (typeof expression === "string" && message === void 0) {
- message = expression;
- expression = eval(expression);
- }
-
- if (!expression) {
- var lines = ["Benchmark Error"];
- if (message !== void 0) {
- lines = ["Benchmark Error:", String(message)];
- }
- throw new Error(lines.join("\n"));
- }
- return true;
-}
-
-assert.same = function(expected, actual, message) {
- var isSame =
- expected === actual || typeof expected !== expected && actual !== actual;
- if (!isSame) {
- var details = `Expected: ${String(expected)}\n` +
- `But found: ${String(actual)}`;
- var lines = ["Benchmark Error:", details];
- if (message !== void 0) {
- lines = ["Benchmark Error:", details, "", String(message)];
- }
- throw new Error(lines.join("\n"));
- }
- return true;
-}
-
new BenchmarkSuite('Spread_OneByteShort', [1000], [
new Benchmark('test', false, false, 0,
Spread_OneByteShort, Spread_OneByteShortSetup,
@@ -52,8 +21,8 @@ function Spread_OneByteShort() {
function Spread_OneByteShortTearDown() {
var expected = "A|l|p|h|a|b|e|t|-|S|o|u|p";
- return assert("Array.isArray(result)")
- && assert.same(expected, result.join("|"));
+ return assert(Array.isArray(result))
+ && assertEquals(expected, result.join("|"));
}
// ----------------------------------------------------------------------------
@@ -75,8 +44,8 @@ function Spread_TwoByteShort() {
function Spread_TwoByteShortTearDown() {
var expected = "\u5FCD|\u8005|\u306E|\u653B|\u6483";
- return assert("Array.isArray(result)")
- && assert.same(expected, result.join("|"));
+ return assert(Array.isArray(result))
+ && assertEquals(expected, result.join("|"));
}
// ----------------------------------------------------------------------------
@@ -100,8 +69,8 @@ function Spread_WithSurrogatePairsShort() {
function Spread_WithSurrogatePairsShortTearDown() {
var expected =
"\uD83C\uDF1F|\u5FCD|\u8005|\u306E|\u653B|\u6483|\uD83C\uDF1F";
- return assert("Array.isArray(result)")
- && assert.same(expected, result.join("|"));
+ return assert(Array.isArray(result))
+ && assertEquals(expected, result.join("|"));
}
// ----------------------------------------------------------------------------
@@ -123,7 +92,7 @@ function ForOf_OneByteShort() {
}
function ForOf_OneByteShortTearDown() {
- return assert.same(string, result);
+ return assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -145,7 +114,7 @@ function ForOf_TwoByteShort() {
}
function ForOf_TwoByteShortTearDown() {
- return assert.same(string, result);
+ return assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -168,7 +137,7 @@ function ForOf_WithSurrogatePairsShort() {
}
function ForOf_WithSurrogatePairsShortTearDown() {
- return assert.same(string, result);
+ return assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -190,7 +159,7 @@ function ForOf_OneByteLong() {
}
function ForOf_OneByteLongTearDown() {
- return assert.same(string, result);
+ return assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -212,7 +181,7 @@ function ForOf_TwoByteLong() {
}
function ForOf_TwoByteLongTearDown() {
- return assert.same(string, result);
+ return assertEquals(string, result);
}
// ----------------------------------------------------------------------------
@@ -235,5 +204,5 @@ function ForOf_WithSurrogatePairsLong() {
}
function ForOf_WithSurrogatePairsLongTearDown() {
- return assert.same(string, result);
+ return assertEquals(string, result);
}
diff --git a/deps/v8/test/js-perf-test/TurboFan/run.js b/deps/v8/test/js-perf-test/TurboFan/run.js
new file mode 100644
index 0000000000..3415262317
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TurboFan/run.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load("../base.js");
+
+const iterations = 100;
+
+load("typedLowering.js");
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + "-TurboFan(Score): " + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/TurboFan/typedLowering.js b/deps/v8/test/js-perf-test/TurboFan/typedLowering.js
new file mode 100644
index 0000000000..d2ce15cc6e
--- /dev/null
+++ b/deps/v8/test/js-perf-test/TurboFan/typedLowering.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function NumberToString() {
+ var ret;
+ var num = 10240;
+ var obj = {};
+
+ for ( var i = 0; i < num; i++ )
+ ret = obj["test" + num];
+}
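+// The property key "test" + num forces a number-to-string conversion on every
+// iteration, which is the operation TurboFan's typed lowering is expected to
+// optimize here.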
+createSuite('NumberToString', 1000, NumberToString);
diff --git a/deps/v8/test/js-perf-test/base.js b/deps/v8/test/js-perf-test/base.js
index 61089f039f..ca25789488 100644
--- a/deps/v8/test/js-perf-test/base.js
+++ b/deps/v8/test/js-perf-test/base.js
@@ -373,3 +373,23 @@ BenchmarkSuite.prototype.RunStep = function(runner) {
// Start out running the setup.
return RunNextSetup();
}
+
+
+
+function assert(condition, message) {
+ if (!condition) throw Error(message);
+}
+
+
+function assertEquals(expected, actual, message) {
+  // Treat a pair of NaN values as equal.
+  var isSame =
+      expected === actual || (expected !== expected && actual !== actual);
+ if (isSame) return true;
+ var details = `Expected: ${String(expected)}\n` +
+ `But found: ${String(actual)}`;
+ var lines = ["Benchmark Error:", details];
+ if (message !== undefined) {
+ lines = ["Benchmark Error:", details, "", String(message)];
+ }
+ throw new Error(lines.join("\n"));
+}
diff --git a/deps/v8/test/message/asm-linking-bogus-heap.out b/deps/v8/test/message/asm-linking-bogus-heap.out
index 5a324c1fea..b0af630e57 100644
--- a/deps/v8/test/message/asm-linking-bogus-heap.out
+++ b/deps/v8/test/message/asm-linking-bogus-heap.out
@@ -2,4 +2,4 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:7: Linking failure in asm.js: Unexpected heap size
+*%(basename)s:7: Linking failure in asm.js: Invalid heap size
diff --git a/deps/v8/test/message/fail/non-alphanum.out b/deps/v8/test/message/fail/non-alphanum.out
index 34464efb74..3a147dd9a2 100644
--- a/deps/v8/test/message/fail/non-alphanum.out
+++ b/deps/v8/test/message/fail/non-alphanum.out
@@ -2,5 +2,5 @@
([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]])([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(![]+[])[+!+[]]]((![]+[])[+!+[]])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+(+[![]]+[])[+[]])[+[]]+(![]+[])[+!+[]]+(+[]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[!+[]+!+[]+!+[]+[+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+
[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([]+([]+[])[([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+!+[]]+(![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[+!+[]]+([][[]]+[])[+[]]+([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]])[+!+[]+[!+[]+!+[]+!+[]+!+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+[+[]])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])+[])[+[]]+(![]+[])[+[]])[+[]])
^
TypeError: Cannot convert undefined or null to object
- at sort (native)
+ at sort (<anonymous>)
at *%(basename)s:34:410
diff --git a/deps/v8/test/message/wasm-trace-memory-interpreted.out b/deps/v8/test/message/wasm-trace-memory-interpreted.out
index 7865195649..248d67e827 100644
--- a/deps/v8/test/message/wasm-trace-memory-interpreted.out
+++ b/deps/v8/test/message/wasm-trace-memory-interpreted.out
@@ -1,9 +1,9 @@
-I 0+0x3 load @00000004 i32:0 / 00000000
-I 1+0x3 load @00000001 i8:0 / 00
-I 3+0x5 store @00000004 i32:305419896 / 12345678
-I 0+0x3 load @00000002 i32:1450704896 / 56780000
-I 1+0x3 load @00000006 i8:52 / 34
-I 2+0x3 load @00000002 f32:68169720922112.000000 / 56780000
-I 4+0x5 store @00000004 i8:171 / ab
-I 0+0x3 load @00000002 i32:1454047232 / 56ab0000
-I 2+0x3 load @00000002 f32:94008244174848.000000 / 56ab0000
+interpreter func: 0+0x3 load from 00000004 val: i32:0 / 00000000
+interpreter func: 1+0x3 load from 00000001 val: i8:0 / 00
+interpreter func: 3+0x5 store to 00000004 val: i32:305419896 / 12345678
+interpreter func: 0+0x3 load from 00000002 val: i32:1450704896 / 56780000
+interpreter func: 1+0x3 load from 00000006 val: i8:52 / 34
+interpreter func: 2+0x3 load from 00000002 val: f32:68169720922112.000000 / 56780000
+interpreter func: 4+0x5 store to 00000004 val: i8:171 / ab
+interpreter func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
+interpreter func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
diff --git a/deps/v8/test/message/wasm-trace-memory-liftoff.out b/deps/v8/test/message/wasm-trace-memory-liftoff.out
index 26f22a5498..31fdefde3d 100644
--- a/deps/v8/test/message/wasm-trace-memory-liftoff.out
+++ b/deps/v8/test/message/wasm-trace-memory-liftoff.out
@@ -1,9 +1,9 @@
-L 0+0x3 load @00000004 i32:0 / 00000000
-L 1+0x3 load @00000001 i8:0 / 00
-L 3+0x5 store @00000004 i32:305419896 / 12345678
-L 0+0x3 load @00000002 i32:1450704896 / 56780000
-L 1+0x3 load @00000006 i8:52 / 34
-L 2+0x3 load @00000002 f32:68169720922112.000000 / 56780000
-L 4+0x5 store @00000004 i8:171 / ab
-L 0+0x3 load @00000002 i32:1454047232 / 56ab0000
-L 2+0x3 load @00000002 f32:94008244174848.000000 / 56ab0000
+liftoff func: 0+0x3 load from 00000004 val: i32:0 / 00000000
+liftoff func: 1+0x3 load from 00000001 val: i8:0 / 00
+liftoff func: 3+0x5 store to 00000004 val: i32:305419896 / 12345678
+liftoff func: 0+0x3 load from 00000002 val: i32:1450704896 / 56780000
+liftoff func: 1+0x3 load from 00000006 val: i8:52 / 34
+liftoff func: 2+0x3 load from 00000002 val: f32:68169720922112.000000 / 56780000
+liftoff func: 4+0x5 store to 00000004 val: i8:171 / ab
+liftoff func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
+liftoff func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
diff --git a/deps/v8/test/message/wasm-trace-memory.out b/deps/v8/test/message/wasm-trace-memory.out
index 37faa6a989..bc6b1b64ec 100644
--- a/deps/v8/test/message/wasm-trace-memory.out
+++ b/deps/v8/test/message/wasm-trace-memory.out
@@ -1,9 +1,9 @@
-T 0+0x3 load @00000004 i32:0 / 00000000
-T 1+0x3 load @00000001 i8:0 / 00
-T 3+0x5 store @00000004 i32:305419896 / 12345678
-T 0+0x3 load @00000002 i32:1450704896 / 56780000
-T 1+0x3 load @00000006 i8:52 / 34
-T 2+0x3 load @00000002 f32:68169720922112.000000 / 56780000
-T 4+0x5 store @00000004 i8:171 / ab
-T 0+0x3 load @00000002 i32:1454047232 / 56ab0000
-T 2+0x3 load @00000002 f32:94008244174848.000000 / 56ab0000
+turbofan func: 0+0x3 load from 00000004 val: i32:0 / 00000000
+turbofan func: 1+0x3 load from 00000001 val: i8:0 / 00
+turbofan func: 3+0x5 store to 00000004 val: i32:305419896 / 12345678
+turbofan func: 0+0x3 load from 00000002 val: i32:1450704896 / 56780000
+turbofan func: 1+0x3 load from 00000006 val: i8:52 / 34
+turbofan func: 2+0x3 load from 00000002 val: f32:68169720922112.000000 / 56780000
+turbofan func: 4+0x5 store to 00000004 val: i8:171 / ab
+turbofan func: 0+0x3 load from 00000002 val: i32:1454047232 / 56ab0000
+turbofan func: 2+0x3 load from 00000002 val: f32:94008244174848.000000 / 56ab0000
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index fb4ad5a19a..44d132820c 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -138,7 +138,6 @@ function assertKind(expected, obj, name_opt) {
%OptimizeFunctionOnNextCall(bar);
b = bar();
assertOptimized(bar);
- %DebugPrint(3);
b[0] = 3.5;
c = bar();
assertKind(elements_kind.fast_smi_only, c);
diff --git a/deps/v8/test/mjsunit/array-iterator-prototype-next.js b/deps/v8/test/mjsunit/array-iterator-prototype-next.js
new file mode 100644
index 0000000000..d17bf2740c
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-iterator-prototype-next.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+/* Test behaviors when the prototype has elements */
+
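+// `[,]` has a hole at index 0; reading that element falls through to the
+// prototype chain, so pushing onto Array.prototype changes what the iterator
+// yields.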
+(function () {
+ var array = [,];
+
+ function next() {
+ return array[Symbol.iterator]().next();
+ }
+
+ assertEquals(next().value, undefined);
+
+ array.__proto__.push(5);
+ assertEquals(next().value, 5);
+})();
diff --git a/deps/v8/test/mjsunit/array-lastindexof.js b/deps/v8/test/mjsunit/array-lastindexof.js
index 785bd64727..c31ad18b55 100644
--- a/deps/v8/test/mjsunit/array-lastindexof.js
+++ b/deps/v8/test/mjsunit/array-lastindexof.js
@@ -8,3 +8,76 @@ assertThrows(() => {
assertThrows(() => {
Array.prototype.lastIndexOf.call(undefined, 42);
}, TypeError);
+
+/* Tests inspired by test262's
+ lastIndexOf/calls-only-has-on-prototype-after-length-zeroed.js */
+// Stateful fromIndex that tries to empty the array
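+// lastIndexOf reads the length before coercing fromIndex, so truncating the
+// array inside valueOf() leaves indices 1 and 2 absent and only the element
+// at index 0 can still be found.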
+(function testFromIndex() {
+ var array = [5, undefined, 7];
+ var fromIndex = {
+ valueOf: function() {
+ array.length = 1;
+ return 2;
+ }
+ };
+ assertEquals(-1, array.lastIndexOf(undefined, fromIndex));
+
+ array = [5, undefined, 7];
+ assertEquals(0, array.lastIndexOf(5, fromIndex));
+})();
+
+// Stateful fromIndex and proxy as Prototype
+// Must test for [[HasProperty]] before [[Get]]
+var testHasProperty = function(value) {
+ var array = [5, undefined, 7];
+ var fromIndex = {
+ valueOf: function() {
+ array.length = 0;
+ return 2;
+ }
+ };
+
+ // Install a prototype that only has [[HasProperty]], and throws on [[Get]]
+ Object.setPrototypeOf(array,
+ new Proxy(Array.prototype, {
+ has: function(t, pk) { return pk in t; },
+ get: function () { throw new Error('[[Get]] trap called') },
+ }));
+
+ assertEquals(-1, Array.prototype.lastIndexOf.call(array, value, fromIndex));
+}
+
+testHasProperty(5);
+testHasProperty(undefined);
+
+// Test call order: [[HasProperty]] before [[Get]]
+var testHasPropertyThenGet = function(value) {
+ var array = [5, , 7];
+ var log = [];
+
+ // Install a prototype with only [[HasProperty]] and [[Get]]
+ Object.setPrototypeOf(array,
+ new Proxy(Array.prototype, {
+ has: function() { log.push("HasProperty"); return true; },
+ get: function() { log.push("Get"); },
+ }));
+ // The 2nd element (index 1) will trigger the calls to the prototype
+ Array.prototype.lastIndexOf.call(array, value);
+ assertEquals(["HasProperty", "Get"], log);
+}
+
+testHasPropertyThenGet(5);
+testHasPropertyThenGet(undefined);
+
+// Test for sparse Arrays
+/* This will not enter the fast path for sparse arrays, due to UseSparseVariant
+ excluding array elements with accessors */
+(function() {
+ var array = new Array(10000);
+ array[0] = 5; array[9999] = 7;
+
+ var count = 0;
+ Object.defineProperty(array.__proto__, 9998, { get: () => ++count });
+ Array.prototype.lastIndexOf.call(array, 0);
+ assertEquals(1,count);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-every.js b/deps/v8/test/mjsunit/array-prototype-every.js
new file mode 100644
index 0000000000..a9e415e084
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-every.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// every
+
+(function () {
+ var array = [,];
+
+ function every() {
+ return array.every(v => v > 0);
+ }
+
+ assertEquals(every(), true);
+
+ array.__proto__.push(-6);
+ assertEquals(every(), false);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-filter.js b/deps/v8/test/mjsunit/array-prototype-filter.js
new file mode 100644
index 0000000000..958df23a26
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-filter.js
@@ -0,0 +1,22 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// filter
+
+(function () {
+ var array = [,];
+
+ function filter() {
+ return array.filter(v => v > 0);
+ }
+
+ assertEquals(filter(), []);
+
+ array.__proto__.push(6);
+ var narr = filter();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 0), undefined);
+ assertEquals(narr, [6]);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-find.js b/deps/v8/test/mjsunit/array-prototype-find.js
new file mode 100644
index 0000000000..8847fddd63
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-find.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// find
+
+(function () {
+ var array = [,];
+
+ function find() {
+ return array.find(v => v > 0);
+ }
+
+ assertEquals(find(), undefined);
+
+ array.__proto__.push(6);
+ assertEquals(find(), 6);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-findindex.js b/deps/v8/test/mjsunit/array-prototype-findindex.js
new file mode 100644
index 0000000000..5d627ca043
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-findindex.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// findIndex
+
+(function () {
+ var array = [,];
+
+ function findIndex() {
+ return array.findIndex(v => v > 0);
+ }
+
+ assertEquals(findIndex(), -1);
+
+ array.__proto__.push(6);
+ assertEquals(findIndex(), 0);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-foreach.js b/deps/v8/test/mjsunit/array-prototype-foreach.js
new file mode 100644
index 0000000000..7f01d8ef65
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-foreach.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// forEach
+
+(function () {
+ var array = [,];
+
+ function increment(v, k, arr) { arr[k] = v + 1; }
+ function forEach() {
+ array.forEach(increment);
+ }
+
+ forEach();
+ assertEquals(array, [,]);
+
+ // behavior from the prototype
+ array.__proto__.push(5);
+ assertEquals(Object.getOwnPropertyDescriptor(array, 0), undefined);
+ forEach();
+ assertNotEquals(Object.getOwnPropertyDescriptor(array, 0), undefined);
+ assertEquals(array[0], 6);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-includes.js b/deps/v8/test/mjsunit/array-prototype-includes.js
new file mode 100644
index 0000000000..5c1d926372
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-includes.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// includes
+
+(function() {
+ const iarr = [,3];
+
+ function includes(arr, val) {
+ return arr.includes(val);
+ }
+
+ assertFalse(includes(iarr, 2));
+ assertTrue(includes(iarr, 3));
+
+ iarr.__proto__ = [2];
+ assertTrue(includes(iarr, 2));
+})();
+
+// This pollutes the Array prototype, so we should not run more tests
+// in the same environment after this.
+(function () {
+ var array = [,];
+
+ function includes(val) {
+ return array.includes(val);
+ }
+
+ assertFalse(includes(6));
+
+ array.__proto__.push(6);
+ assertTrue(includes(6));
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-indexof.js b/deps/v8/test/mjsunit/array-prototype-indexof.js
new file mode 100644
index 0000000000..ca639f4da2
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-indexof.js
@@ -0,0 +1,36 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// indexOf
+
+/* Test behaviors when the prototype has elements */
+
+(function() {
+ const iarr = [,3];
+
+ function indexOf(arr, val) {
+ return arr.indexOf(val);
+ }
+
+ assertEquals(-1, indexOf(iarr, 2));
+ assertEquals(1, indexOf(iarr, 3));
+
+ iarr.__proto__ = [2];
+ assertEquals(0, indexOf(iarr, 2));
+})();
+
+// This pollutes the Array prototype, so we should not run more tests
+// in the same environment after this.
+(function () {
+ var array = [,];
+
+ function indexOf(val) {
+ return array.indexOf(val);
+ }
+
+ assertEquals(indexOf(6), -1);
+
+ array.__proto__.push(6);
+ assertEquals(indexOf(6), 0);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-lastindexof.js b/deps/v8/test/mjsunit/array-prototype-lastindexof.js
new file mode 100644
index 0000000000..b0345301c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-lastindexof.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// lastIndexOf
+
+(function () {
+ var array = [,];
+
+ function lastIndexOf(val) {
+ return array.lastIndexOf(val);
+ }
+
+ assertEquals(lastIndexOf(6), -1);
+
+ array.__proto__.push(6);
+ assertEquals(lastIndexOf(6), 0);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-map.js b/deps/v8/test/mjsunit/array-prototype-map.js
new file mode 100644
index 0000000000..1ef6088669
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-map.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// map
+(function () {
+ var array = [,];
+
+ function map() {
+ return array.map(x => x + 1);
+ }
+
+ assertEquals(map(), [,]);
+
+ array.__proto__.push(5);
+ var narr = map();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 0), undefined);
+ assertEquals(narr[0], 6);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-pop.js b/deps/v8/test/mjsunit/array-prototype-pop.js
new file mode 100644
index 0000000000..730b98255a
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-pop.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// pop
+
+(function () {
+ var array = [,];
+
+ function pop() {
+ return array.pop();
+ }
+
+ assertEquals(pop(), undefined);
+})();
+
+
+(function () {
+ var array = [,];
+
+ function pop() {
+ return array.pop();
+ }
+
+ array.__proto__.push(6);
+ assertEquals(pop(), 6);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-reduce.js b/deps/v8/test/mjsunit/array-prototype-reduce.js
new file mode 100644
index 0000000000..593066f897
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-reduce.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// reduce
+
+(function () {
+ var array = [, 3];
+
+ function accumulate (prev, cur, curIdx, arr) { arr[curIdx] = cur + prev; }
+ function reduce() {
+ array.reduce(accumulate);
+ }
+
+ reduce();
+ assertEquals(array, [,3]);
+
+ array.__proto__.push(3);
+ reduce();
+ assertEquals(array, [, 6]);
+ assertEquals(Object.getOwnPropertyDescriptor(array, 0), undefined);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-slice.js b/deps/v8/test/mjsunit/array-prototype-slice.js
new file mode 100644
index 0000000000..d4bcad277d
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-slice.js
@@ -0,0 +1,21 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// slice
+(function () {
+ var array = [,];
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(), [,]);
+
+ array.__proto__.push(5);
+ var narr = slice();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 0), undefined);
+ assertEquals(narr[0], 5);
+})();
diff --git a/deps/v8/test/mjsunit/array-prototype-some.js b/deps/v8/test/mjsunit/array-prototype-some.js
new file mode 100644
index 0000000000..2118444168
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-prototype-some.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test behaviors when the prototype has elements */
+
+// some
+
+(function () {
+ var array = [,];
+
+ function some() {
+ return array.some(v => v > 0);
+ }
+
+ assertEquals(some(), false);
+
+ array.__proto__.push(6);
+ assertEquals(some(), true);
+})();
diff --git a/deps/v8/test/mjsunit/array-reverse.js b/deps/v8/test/mjsunit/array-reverse.js
new file mode 100644
index 0000000000..11aeb60cac
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-reverse.js
@@ -0,0 +1,70 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertArrayEquals([], [].reverse());
+assertArrayEquals([8, 6, 4, 2], [2, 4, 6, 8].reverse());
+assertArrayEquals([0.8, 0.6, 0.4], [0.4, 0.6, 0.8].reverse());
+assertArrayEquals(["str4", "str3", "str2"], ["str2", "str3", "str4"].reverse());
+
+assertArrayEquals([4,3,,1], [1,,3,4].reverse());
+assertArrayEquals([4,,2,1], [1,2,,4].reverse());
+assertArrayEquals([5,,3,,1], [1,,3,,5].reverse());
+
+function TestReverseWithObject() {
+ let obj = { length: 5 };
+ obj[0] = "foo";
+ obj[3] = "bar";
+ Array.prototype.reverse.call(obj);
+
+ assertArrayEquals([,"bar",,,"foo"], obj);
+}
+TestReverseWithObject();
+
+function TestReverseWithPrototypeChain() {
+ let proto = { 0: "foo", 19: "bar" };
+ let obj = { length: 20, 5: "foobar", __proto__: proto };
+ Array.prototype.reverse.call(obj);
+
+ assertEquals("bar", obj[0]);
+ assertEquals("foobar", obj[14]);
+ assertEquals("foo", obj[19]);
+}
+TestReverseWithPrototypeChain();
+
+function TestReverseWithTypedArrays() {
+ const constructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+ ];
+
+ for (const constructor of constructors) {
+ const array_odd = new constructor([1, 2, 3]);
+ Array.prototype.reverse.call(array_odd);
+ assertArrayEquals([3, 2, 1], array_odd, constructor);
+
+ const array_even = new constructor([1, 2, 3, 4]);
+ Array.prototype.reverse.call(array_even);
+ assertArrayEquals([4, 3, 2, 1], array_even, constructor);
+
+ // Array.prototype.reverse respects shadowing length on TypedArrays.
+ const array = new constructor([1, 2, 3, 4]);
+ Object.defineProperty(array, 'length', {value: 2});
+ Array.prototype.reverse.call(array);
+
+ assertArrayEquals([2, 1], array, constructor);
+
+ const array_shadowed_length = new constructor([1, 2]);
+ Object.defineProperty(array_shadowed_length, 'length', {value: 5});
+
+ assertThrows(() => Array.prototype.reverse.call(array_shadowed_length));
+ }
+}
+TestReverseWithTypedArrays();
diff --git a/deps/v8/test/mjsunit/asm/asm-heap.js b/deps/v8/test/mjsunit/asm/asm-heap.js
new file mode 100644
index 0000000000..d0d190a73d
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/asm-heap.js
@@ -0,0 +1,99 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax --expose-gc --mock-arraybuffer-allocator
+
+let gCounter = 1000;
+let gMinHeap = new ArrayBuffer(1 << 12);
+let gStdlib = {Uint8Array: Uint8Array};
+
+// The template of asm.js modules used in this test.
+function Template(stdlib, ffi, heap) {
+ "use asm";
+ var MEM8 = new stdlib.Uint8Array(heap);
+ function foo() { return VAL; }
+ return { foo: foo };
+}
+
+// Create a fresh module each time.
+function NewModule() {
+ // Use eval() to get a unique module each time.
+ let val = gCounter++;
+ let string = (Template + "; Template").replace("VAL", "" + val);
+// print(string);
+ let module = eval(string);
+// print(module);
+ module(gStdlib, {}, gMinHeap);
+ assertTrue(%IsAsmWasmCode(module));
+ return {module: module, val: val};
+}
+
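+// asm.js only accepts a heap whose byteLength is a power of two in the range
+// [2^12, 2^24] or a multiple of 2^24; the tests below cover both the valid
+// and the invalid sizes.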
+(function TestValid_PowerOfTwo() {
+ print("TestValid_PowerOfTwo...");
+ let r = NewModule();
+ for (let i = 12; i <= 24; i++) {
+ gc(); // Likely OOM otherwise.
+ let size = 1 << i;
+ print(" size=" + size);
+ let heap = new ArrayBuffer(size);
+ var instance = r.module(gStdlib, {}, heap);
+ assertTrue(%IsAsmWasmCode(r.module));
+ assertEquals(r.val, instance.foo());
+ }
+})();
+
+(function TestValid_Multiple() {
+ print("TestValid_Multiple...");
+ let r = NewModule();
+ for (let i = 1; i < 47; i += 7) {
+ gc(); // Likely OOM otherwise.
+ let size = i * (1 << 24);
+ print(" size=" + size);
+ let heap = new ArrayBuffer(size);
+ var instance = r.module(gStdlib, {}, heap);
+ assertTrue(%IsAsmWasmCode(r.module));
+ assertEquals(r.val, instance.foo());
+ }
+})();
+
+(function TestInvalid_TooSmall() {
+ print("TestInvalid_TooSmall...");
+ for (let i = 1; i < 12; i++) {
+ let size = 1 << i;
+ print(" size=" + size);
+ let r = NewModule();
+ let heap = new ArrayBuffer(size);
+ var instance = r.module(gStdlib, {}, heap);
+ assertFalse(%IsAsmWasmCode(r.module));
+ assertEquals(r.val, instance.foo());
+ }
+})();
+
+(function TestInvalid_NonPowerOfTwo() {
+ print("TestInvalid_NonPowerOfTwo...");
+ for (let i = 12; i <= 24; i++) {
+ gc(); // Likely OOM otherwise.
+ let size = 1 + (1 << i);
+ print(" size=" + size);
+ let r = NewModule();
+ let heap = new ArrayBuffer(size);
+ var instance = r.module(gStdlib, {}, heap);
+ assertFalse(%IsAsmWasmCode(r.module));
+ assertEquals(r.val, instance.foo());
+ }
+})();
+
+(function TestInvalid_NonMultiple() {
+ print("TestInvalid_NonMultiple...");
+ for (let i = (1 << 24); i < (1 << 25); i += (1 << 22)) {
+ gc(); // Likely OOM otherwise.
+ let size = i + (1 << 20);
+ print(" size=" + size);
+ let r = NewModule();
+ let heap = new ArrayBuffer(size);
+ var instance = r.module(gStdlib, {}, heap);
+ assertFalse(%IsAsmWasmCode(r.module));
+ assertEquals(r.val, instance.foo());
+ }
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
index 8699346bf2..3ddff992f7 100644
--- a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
+++ b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --no-stress-background-compile
let id = 0;
diff --git a/deps/v8/test/mjsunit/compiler/array-slice-clone.js b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
new file mode 100644
index 0000000000..610fa17c1a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
@@ -0,0 +1,365 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+
+// Test CloneFastJSArray inserted by JSCallReducer for Array.prototype.slice.
+// CloneFastJSArray produces COW arrays if the original array is COW.
+
+// Trigger JSCallReducer on slice() and slice(0)
+(function() {
+ const arr = [1,2,3,4,5];
+
+ function slice() {
+ return arr.slice();
+ }
+
+ function slice0() {
+ return arr.slice(0);
+ }
+
+ assertEquals(arr, slice());
+ assertFalse(arr === slice());
+ assertEquals(slice(), slice0());
+ assertEquals(slice0(), slice());
+
+ %OptimizeFunctionOnNextCall(slice0);
+ %OptimizeFunctionOnNextCall(slice);
+
+ assertEquals(slice(), slice0());
+ assertOptimized(slice); assertOptimized(slice0);
+})();
+
+// This will cause a deopt of slice via a CheckMap installed by
+// JSNativeContextSpecialization::ReduceNamedAccess.
+(function() {
+ const arr = [1,2,3,4,5];
+
+ function slice() {
+ return arr.slice();
+ }
+
+ assertEquals(arr, slice());
+ assertEquals(slice(), arr);
+
+ %OptimizeFunctionOnNextCall(slice);
+ slice();
+
+ // Trigger deopt here
+ arr.push(7.2);
+ assertEquals(slice()[5], 7.2);
+})();
+
+// There should not be a deopt cycle.
+(function() {
+ const arr = [1,2,3,4,5];
+
+ function slice() {
+ return arr.slice();
+ }
+
+ assertEquals(arr, slice());
+ assertEquals(slice(), arr);
+
+ %OptimizeFunctionOnNextCall(slice);
+ // Trigger opt
+ assertEquals(slice(), arr);
+
+ // Trigger deopt by CheckMap from JSNativeContextSpecialization
+ arr.push(7.2);
+ slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+ // Trigger opt again
+ slice();
+
+ // Should not deopt again
+ arr.push(8.2);
+ slice();
+ assertOptimized(slice);
+})();
+
+// JSCallReducer will not reduce because the species constructor has been modified
+(function() {
+ const array = [3,4,5];
+
+ function slice(){
+ return array.slice();
+ }
+
+ class MyArray extends Array {};
+ array.constructor = MyArray;
+
+ slice(); slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+ var narr = slice();
+ assertInstanceof(narr, MyArray);
+})();
+
+(function() {
+ const array = [3,4,5];
+
+ function slice(){
+ return array.slice();
+ }
+
+ slice(); slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+
+ slice();
+
+ class MyArray extends Array {};
+ array.constructor = MyArray;
+ // deopt
+ var narr = slice();
+ // If it had not deopted, narr would be an instance of Array, not MyArray.
+ assertTrue(narr instanceof MyArray);
+})();
+
+// JSCallReducer adds a check for UnreliableReceiverMaps
+(function() {
+ const arr = [1,2,3,4,5];
+
+ function slice() {
+ return arr.slice();
+ }
+
+ slice(); slice();
+ arr.foo = 6.2;
+
+ %OptimizeFunctionOnNextCall(slice);
+ // JSCallReducer will add a check for UnreliableReceiverMaps
+ slice();
+
+ // Trigger a deopt via the DependOnStableMaps dependency installed by
+ // JSNativeContextSpecialization, not via the check installed by
+ // ReduceArrayPrototypeSlice itself.
+ arr.bar = 7.2;
+
+ let narr = slice();
+ assertEquals(arr, narr);
+ assertEquals(narr.foo, undefined);
+ assertEquals(narr.bar, undefined);
+})();
+
+// Multiple maps
+(function() {
+ const iarr = [1,2,3];
+ const darr = [2.1, 3.3, 0.2];
+
+ function slice(arr) {
+ return arr.slice();
+ }
+
+ slice(iarr); slice(darr);
+ slice(iarr); slice(darr);
+
+ %OptimizeFunctionOnNextCall(slice);
+ // The optimization works for both maps
+ assertEquals(iarr, slice(iarr));
+ assertEquals(darr, slice(darr));
+ assertOptimized(slice);
+})();
+
+// Tests for the branch of CanInlineArrayIteratingBuiltin
+
+// JSCallReducer will not reduce to CloneFastJSArray
+// if array's prototype is not JS_ARRAY_TYPE
+(function () {
+ class MyArray extends Array {
+ constructor() {
+ super();
+ this[6]= 6;
+ }
+ }
+ let array = new MyArray(3, 5, 4);
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(),array);
+ slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+ let narr = slice();
+ // Here, slice is supposed to call MyArray's constructor.
+ // If we had optimized with CloneFastJSArray, Array's constructor would be called instead.
+ assertEquals(narr[6], 6);
+ assertTrue(narr instanceof MyArray);
+})();
+
+// JSCallReducer will not reduce to CloneFastJSArray
+// if array's instance type is not JS_ARRAY_TYPE.
+// CloneFastJSArray does not work with non-JS_ARRAY_TYPE receivers.
+// Check: receiver_map->instance_type() == JS_ARRAY_TYPE
+(function () {
+ var x = {"0" : 0, "2": 2} ;
+ x.__proto__ = Array.prototype;
+
+ function slice() {
+ return x.slice();
+ }
+
+ slice(); slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+ assertEquals(slice(), []);
+})();
+
+// JSCallReducer will not reduce to CloneFastJSArray
+// since the array does not have a fast elements kind.
+// Check: IsFastElementsKind(receiver_map->elements_kind())
+(function () {
+ var array = [3, 4, 5];
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(),array);
+ slice();
+
+ // a sparse array switches to Dictionary Elements
+ array[9999] = 0;
+ %OptimizeFunctionOnNextCall(slice);
+ var narr = slice();
+ assertEquals(narr, array);
+})();
+
+(function () {
+ var array = [3, 4, 5];
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(),array);
+ slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+ slice();
+
+ // a sparse array switches to Dictionary Elements
+ array[9999] = 0;
+ // trigger a deopt because the map changes
+ assertEquals(slice(),array);
+})();
+
+// JSCallReducer will not reduce to CloneFastJSArray
+// if array is used as a prototype and has unstable map
+(function () {
+ var array = [3, 5, 4];
+
+ function slice(arr) {
+ return arr.slice();
+ }
+
+ // mark array's map as is_prototype_map()
+ var x = {__proto__ : array};
+
+ assertEquals(slice(array),array);
+ slice(array);
+
+ // make array's map unstable
+ array.push(6.3);
+ slice(array);
+
+ %OptimizeFunctionOnNextCall(slice);
+
+ assertEquals(slice(array),array);
+})();
+
+// JSCallReducer will not reduce to CloneFastJSArray
+// if the Array prototype got some elements.
+// Check: isolate->IsNoElementsProtectorIntact()
+(function () {
+ var array = [, 6, 6];
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(),array);
+ slice();
+
+ array.__proto__.push(6);
+
+ %OptimizeFunctionOnNextCall(slice);
+
+ // If we had reduced to CloneFastJSArray, we would get [, 6, 6].
+ // Here, slice copies elements from both the object and the prototype.
+ let narr = slice();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr,0), undefined);
+ assertEquals(narr, [6, 6, 6]);
+})();
+
+(function () {
+ var array = [, 6, 6];
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(),array);
+ slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+ slice();
+
+ // Deopt
+ array.__proto__.push(6);
+ let narr = slice();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 0), undefined);
+ assertEquals(narr[0], 6);
+})();
+
+// JSCallReducer will not reduce to CloneFastJSArray
+// if the Array prototype is not original
+// Check: isolate->IsAnyInitialArrayPrototype(receiver_prototype)
+(function () {
+ var array = [6, , 6];
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(),array);
+ slice();
+
+ // change the prototype
+ array.__proto__ = [ , 6, ];
+
+ %OptimizeFunctionOnNextCall(slice);
+ let narr = slice();
+ // If it had been reduced to CloneFastJSArray, we would get [6, , 6].
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 1), undefined);
+ assertEquals(narr, [6,6,6]);
+})();
+
+(function () {
+ var array = [6, ,6];
+
+ function slice() {
+ return array.slice();
+ }
+
+ assertEquals(slice(),array);
+ slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+ slice();
+
+ // change the prototype
+ array.__proto__ = [,6,];
+ // deopt because the map changed
+ let narr = slice();
+
+ // If it had not deopted, we would get [6, , 6].
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 1), undefined);
+ assertEquals(narr, [6,6,6]);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/dataview-deopt.js b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
new file mode 100644
index 0000000000..b19de30e5d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
@@ -0,0 +1,58 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+// Check that there are no deopt loops for DataView methods.
+
+var buffer = new ArrayBuffer(64);
+var dataview = new DataView(buffer, 8, 24);
+
+// Check DataView getters.
+
+function readUint8(offset) {
+ return dataview.getUint8(offset);
+}
+
+function warmupRead(f) {
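+// Collect feedback with Smi offsets, optimize, then run the optimized code.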
+ f(0);
+ f(1);
+ %OptimizeFunctionOnNextCall(f);
+ f(2);
+ f(3);
+}
+
+warmupRead(readUint8);
+assertOptimized(readUint8);
+readUint8(0.5); // Deopts.
+assertUnoptimized(readUint8);
+
+warmupRead(readUint8);
+assertOptimized(readUint8);
+readUint8(1.5); // Doesn't deopt because getUint8 didn't get inlined this time.
+assertOptimized(readUint8);
+
+// Check DataView setters.
+
+function writeUint8(offset, value) {
+ dataview.setUint8(offset, value);
+}
+
+function warmupWrite(f) {
+ f(0, 0);
+ f(0, 1);
+ %OptimizeFunctionOnNextCall(f);
+ f(0, 2);
+ f(0, 3);
+}
+
+warmupWrite(writeUint8);
+assertOptimized(writeUint8);
+writeUint8(0.5, 0); // Deopts.
+assertUnoptimized(writeUint8);
+
+warmupWrite(writeUint8);
+assertOptimized(writeUint8);
+writeUint8(1.5, 0); // Doesn't deopt.
+assertOptimized(writeUint8);
diff --git a/deps/v8/test/mjsunit/compiler/dataview-get.js b/deps/v8/test/mjsunit/compiler/dataview-get.js
index 6c64410344..78c6bdf4ac 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-get.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-get.js
@@ -131,43 +131,56 @@ assertEquals(b4, readFloat64(16));
dataview.setFloat64(16, b4, true);
assertEquals(b4, readFloat64(16, true));
-
-// TurboFan out of bounds read, throw with exception handler.
+// TurboFan out of bounds reads deopt.
assertOptimized(readInt8Handled);
assertInstanceof(readInt8Handled(24), RangeError);
-assertOptimized(readInt8Handled);
+assertUnoptimized(readInt8Handled);
assertOptimized(readInt16Handled);
assertInstanceof(readInt16Handled(23), RangeError);
-assertOptimized(readInt16Handled);
+assertUnoptimized(readInt16Handled);
assertOptimized(readInt32Handled);
assertInstanceof(readInt32Handled(21), RangeError);
-assertOptimized(readInt32Handled);
+assertUnoptimized(readInt32Handled);
// Without exception handler.
assertOptimized(readUint8);
assertThrows(() => readUint8(24));
-assertOptimized(readUint8);
+assertUnoptimized(readUint8);
assertOptimized(readFloat32);
assertThrows(() => readFloat32(21));
-assertOptimized(readFloat32);
+assertUnoptimized(readFloat32);
assertOptimized(readFloat64);
assertThrows(() => readFloat64(17));
-assertOptimized(readFloat64);
-
+assertUnoptimized(readFloat64);
-// TurboFan deoptimizations.
-assertOptimized(readInt8Handled);
-assertInstanceof(readInt8Handled(-1), RangeError); // Negative Smi deopts.
-assertUnoptimized(readInt8Handled);
-
-warmup(readInt8Handled);
-assertOptimized(readInt8Handled);
-assertEquals(values[3], readInt8Handled(3.14)); // Non-Smi index deopts.
-assertUnoptimized(readInt8Handled);
-
-// TurboFan neutered buffer.
-warmup(readInt8Handled);
-assertOptimized(readInt8Handled);
-%ArrayBufferNeuter(buffer);
-assertInstanceof(readInt8Handled(0), TypeError);
-assertOptimized(readInt8Handled);
+// Negative Smi deopts.
+(function() {
+ function readInt8Handled(offset) {
+ try { return dataview.getInt8(offset); } catch (e) { return e; }
+ }
+ warmup(readInt8Handled);
+ assertOptimized(readInt8Handled);
+ assertInstanceof(readInt8Handled(-1), RangeError);
+ assertUnoptimized(readInt8Handled);
+})();
+
+// Non-Smi index deopts.
+(function() {
+ function readUint8(offset) { return dataview.getUint8(offset); }
+ warmup(readUint8);
+ assertOptimized(readUint8);
+ assertEquals(values[3], readUint8(3.14));
+ assertUnoptimized(readUint8);
+})();
+
+// TurboFan neutered buffer deopts.
+(function() {
+ function readInt8Handled(offset) {
+ try { return dataview.getInt8(offset); } catch (e) { return e; }
+ }
+ warmup(readInt8Handled);
+ assertOptimized(readInt8Handled);
+ %ArrayBufferNeuter(buffer);
+ assertInstanceof(readInt8Handled(0), TypeError);
+ assertUnoptimized(readInt8Handled);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/dataview-set.js b/deps/v8/test/mjsunit/compiler/dataview-set.js
index 07bcb8d93c..4759597881 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-set.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-set.js
@@ -117,15 +117,15 @@ assertEquals(b4, dataview.getFloat64(8));
writeFloat64(8, b4, true);
assertEquals(b4, dataview.getFloat64(8, true));
-// TurboFan out of bounds read, throw with exception handler.
+// TurboFan out of bounds read, deopt.
assertOptimized(writeInt8Handled);
assertInstanceof(writeInt8Handled(24, 0), RangeError);
-assertOptimized(writeInt8Handled);
+assertUnoptimized(writeInt8Handled);
-// Without exception handler.
+// Without exception handler, deopt too.
assertOptimized(writeUint8);
assertThrows(() => writeUint8(24, 0));
-assertOptimized(writeUint8);
+assertUnoptimized(writeUint8);
// None of the stores wrote out of bounds.
var bytes = new Uint8Array(buffer);
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js b/deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js
new file mode 100644
index 0000000000..b75f3185ac
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-iterator-prototype-next.js
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+(function () {
+ var array = [,];
+
+ function next() {
+ return array[Symbol.iterator]().next();
+ }
+
+ assertEquals(next().value, undefined);
+ assertEquals(next().value, undefined);
+
+ %OptimizeFunctionOnNextCall(next);
+ assertEquals(next().value, undefined);
+
+ array.__proto__.push(5);
+ assertEquals(next().value, 5);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js
new file mode 100644
index 0000000000..0ebf126fa6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-every.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// every
+
+(function () {
+ var array = [,];
+
+ function every() {
+ return array.every(v => v > 0);
+ }
+
+ every(); every();
+
+ %OptimizeFunctionOnNextCall(every);
+ assertEquals(every(), true);
+
+ array.__proto__.push(-6);
+ // deopt
+ assertEquals(every(), false);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js
new file mode 100644
index 0000000000..fcae939eb8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-filter.js
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// filter
+
+(function () {
+ var array = [,];
+
+ function filter() {
+ return array.filter(v => v > 0);
+ }
+
+ filter(); filter();
+
+ %OptimizeFunctionOnNextCall(filter);
+ assertEquals(filter(), []);
+
+ array.__proto__.push(6);
+ // deopt
+ var narr = filter();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 0), undefined);
+ assertEquals(narr, [6]);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js
new file mode 100644
index 0000000000..e8b5b9bd1b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-find.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// find
+
+(function () {
+ var array = [,];
+
+ function find() {
+ return array.find(v => v > 0);
+ }
+
+ find(); find();
+
+ %OptimizeFunctionOnNextCall(find);
+ assertEquals(find(), undefined);
+
+ array.__proto__.push(6);
+ // deopt
+ assertEquals(find(), 6);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js
new file mode 100644
index 0000000000..37866a4506
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-findindex.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// findIndex
+
+(function () {
+ var array = [,];
+
+ function findIndex() {
+ return array.findIndex(v => v > 0);
+ }
+
+ findIndex(); findIndex();
+
+ %OptimizeFunctionOnNextCall(findIndex);
+ assertEquals(findIndex(), -1);
+
+ array.__proto__.push(6);
+ // deopt
+ assertEquals(findIndex(), 0);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js
new file mode 100644
index 0000000000..6db9078e1b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-foreach.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// forEach
+
+(function () {
+ var array = [,];
+
+ function increment (v, k, arr) { arr[k] = v + 1; }
+ function forEach() {
+ array.forEach(increment);
+ }
+ forEach(); forEach();
+
+ %OptimizeFunctionOnNextCall(forEach);
+
+ forEach();
+ assertEquals(array, [,]);
+
+ array.__proto__.push(5);
+ assertEquals(Object.getOwnPropertyDescriptor(array, 0), undefined);
+ // deopt
+ forEach();
+ assertNotEquals(Object.getOwnPropertyDescriptor(array, 0), undefined);
+ assertEquals(array[0], 6); // this reads from the prototype
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js
new file mode 100644
index 0000000000..77aedfe1e6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-includes.js
@@ -0,0 +1,103 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// includes
+
+(function() {
+ const iarr = [0,1,2];
+ const darr = [0.0, 2.0, 3.3];
+
+ function includes(arr, val) {
+ return arr.includes(val);
+ }
+
+ assertTrue(includes(iarr, 0)); assertTrue(includes(darr, 0));
+ assertTrue(includes(iarr, 2)); assertTrue(includes(darr, 2));
+
+ // JSCallReducer for includes will not reduce because it only works with a single map
+ %OptimizeFunctionOnNextCall(includes);
+
+ assertTrue(includes(iarr, 0));
+ assertTrue(includes(darr, 0));
+})();
+
+(function() {
+ const iarr = [0,1,2];
+
+ function includes(arr, val) {
+ return arr.includes(val);
+ }
+
+ assertTrue(includes(iarr, 0));
+ assertTrue(includes(iarr, 2));
+
+ %OptimizeFunctionOnNextCall(includes);
+
+ assertTrue(includes(iarr, 0));
+
+ const darr = [0.0, 2.0, 3.3];
+ // deopt because of map change
+ assertTrue(includes(darr, 0));
+})();
+
+(function() {
+ const iarr = [,3];
+
+ function includes(arr, val) {
+ return arr.includes(val);
+ }
+
+ iarr.__proto__ = [2];
+
+ // get feedback
+ assertFalse(includes(iarr, 0));
+ assertTrue(includes(iarr, 2));
+
+ %OptimizeFunctionOnNextCall(includes);
+
+ assertFalse(includes(iarr, 0));
+
+ assertTrue(includes(iarr, 2));
+})();
+
+(function() {
+ const iarr = [,3];
+
+ function includes(arr, val) {
+ return arr.includes(val);
+ }
+
+ assertFalse(includes(iarr, 2));
+ assertTrue(includes(iarr, 3));
+
+ %OptimizeFunctionOnNextCall(includes);
+ assertFalse(includes(iarr, 2));
+
+ // deopt because of map change
+ iarr.__proto__ = [2];
+ assertTrue(includes(iarr, 2));
+})();
+
+// This pollutes the Array prototype, so we should not run more tests
+// in the same environment after this.
+(function () {
+ var array = [,];
+
+ function includes(val) {
+ return array.includes(val);
+ }
+
+ includes(6); includes(6);
+
+ %OptimizeFunctionOnNextCall(includes);
+ assertFalse(includes(6));
+
+ array.__proto__.push(6);
+ // deopt because of no_elements_protector
+ assertTrue(includes(6));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js
new file mode 100644
index 0000000000..78cf60507c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-indexof.js
@@ -0,0 +1,104 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// indexOf
+
+(function() {
+ const iarr = [0,1,2];
+ const darr = [0.0, 2.0, 3.3];
+
+ function indexOf(arr, val) {
+ return arr.indexOf(val);
+ }
+
+ assertEquals(0, indexOf(iarr, 0));
+ assertEquals(0, indexOf(darr, 0));
+ assertEquals(2, indexOf(iarr, 2));
+ assertEquals(1, indexOf(darr, 2));
+
+ // JSCallReducer for indexOf will not reduce
+ // because it only works with a single map
+ %OptimizeFunctionOnNextCall(indexOf);
+
+ assertEquals(0, indexOf(iarr, 0));
+ assertEquals(0, indexOf(darr, 0));
+})();
+
+(function() {
+ const iarr = [0,1,2];
+
+ function indexOf(arr, val) {
+ return arr.indexOf(val);
+ }
+
+ assertEquals(0, indexOf(iarr, 0));
+ assertEquals(2, indexOf(iarr, 2));
+
+ %OptimizeFunctionOnNextCall(indexOf);
+
+ assertEquals(0, indexOf(iarr, 0));
+
+ const darr = [0.0, 2.0, 3.3];
+ // deopt because of map change
+ assertEquals(0, indexOf(darr, 0));
+})();
+
+(function() {
+ const iarr = [,3];
+
+ function indexOf(arr, val) {
+ return arr.indexOf(val);
+ }
+
+ iarr.__proto__ = [2];
+ assertEquals(-1, indexOf(iarr, 0));
+ assertEquals(0, indexOf(iarr, 2));
+
+ %OptimizeFunctionOnNextCall(indexOf);
+
+ assertEquals(-1, indexOf(iarr, 0));
+
+ assertEquals(0, indexOf(iarr, 2));
+})();
+
+(function() {
+ const iarr = [,3];
+
+ function indexOf(arr, val) {
+ return arr.indexOf(val);
+ }
+
+ assertEquals(-1, indexOf(iarr, 2));
+ assertEquals(1, indexOf(iarr, 3));
+
+ %OptimizeFunctionOnNextCall(indexOf);
+ assertEquals(-1, indexOf(iarr, 2));
+
+ // deopt because of map change
+ iarr.__proto__ = [2];
+ assertEquals(0, indexOf(iarr, 2));
+})();
+
+// This pollutes the Array prototype, so we should not run more tests
+// in the same environment after this.
+(function () {
+ var array = [,];
+
+ function indexOf(val) {
+ return array.indexOf(val);
+ }
+
+ indexOf(6); indexOf(6);
+
+ %OptimizeFunctionOnNextCall(indexOf);
+ assertEquals(indexOf(6), -1);
+
+ array.__proto__.push(6);
+ // deopt because of no_elements_protector
+ assertEquals(indexOf(6), 0);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js
new file mode 100644
index 0000000000..d47926cd3d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-lastindexof.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// lastIndexOf
+
+(function () {
+ var array = [,];
+
+ function lastIndexOf(val) {
+ return array.lastIndexOf(val);
+ }
+
+ lastIndexOf(6); lastIndexOf(6);
+
+ %OptimizeFunctionOnNextCall(lastIndexOf);
+ assertEquals(lastIndexOf(6), -1);
+
+ array.__proto__.push(6);
+ // deopt
+ assertEquals(lastIndexOf(6), 0);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js
new file mode 100644
index 0000000000..29c7d32174
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-map.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// map
+
+(function () {
+ var array = [,];
+
+ function map() {
+ return array.map(x => x + 1);
+ }
+
+ map(); map();
+
+ %OptimizeFunctionOnNextCall(map);
+
+ assertEquals(map(), [,]);
+
+ array.__proto__.push(5);
+ // deopt
+ var narr = map();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 0), undefined);
+ assertEquals(narr[0], 6);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js
new file mode 100644
index 0000000000..6d0fe068e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-pop.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test behaviors when the prototype has elements */
+
+// pop
+
+(function () {
+ var array = [, , , ,];
+
+ function pop() {
+ return array.pop();
+ }
+
+ assertEquals(pop(), undefined);
+ assertEquals(pop(), undefined);
+
+ %OptimizeFunctionOnNextCall(pop);
+ assertEquals(pop(), undefined);
+
+ array.__proto__.push(6);
+ assertEquals(pop(), 6);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js
new file mode 100644
index 0000000000..d2ba0db0f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-reduce.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// reduce
+
+(function () {
+ var array = [, 3];
+
+ function accumulate (prev, cur, curIdx, arr) { arr[curIdx] = cur + prev; }
+ function reduce() {
+ array.reduce(accumulate);
+ }
+
+ reduce(); reduce();
+
+ %OptimizeFunctionOnNextCall(reduce);
+
+ reduce();
+ assertEquals(array, [,3]);
+
+ array.__proto__.push(3);
+ // deopt
+ reduce();
+ assertEquals(array, [, 6]);
+ assertEquals(Object.getOwnPropertyDescriptor(array, 0), undefined);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js
new file mode 100644
index 0000000000..6b2d5fa22d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-slice.js
@@ -0,0 +1,29 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// slice
+
+(function () {
+ var array = [,];
+
+ function slice() {
+ return array.slice();
+ }
+
+ slice(); slice();
+
+ %OptimizeFunctionOnNextCall(slice);
+
+ assertEquals(slice(), [,]);
+
+ array.__proto__.push(5);
+ // deopt
+ var narr = slice();
+ assertNotEquals(Object.getOwnPropertyDescriptor(narr, 0), undefined);
+ assertEquals(narr[0], 5);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js
new file mode 100644
index 0000000000..d9604d0c76
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-array-prototype-some.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+/* Test deopt behaviors when the prototype has elements */
+
+// some
+
+(function () {
+ var array = [,];
+
+ function some() {
+ return array.some(v => v > 0);
+ }
+
+ some(); some();
+
+ %OptimizeFunctionOnNextCall(some);
+ assertEquals(some(), false);
+
+ array.__proto__.push(6);
+ // deopt
+ assertEquals(some(), true);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/number-constructor-deopt.js b/deps/v8/test/mjsunit/compiler/number-constructor-deopt.js
new file mode 100644
index 0000000000..edffc9ec53
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/number-constructor-deopt.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This test writes {} to x to trigger lazy deopt
+// from inside the number constructor.
+var x = "5";
+var b = false;
+
+check = function() {
+ if (b) x = {};
+ return 0;
+}
+
+var obj = {};
+obj.valueOf = check;
+
+function f() {
+ try {
+ return x + Number(obj);
+ } catch(e) {
+ console.log(e.stack);
+ }
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+b = true;
+f();
diff --git a/deps/v8/test/mjsunit/empirical_max_arraybuffer.js b/deps/v8/test/mjsunit/empirical_max_arraybuffer.js
new file mode 100644
index 0000000000..cacefd4692
--- /dev/null
+++ b/deps/v8/test/mjsunit/empirical_max_arraybuffer.js
@@ -0,0 +1,101 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let kMax31BitSmi = (1 << 30) - 1;
+let k1MiB = 1 * 1024 * 1024;
+let k1GiB = 1 * 1024 * 1024 * 1024;
+let k4GiB = 4 * k1GiB;
+let kPageSize = 65536;
+let kMaxArrayBufferSize = 2 * k1GiB - kPageSize; // TODO(titzer): raise this to 4GiB
+let kStrideLength = 65536;
+
+(function Test() {
+ var buffer;
+ try {
+ buffer = new ArrayBuffer(kMaxArrayBufferSize);
+ } catch (e) {
+ print("OOM: sorry, best effort max array buffer size test!");
+ return;
+ }
+
+ print("Allocated " + buffer.byteLength + " bytes");
+ assertEquals(kMaxArrayBufferSize, buffer.byteLength);
+
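+ // Write a deterministic pattern into the view at regular strides, then
+ // read it back and verify each written element.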
+ function probe(view, stride, f) {
+ print("--------------------");
+ let max = view.length;
+ for (let i = 0; i < max; i += stride) {
+ view[i] = f(i);
+ }
+ for (let i = 0; i < max; i += stride) {
+ // print(`${i} = ${f(i)}`);
+ assertEquals(f(i), view[i]);
+ }
+ }
+
+ {
+ // Make an uint32 view and probe it.
+ let elemSize = 4;
+ let viewSize = kMaxArrayBufferSize / elemSize;
+ // TODO(titzer): view sizes are limited to 31 bit SMIs. fix.
+ if (viewSize <= kMax31BitSmi) {
+ let uint32 = new Uint32Array(buffer);
+ assertEquals(kMaxArrayBufferSize / elemSize, uint32.length);
+ probe(uint32, kStrideLength / elemSize,
+ i => (0xaabbccee ^ ((i >> 11) * 0x110005)) >>> 0);
+ }
+ }
+
+ {
+ // Make an uint16 view and probe it.
+ let elemSize = 2;
+ let viewSize = kMaxArrayBufferSize / elemSize;
+ // TODO(titzer): view sizes are limited to 31 bit SMIs. fix.
+ if (viewSize <= kMax31BitSmi) {
+ let uint16 = new Uint16Array(buffer);
+ assertEquals(kMaxArrayBufferSize / elemSize, uint16.length);
+ probe(uint16, kStrideLength / elemSize,
+ i => (0xccee ^ ((i >> 11) * 0x110005)) & 0xFFFF);
+ }
+ }
+
+ {
+ // Make an uint8 view and probe it.
+ let elemSize = 1;
+ let viewSize = kMaxArrayBufferSize / elemSize;
+ // TODO(titzer): view sizes are limited to 31 bit SMIs. fix.
+ if (viewSize <= kMax31BitSmi) {
+ let uint8 = new Uint8Array(buffer);
+ assertEquals(kMaxArrayBufferSize / elemSize, uint8.length);
+ probe(uint8, kStrideLength / elemSize,
+ i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
+ }
+ }
+
+ {
+ // Make a float64 view and probe it.
+ let elemSize = 8;
+ let viewSize = kMaxArrayBufferSize / elemSize;
+ // TODO(titzer): view sizes are limited to 31 bit SMIs. fix.
+ if (viewSize <= kMax31BitSmi) {
+ let float64 = new Float64Array(buffer);
+ assertEquals(kMaxArrayBufferSize / elemSize, float64.length);
+ probe(float64, kStrideLength / elemSize,
+ i => 0xaabbccee ^ ((i >> 11) * 0x110005));
+ }
+ }
+
+ {
+ // Make a float32 view and probe it.
+ let elemSize = 4;
+ let viewSize = kMaxArrayBufferSize / elemSize;
+ // TODO(titzer): view sizes are limited to 31 bit SMIs. fix.
+ if (viewSize <= kMax31BitSmi) {
+ let float32 = new Float32Array(buffer);
+ assertEquals(kMaxArrayBufferSize / elemSize, float32.length);
+ probe(float32, kStrideLength / elemSize,
+ i => Math.fround(0xaabbccee ^ ((i >> 11) * 0x110005)));
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es6/array-fill-receiver.js b/deps/v8/test/mjsunit/es6/array-fill-receiver.js
new file mode 100644
index 0000000000..21d7a2ab03
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-fill-receiver.js
@@ -0,0 +1,118 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Ensure `Array.prototype.fill` works correctly for the various elements
+// kinds.
+
+// If no arguments are provided, call Array.p.fill without any arguments,
+// otherwise the test is allowed to specify what value to use to better control
+// ElementsKind transitions. From and to are provided by the harness.
+function callAndAssertFill(object, test_value, harness_value, from, to) {
+ let value = arguments.length > 2 ? test_value : harness_value;
+
+ Array.prototype.fill.call(object, value, from, to);
+
+ %HeapObjectVerify(object);
+ assertArrayHasValueInRange(object, value, from, to);
+}
+
+function assertArrayHasValueInRange(obj, value, from, to) {
+ for (let i = from; i < to; ++i) {
+ assertEquals(value, obj[i]);
+ }
+}
+
+// Tests are executed multiple times. Creating arrays using literal notation
+// will create COW-Arrays, which will propagate the most general ElementsKind
+// back to their allocation site.
+// pristineArray will always return a 🐄-Array with the ElementsKind we actually
+// want.
+let i = 0;
+function pristineArray(str) {
+ return eval(str + "//" + (i++));
+}
+
+let tests = {
+ ARRAY_PACKED_ELEMENTS(value, from, to) {
+ let array = pristineArray(
+ `["Some string", {}, /foobar/, "Another string", {}]`);
+ assertTrue(%HasObjectElements(array));
+ assertFalse(%HasHoleyElements(array));
+
+ callAndAssertFill(array, "42", ...arguments);
+ },
+
+ ARRAY_HOLEY_ELEMENTS(value, from, to) {
+ let array = pristineArray(`["Some string", , {}, , "Another string"]`);
+ assertTrue(%HasObjectElements(array));
+ assertTrue(%HasHoleyElements(array));
+
+ callAndAssertFill(array, "42", ...arguments);
+ },
+
+ ARRAY_PACKED_SMI_ELEMENTS(value, from, to) {
+ let array = pristineArray(`[0, -42, 5555, 23, 6]`);
+ assertTrue(%HasSmiElements(array));
+ assertFalse(%HasHoleyElements(array));
+
+ callAndAssertFill(array, 42, ...arguments);
+ },
+
+ ARRAY_HOLEY_SMI_ELEMENTS(value, from, to) {
+ let array = pristineArray(`[0, , 5555, , 6]`);
+ assertTrue(%HasSmiElements(array));
+ assertTrue(%HasHoleyElements(array));
+
+ callAndAssertFill(array, 42, ...arguments);
+ },
+
+ ARRAY_PACKED_DOUBLE_ELEMENTS(value, from, to) {
+ let array = pristineArray(`[3.14, 7.00001, NaN, -25.3333, 1.0]`);
+ assertTrue(%HasDoubleElements(array));
+ assertFalse(%HasHoleyElements(array));
+
+ callAndAssertFill(array, 42.42, ...arguments);
+ },
+
+ ARRAY_HOLEY_DOUBLE_ELEMENTS(value, from, to) {
+ let array = pristineArray(`[3.14, , , , 1.0]`);
+ assertTrue(%HasDoubleElements(array));
+ assertTrue(%HasHoleyElements(array));
+
+ callAndAssertFill(array, 42.42, ...arguments);
+ },
+
+ ARRAY_DICTIONARY_ELEMENTS(value, from, to) {
+ let array = pristineArray(`[0, , 2, 3, 4]`);
+ Object.defineProperty(array, 1, { get() { return this.foo; },
+ set(val) { this.foo = val; }});
+ assertTrue(%HasDictionaryElements(array));
+
+ callAndAssertFill(array, "42", ...arguments);
+ }
+
+ // TODO(szuend): Add additional tests for receivers other than arrays
+ // (Objects, TypedArrays, etc.).
+};
+
+function RunTest(test) {
+ test();
+ test(undefined);
+ test(undefined, 1);
+ test(undefined, 1, 4);
+}
+
+function RunTests(tests) {
+ Object.keys(tests).forEach(test => RunTest(tests[test]));
+}
+
+RunTests(tests);
+
+Array.prototype.__proto__ = {
+ __proto__: Array.prototype.__proto__
+};
+
+RunTests(tests);
diff --git a/deps/v8/test/mjsunit/es6/array-fill.js b/deps/v8/test/mjsunit/es6/array-fill.js
index ef316e8146..8ca41c7248 100644
--- a/deps/v8/test/mjsunit/es6/array-fill.js
+++ b/deps/v8/test/mjsunit/es6/array-fill.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
assertEquals(1, Array.prototype.fill.length);
assertArrayEquals([].fill(8), []);
@@ -28,3 +30,98 @@ assertArrayEquals(Object.freeze([1, 2, 3]).fill(0, 0, 0), [1, 2, 3]);
assertThrows('Object.freeze([0]).fill()', TypeError);
assertThrows('Array.prototype.fill.call(null)', TypeError);
assertThrows('Array.prototype.fill.call(undefined)', TypeError);
+
+function TestFillObjectWithAccessors() {
+ const kLength = 5;
+
+ let log = [];
+
+ let object = {
+ length: kLength,
+ get 1() {
+ log.push("get 1");
+ return this.foo;
+ },
+
+ set 1(val) {
+ log.push("set 1 " + val);
+ this.foo = val;
+ }
+ };
+
+ Array.prototype.fill.call(object, 42);
+
+ %HeapObjectVerify(object);
+ assertEquals(kLength, object.length);
+ assertArrayEquals(["set 1 42"], log);
+
+ for (let i = 0; i < kLength; ++i) {
+ assertEquals(42, object[i]);
+ }
+}
+TestFillObjectWithAccessors();
+
+function TestFillObjectWithMaxNumberLength() {
+ const kMaxSafeInt = 2 ** 53 - 1;
+ let object = {};
+ object.length = kMaxSafeInt;
+
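+ // Start filling near the end so only the last three indices are touched.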
+ Array.prototype.fill.call(object, 42, 2 ** 53 - 4);
+
+ %HeapObjectVerify(object);
+ assertEquals(kMaxSafeInt, object.length);
+ assertEquals(42, object[kMaxSafeInt - 3]);
+ assertEquals(42, object[kMaxSafeInt - 2]);
+ assertEquals(42, object[kMaxSafeInt - 1]);
+}
+TestFillObjectWithMaxNumberLength();
+
+function TestFillObjectWithPrototypeAccessors() {
+ const kLength = 5;
+ let log = [];
+ let proto = {
+ get 1() {
+ log.push("get 1");
+ return this.foo;
+ },
+
+ set 1(val) {
+ log.push("set 1 " + val);
+ this.foo = val;
+ }
+ };
+
+ let object = { __proto__: proto, 0:0, 2:2, length: kLength};
+
+ Array.prototype.fill.call(object, "42");
+
+ %HeapObjectVerify(object);
+ assertEquals(kLength, object.length);
+ assertArrayEquals(["set 1 42"], log);
+ assertTrue(object.hasOwnProperty(0));
+ assertFalse(object.hasOwnProperty(1));
+ assertTrue(object.hasOwnProperty(2));
+ assertTrue(object.hasOwnProperty(3));
+ assertTrue(object.hasOwnProperty(4));
+
+ for (let i = 0; i < kLength; ++i) {
+ assertEquals("42", object[i]);
+ }
+}
+TestFillObjectWithPrototypeAccessors();
+
+function TestFillSealedObject() {
+ let object = { length: 42 };
+ Object.seal(object);
+
+ assertThrows(() => Array.prototype.fill.call(object), TypeError);
+}
+TestFillSealedObject();
+
+function TestFillFrozenObject() {
+ let object = { length: 42 };
+ Object.freeze(object);
+
+ assertThrows(() => Array.prototype.fill.call(object), TypeError);
+}
+TestFillFrozenObject();
diff --git a/deps/v8/test/mjsunit/es6/array-iterator.js b/deps/v8/test/mjsunit/es6/array-iterator.js
index b143c8c034..62485dfc2c 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator.js
@@ -252,3 +252,16 @@ function TestNonOwnSlots() {
}, TypeError);
}
TestNonOwnSlots();
+
+function TestForDictionaryArray() {
+ var array = [];
+ array[1024] = 'c';
+ assertTrue(%HasDictionaryElements(array));
+ var iterator = array[Symbol.iterator]();
+ for (var i = 0; i < 1024; ++i) {
+ assertIteratorResult(void 0, false, iterator.next());
+ }
+ assertIteratorResult('c', false, iterator.next());
+ assertIteratorResult(void 0, true, iterator.next());
+}
+TestForDictionaryArray();
diff --git a/deps/v8/test/mjsunit/es6/math-log2-log10.js b/deps/v8/test/mjsunit/es6/math-log2-log10.js
index ea17a79daf..eeacee927b 100644
--- a/deps/v8/test/mjsunit/es6/math-log2-log10.js
+++ b/deps/v8/test/mjsunit/es6/math-log2-log10.js
@@ -44,7 +44,9 @@
for (var i = -310; i <= 308; i += 0.5) {
assertEquals(i, Math.log10(Math.pow(10, i)));
// Square roots are tested below.
- if (i != -0.5 && i != 0.5) assertEquals(i, Math.log2(Math.pow(2, i)));
+ if (i != -0.5 && i != 0.5) {
+ assertEqualsDelta(i, Math.log2(Math.pow(2, i)), Number.EPSILON);
+ }
}
// Test denormals.
diff --git a/deps/v8/test/mjsunit/es6/promise-all-overflow-1.js b/deps/v8/test/mjsunit/es6/promise-all-overflow-1.js
index e86edbbc27..1a1cb4b61b 100644
--- a/deps/v8/test/mjsunit/es6/promise-all-overflow-1.js
+++ b/deps/v8/test/mjsunit/es6/promise-all-overflow-1.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
// Make sure we properly throw a RangeError when overflowing the maximum
// number of elements for Promise.all, which is capped at 2^21 bits right
// now, since we store the indices as identity hash on the resolve element
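For reference, a minimal sketch (hypothetical, not part of this patch) of how the cap described above could be hit, assuming that an iterable yielding one element past 2**21 makes Promise.all reject with a RangeError:

    function* tooMany() {
      for (let i = 0; i <= 2 ** 21; i++) yield Promise.resolve(i);
    }
    Promise.all(tooMany()).then(
        () => { throw new Error('expected a RangeError rejection'); },
        (e) => assertInstanceof(e, RangeError));  // mjsunit helper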
diff --git a/deps/v8/test/mjsunit/es6/promise-all-overflow-2.js b/deps/v8/test/mjsunit/es6/promise-all-overflow-2.js
index ece2c5b9b9..61d0bd9ce5 100644
--- a/deps/v8/test/mjsunit/es6/promise-all-overflow-2.js
+++ b/deps/v8/test/mjsunit/es6/promise-all-overflow-2.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
// Test that pre-allocation of the result array works even if it needs to be
// allocated in large object space.
const a = new Array(64 * 1024);
diff --git a/deps/v8/test/mjsunit/es6/promise-all.js b/deps/v8/test/mjsunit/es6/promise-all.js
index c60d3069a6..3a0980d425 100644
--- a/deps/v8/test/mjsunit/es6/promise-all.js
+++ b/deps/v8/test/mjsunit/es6/promise-all.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
// We store the index in the hash code field of the Promise.all resolve
// element closures, so make sure we properly handle the cases where this
// magical field turns into a PropertyArray later.
diff --git a/deps/v8/test/mjsunit/es6/proxies.js b/deps/v8/test/mjsunit/es6/proxies.js
index f67f9df41e..fc59b346b7 100644
--- a/deps/v8/test/mjsunit/es6/proxies.js
+++ b/deps/v8/test/mjsunit/es6/proxies.js
@@ -55,7 +55,7 @@ function TestWithFunctionProxy(test, x, y, z) {
(function TestProxyProperties() {
assertEquals(2, Proxy.length);
assertEquals(Function.__proto__, Proxy.__proto__);
- assertEquals(null, Proxy.prototype);
+ assertEquals(undefined, Proxy.prototype);
assertEquals(undefined, Object.getOwnPropertyDescriptor(Proxy, "arguments"));
assertThrows(() => Proxy.arguments, TypeError);
assertEquals(undefined, Object.getOwnPropertyDescriptor(Proxy, "caller"));
diff --git a/deps/v8/test/mjsunit/es9/object-spread-ic.js b/deps/v8/test/mjsunit/es9/object-spread-ic.js
new file mode 100644
index 0000000000..d76ffd4eb8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/object-spread-ic.js
@@ -0,0 +1,101 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testDoubleElements() {
+ function f(src) { return {...src}; }
+ var src = [1.5];
+ src[0] = 1;
+
+ // Uninitialized
+ assertEquals({ 0: 1 }, f(src));
+
+ src[0] = 1.3;
+
+ // Monomorphic
+ assertEquals({ 0: 1.3 }, f(src));
+})();
+
+(function testInObjectProperties() {
+ function f(src) { return {...src}; }
+ function C() { this.foo = "foo"; }
+ var src;
+ for (var i = 0; i < 10; ++i) {
+ src = new C();
+ }
+
+ // Uninitialized
+ assertEquals({ foo: "foo" }, f(src));
+
+ // Monomorphic
+ assertEquals({ foo: "foo" }, f(src));
+})();
+
+(function testInObjectProperties2() {
+ function f(src) { return {...src}; }
+ function C() {
+ this.foo = "foo";
+ this.p0 = "0";
+ this.p1 = "1";
+ this.p2 = "2";
+ this.p3 = "3";
+ }
+ var src;
+ for (var i = 0; i < 10; ++i) {
+ src = new C();
+ }
+
+ // Uninitialized
+ assertEquals({ foo: "foo", p0: "0", p1: "1", p2: "2", p3: "3" }, f(src));
+
+ // Monomorphic
+ assertEquals({ foo: "foo", p0: "0", p1: "1", p2: "2", p3: "3" }, f(src));
+})();
+
+(function testPolymorphicToMegamorphic() {
+ function f(src) { return {...src}; }
+ function C1() {
+ this.foo = "foo";
+ this.p0 = "0";
+ this.p1 = "1";
+ this.p2 = "2";
+ this.p3 = "3";
+ }
+ function C2() {
+ this.p0 = "0";
+ this.p1 = "1";
+ this[0] = 0;
+ }
+ function C3() {
+ this.x = 774;
+ this.y = 663;
+ this.rgb = 0xFF00FF;
+ }
+ function C4() {
+ this.qqq = {};
+ this.v_1 = [];
+ this.name = "C4";
+ this.constructor = C4;
+ }
+
+ // Uninitialized
+ assertEquals({ foo: "foo", p0: "0", p1: "1", p2: "2", p3: "3" }, f(new C1()));
+
+ // Monomorphic
+ assertEquals({ foo: "foo", p0: "0", p1: "1", p2: "2", p3: "3" }, f(new C1()));
+
+ // Polymorphic (2)
+ assertEquals({ 0: 0, p0: "0", p1: "1" }, f(new C2()));
+ assertEquals({ 0: 0, p0: "0", p1: "1" }, f(new C2()));
+
+ // Polymorphic (3)
+ assertEquals({ x: 774, y: 663, rgb: 0xFF00FF }, f(new C3()));
+ assertEquals({ x: 774, y: 663, rgb: 0xFF00FF }, f(new C3()));
+
+ // Polymorphic (4)
+ assertEquals({ qqq: {}, v_1: [], name: "C4", constructor: C4 }, f(new C4()));
+ assertEquals({ qqq: {}, v_1: [], name: "C4", constructor: C4 }, f(new C4()));
+
+ // Megamorphic
+ assertEquals({ boop: 1 }, f({ boop: 1 }));
+})();
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-866229.js b/deps/v8/test/mjsunit/es9/regress/regress-866229.js
new file mode 100644
index 0000000000..7e98c6ae8a
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-866229.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var obj = { length: 1, 0: "spread" };
+obj[Symbol.toStringTag] = "foo";
+obj[Symbol.hasInstance] = function() { return true; };
+obj[Symbol.isConcatSpreadable] = true;
+
+var obj2 = { ...obj };
+
+// Crashes if the fast result map's bit field is not set correctly when verifying the heap.
+%HeapObjectVerify(obj2);
+
+// Ensure correct result for some well-known symbols
+assertEquals("[object foo]", Object.prototype.toString.call(obj2));
+assertTrue(Uint8Array instanceof obj2);
+assertEquals(["spread"], [].concat(obj2));
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-866282.js b/deps/v8/test/mjsunit/es9/regress/regress-866282.js
new file mode 100644
index 0000000000..ae23c05c27
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-866282.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Runtime_ObjectCloneIC_Slow() source argument must be a HeapObject handle,
+// because undefined/null are allowed.
+function spread(o) { return { ...o }; }
+
+// Transition to MEGAMORPHIC
+assertEquals({}, spread(new function C1() {}));
+assertEquals({}, spread(new function C2() {}));
+assertEquals({}, spread(new function C3() {}));
+assertEquals({}, spread(new function C4() {}));
+assertEquals({}, spread(new function C5() {}));
+
+// Trigger Runtime_ObjectCloneIC_Slow() with a non-JSReceiver.
+assertEquals({}, spread(undefined));
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-866357.js b/deps/v8/test/mjsunit/es9/regress/regress-866357.js
new file mode 100644
index 0000000000..3b6230b0f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-866357.js
@@ -0,0 +1,17 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Check that we do appropriate used/unused field accounting
+var p = Promise.resolve();
+var then = p.then = () => {};
+
+function spread() { return { ...p }; }
+
+assertEquals({ then }, spread());
+assertEquals({ then }, spread());
+assertEquals({ then }, spread());
+%OptimizeFunctionOnNextCall(spread);
+assertEquals({ then }, spread());
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-866727.js b/deps/v8/test/mjsunit/es9/regress/regress-866727.js
new file mode 100644
index 0000000000..ddfcf7edf9
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-866727.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Check that IfException/IfSuccess rewiring works in JSInliner
+function test() {
+ var spread = function(value) { return { ...value }; }
+ try {
+ assertEquals({}, spread());
+ } catch (e) {}
+}
+
+test();
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-866861.js b/deps/v8/test/mjsunit/es9/regress/regress-866861.js
new file mode 100644
index 0000000000..dc6c764f5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-866861.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that property constness for out-of-object fields is valid
+var o = {};
+var toString = o.toString = function() {};
+try {
+assertEquals({ toString }, o = { ...o });
+} catch (e) {}
+o.toString = [];
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-867958.js b/deps/v8/test/mjsunit/es9/regress/regress-867958.js
new file mode 100644
index 0000000000..eca3f1e8b8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-867958.js
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that encountering deprecated Maps does not cause CloneObjectIC to
+// crash.
+var obj1 = { x: 1 };
+var obj2 = { x: 2 }; // same map
+obj2.x = null; // deprecate map
+
+function f() { return { ...obj1 } };
+assertEquals({ x: 1 }, f()); // cache miss; object migrated to the cached new map
+assertEquals({ x: 1 }, f()); // monomorphic cache-hit
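The deprecation above comes from a field representation change. A rough sketch of the same mechanism with fresh names, assuming (as the test implies) that 'x' starts out with small-integer representation:

    let a = { x: 1 };   // a and b share one hidden map; 'x' is stored as a small integer
    let b = { x: 2 };
    b.x = null;         // 'x' is generalized to hold any value; the shared map is deprecated
    let c = { ...a };   // CloneObjectIC must migrate 'a' to the replacement map before cloning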
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-869342.js b/deps/v8/test/mjsunit/es9/regress/regress-869342.js
new file mode 100644
index 0000000000..fceb98e68f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-869342.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+function spread(o) { return { ...o }; }
+
+(function setupPolymorphicFeedback() {
+ function C1() { this.p0 = 1; }
+ function C2() { this.p1 = 2; this.p2 = 3; }
+ assertEquals({ p0: 1 }, spread(new C1));
+ assertEquals({ p1: 2, p2: 3 }, spread(new C2));
+})();
+
+gc(); // Clobber cached map in feedback[0], and check that we don't crash
+function C3() { this.p0 = 3; }
+assertEquals({ p0: 3 }, spread(new C3));
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js b/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js
index 715c81fc21..2c6973e646 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-resume-return.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
// .return() from state suspendedStart with undefined
testAsync(test => {
test.plan(3);
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-return.js b/deps/v8/test/mjsunit/harmony/async-generators-return.js
index 27cbd4373b..45f359fd54 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-return.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-return.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
testAsync(test => {
test.plan(2);
@@ -68,7 +70,7 @@ testAsync(test => {
// Return a thenable which is fulfilled later
testAsync(test => {
- test.plan(2);
+ test.plan(3);
let resolve;
let awaitedThenable = { then(resolveFn) { resolve = resolveFn; } };
@@ -84,7 +86,8 @@ testAsync(test => {
gen().next().then(
(iterResult) => {
- test.equals({ value: "resolvedPromise", done: true }, iterResult);
+ test.equals("resolvedPromise", iterResult.value);
+ test.equals(true, iterResult.done);
test.equals(true, finallyEvaluated);
},
test.unexpectedRejection());
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-yield.js b/deps/v8/test/mjsunit/harmony/async-generators-yield.js
index feb6339af2..b34f6b8050 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-yield.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-yield.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
// Yield a thenable which is never settled
testAsync(test => {
test.plan(0);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js b/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
new file mode 100644
index 0000000000..4dedf4d27c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
@@ -0,0 +1,16 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-bigint
+
+function f(x, b) {
+ if (b) return Math.trunc(+(x))
+ else return Math.trunc(Number(x))
+}
+
+f("1", true);
+f("2", true);
+f("2", false);
+%OptimizeFunctionOnNextCall(f);
+f(3n);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/turbo.js b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
index 87130ea101..4ce4880f3d 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/turbo.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
@@ -31,7 +31,7 @@ function Test(f, ...cases) {
function V(input, expected_value) {
function check(result) {
- assertFalse(result.exception, input);
+ assertFalse(result.exception, `unexpected exception ${result.value} on input ${input}`);
assertEquals(expected_value, result.value);
}
return {input, check};
@@ -39,7 +39,7 @@ function V(input, expected_value) {
function E(input, expected_exception) {
function check(result) {
- assertTrue(result.exception, input);
+ assertTrue(result.exception, `expected exception ${expected_exception.name} on input ${input}`);
assertInstanceof(result.value, expected_exception);
}
return {input, check};
@@ -56,10 +56,15 @@ const six = {[Symbol.toPrimitive]() {return 6n}};
// inputs.
////////////////////////////////////////////////////////////////////////////////
-
Test(x => Number(x),
V(1n, 1), V(1, 1), V("", 0), V(1.4, 1.4), V(null, 0), V(six, 6));
+Test(x => Math.trunc(+x),
+ E(1n, TypeError), V(1, 1), V("", 0), V(1.4, 1), V(null, 0), E(six, TypeError));
+
+Test(x => Math.trunc(Number(x)),
+ V(1n, 1), V(1, 1), V("", 0), V(1.4, 1), V(null, 0), V(six, 6));
+
Test(x => String(x),
V(1n, "1"), V(1, "1"), V(1.4, "1.4"), V(null, "null"), V(six, "6"));
diff --git a/deps/v8/src/torque/TorqueListener.cpp b/deps/v8/test/mjsunit/harmony/global-configurable.js
index 440b950584..dd823f1531 100644
--- a/deps/v8/src/torque/TorqueListener.cpp
+++ b/deps/v8/test/mjsunit/harmony/global-configurable.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Generated from Torque.g4 by ANTLR 4.7.1
+// Flags: --harmony-global
-#include "TorqueListener.h"
+assertTrue(delete globalThis);
+assertEquals(this.globalThis, undefined);
diff --git a/deps/v8/src/torque/TorqueBaseVisitor.cpp b/deps/v8/test/mjsunit/harmony/global-writable.js
index 7f9c942307..f0c2d754bb 100644
--- a/deps/v8/src/torque/TorqueBaseVisitor.cpp
+++ b/deps/v8/test/mjsunit/harmony/global-writable.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Generated from Torque.g4 by ANTLR 4.7.1
+// Flags: --harmony-global
-#include "TorqueBaseVisitor.h"
+globalThis = '[[Writable]]';
+assertEquals(globalThis, '[[Writable]]');
diff --git a/deps/v8/test/mjsunit/harmony/global.js b/deps/v8/test/mjsunit/harmony/global.js
new file mode 100644
index 0000000000..733b95312b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/global.js
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-global
+
+assertEquals(globalThis, this);
+assertEquals(this.globalThis, this);
+assertEquals(globalThis.globalThis, this);
+assertEquals(globalThis.globalThis.globalThis, this);
+assertEquals(globalThis.globalThis.globalThis.globalThis, this);
+
+{
+ const realm = Realm.create();
+ assertEquals(Realm.global(realm), Realm.eval(realm, 'globalThis'));
+ assertTrue(Realm.global(realm) !== globalThis);
+}
+
+{
+ const descriptor = Object.getOwnPropertyDescriptor(
+ this,
+ 'globalThis'
+ );
+ assertEquals(descriptor.value, this);
+ assertEquals(descriptor.writable, true);
+ assertEquals(descriptor.enumerable, false);
+ assertEquals(descriptor.configurable, true);
+}
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-namespace.js b/deps/v8/test/mjsunit/harmony/modules-import-namespace.js
new file mode 100644
index 0000000000..dfcd6cd502
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-namespace.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+// Flags: --allow-natives-syntax
+
+import * as m from "modules-skip-3.js";
+
+function get() {
+ return m.stringlife;
+}
+
+assertEquals("42", get());
+assertEquals("42", get());
+assertEquals("42", get());
+%OptimizeFunctionOnNextCall(get);
+assertEquals("42", get());
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-special-object.js b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
index 309143d904..0ade4305bf 100644
--- a/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
+++ b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
@@ -4,6 +4,8 @@
// Flags: --harmony-private-fields --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
async function f(assert) {
try {
module_namespace_obj = await import('modules-skip-1.js');
diff --git a/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js b/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
index 3051c91d2b..b5456504fa 100644
--- a/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
+++ b/deps/v8/test/mjsunit/harmony/promise-prototype-finally.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
assertThrows(() => Promise.prototype.finally.call(5), TypeError);
testAsync(assert => {
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 60f2219db1..a458e0cd10 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -180,9 +180,6 @@ var isOptimized;
// Returns true if given function is compiled by TurboFan.
var isTurboFanned;
-// Used for async tests. See definition below for more documentation.
-var testAsync;
-
// Monkey-patchable all-purpose failure handler.
var failWithMessage;
@@ -757,111 +754,4 @@ var prettyPrinted;
} catch(e) {};
return error.stack;
}
-
- /**
- * This is to be used through the testAsync helper function defined
- * below.
- *
- * This requires the --allow-natives-syntax flag to allow calling
- * runtime functions.
- *
- * There must be at least one assertion in an async test. A test
- * with no assertions will fail.
- *
- * @example
- * testAsync(assert => {
- * assert.plan(1) // There should be one assertion in this test.
- * Promise.resolve(1)
- * .then(val => assert.equals(1, val),
- * assert.unreachable);
- * })
- */
- class AsyncAssertion {
- constructor(test, name) {
- this.expectedAsserts_ = -1;
- this.actualAsserts_ = 0;
- this.test_ = test;
- this.name_ = name || '';
- }
-
- /**
- * Sets the number of expected asserts in the test. The test fails
- * if the number of asserts computed after running the test is not
- * equal to this specified value.
- * @param {number} expectedAsserts
- */
- plan(expectedAsserts) {
- this.expectedAsserts_ = expectedAsserts;
- }
-
- fail(expectedText, found) {
- let message = formatFailureText(expectedText, found);
- message += "\nin test:" + this.name_
- message += "\n" + Function.prototype.toString.apply(this.test_);
- eval("%AbortJS(message)");
- }
-
- equals(expected, found, name_opt) {
- this.actualAsserts_++;
- if (!deepEquals(expected, found)) {
- this.fail(prettyPrinted(expected), found, name_opt);
- }
- }
-
- unreachable() {
- let message = "Failure: unreachable in test: " + this.name_;
- message += "\n" + Function.prototype.toString.apply(this.test_);
- eval("%AbortJS(message)");
- }
-
- unexpectedRejection(details) {
- return (error) => {
- let message =
- "Failure: unexpected Promise rejection in test: " + this.name_;
- if (details) message += "\n @" + details;
- if (error instanceof Error) {
- message += "\n" + String(error.stack);
- } else {
- message += "\n" + String(error);
- }
- message += "\n\n" + Function.prototype.toString.apply(this.test_);
- eval("%AbortJS(message)");
- };
- }
-
- drainMicrotasks() {
- eval("%RunMicrotasks()");
- }
-
- done_() {
- if (this.expectedAsserts_ === -1) {
- let message = "Please call t.plan(count) to initialize test harness " +
- "with correct assert count (Note: count > 0)";
- eval("%AbortJS(message)");
- }
-
- if (this.expectedAsserts_ !== this.actualAsserts_) {
- let message = "Expected asserts: " + this.expectedAsserts_;
- message += ", Actual asserts: " + this.actualAsserts_;
- message += "\nin test: " + this.name_;
- message += "\n" + Function.prototype.toString.apply(this.test_);
- eval("%AbortJS(message)");
- }
- }
- }
-
- /** This is used to test async functions and promises.
- * @param {testCallback} test - test function
- * @param {string} [name] - optional name of the test
- *
- *
- * @callback testCallback
- * @param {AsyncAssertion} assert
- */
- testAsync = function(test, name) {
- let assert = new AsyncAssertion(test, name);
- test(assert);
- eval("%RunMicrotasks()");
- assert.done_();
- }
})();
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 28900d22da..4d54808b46 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -154,17 +154,23 @@
'asm/poppler/*': [PASS, SLOW, NO_VARIANTS],
'asm/sqlite3/*': [PASS, SLOW, NO_VARIANTS],
+ # OOM flakes in isolates tests because too many largish heaps are created.
+ 'asm/asm-heap': [PASS, NO_VARIANTS, ['isolates', SKIP]],
+
# Slow tests.
'copy-on-write-assert': [PASS, SLOW],
'es6/typedarray-construct-offset-not-smi': [PASS, SLOW],
'harmony/regexp-property-script-extensions': [PASS, SLOW],
+ 'md5': [PASS, SLOW],
'numops-fuzz-part*': [PASS, ['mode == debug', SLOW]],
'readonly': [PASS, SLOW],
'regress/regress-1122': [PASS, SLOW],
'regress/regress-605470': [PASS, SLOW],
'regress/regress-655573': [PASS, SLOW],
'regress/regress-1200351': [PASS, SLOW],
+ 'string-replace-gc': [PASS, SLOW],
'wasm/asm-wasm-f32': [PASS, SLOW],
+ 'wasm/asm-wasm-f64': [PASS, SLOW],
'wasm/embenchen/*': [PASS, SLOW],
'wasm/grow-memory': [PASS, SLOW],
'wasm/unreachable-validation': [PASS, SLOW],
@@ -253,6 +259,7 @@
'regress/regress-2249': [SKIP],
'regress/regress-4121': [SKIP],
'compare-known-objects-slow': [SKIP],
+ 'compiler/array-multiple-receiver-maps': [SKIP],
# Tests taking too long
'packed-elements': [SKIP],
'regress/regress-1122': [SKIP],
@@ -294,12 +301,6 @@
# BUG(v8:4779): Crashes flakily with stress mode on arm64.
'array-splice': [PASS, SLOW, ['arch == arm64', NO_VARIANTS]],
- # BUG(chromium:751825): Crashes flakily.
- 'wasm/js-api': [SKIP],
-
- # BUG(chromium:773631): Crashes flakily.
- 'wasm/streaming-trap-location': [SKIP],
-
# BUG(v8:7880): Slow tests.
'regress/regress-707066': [SKIP],
'regress/regress-446389': [SKIP],
@@ -319,6 +320,9 @@
'asm/sqlite3/*': [SKIP],
# TODO(mips-team): Fix Wasm for big-endian.
'wasm/*': [SKIP],
+ # TODO(mips-team): Fix SEGV on regress-864509.js on big endian
+ # (https://crbug.com/v8/7953).
+ 'regress/wasm/regress-864509': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -638,6 +642,18 @@
}], # 'system == windows'
##############################################################################
+['system == android', {
+ # Tests consistently failing on Android.
+ # Unable to change locale on Android:
+ 'icu-date-to-string': [FAIL],
+ 'regress/regress-6288': [FAIL],
+ # OOM:
+ 'regress/regress-748069': [FAIL],
+ 'regress/regress-752764': [FAIL],
+ 'regress/regress-779407': [FAIL],
+}], # 'system == android'
+
+##############################################################################
['system == macos', {
# BUG(v8:5333)
'big-object-literal': [SKIP],
@@ -769,17 +785,39 @@
# Flaky crash on Odroid devices: https://crbug.com/v8/7678
'regress/regress-336820': [PASS, ['arch == arm and not simulator_run', SKIP]],
+
+ # Too slow for TSAN in stress mode.
+ 'es6/classes': [PASS, ['tsan', SKIP]],
+ 'regress/regress-1122': [PASS, ['tsan', SKIP]],
+
+ # Slow on arm64 simulator: https://crbug.com/v8/7783
+ 'string-replace-gc': [PASS, ['arch == arm64 and simulator_run', SKIP]],
}], # variant == stress
##############################################################################
['variant == nooptimization and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
'lexicographic-compare': [SKIP],
+ 'md5': [SKIP],
'regress/regress-2185': [SKIP],
+ 'wasm/asm-wasm-f32': [SKIP],
+ 'wasm/asm-wasm-f64': [SKIP],
'wasm/grow-memory': [SKIP],
}], # variant == nooptimization and (arch == arm or arch == arm64) and simulator_run
##############################################################################
+['(arch == arm or arch == arm64) and simulator_run', {
+ # Slow tests: https://crbug.com/v8/7783
+ 'regress/regress-2185': [SKIP],
+}], # (arch == arm or arch == arm64) and simulator_run
+
+##############################################################################
+['(arch == arm or arch == arm64)', {
+ # Flaky tests: https://crbug.com/v8/8090
+ 'regress/regress-752764': [SKIP],
+}], # (arch == arm or arch == arm64)
+
+##############################################################################
['gcov_coverage', {
# Tests taking too long.
'array-functions-prototype-misc': [SKIP],
diff --git a/deps/v8/test/mjsunit/regress/regress-356053.js b/deps/v8/test/mjsunit/regress/regress-356053.js
index 8f0dbdd093..2695c48fd1 100644
--- a/deps/v8/test/mjsunit/regress/regress-356053.js
+++ b/deps/v8/test/mjsunit/regress/regress-356053.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --noconcurrent-recompilation --expose-gc --allow-natives-syntax
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
-%SetFlags("--concurrent-recompilation --block-concurrent-recompilation");
gc();
try { %UnblockConcurrentRecompilation(); } catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-357103.js b/deps/v8/test/mjsunit/regress/regress-357103.js
index 692729ddb3..c56c8893b3 100644
--- a/deps/v8/test/mjsunit/regress/regress-357103.js
+++ b/deps/v8/test/mjsunit/regress/regress-357103.js
@@ -4,7 +4,7 @@
// Flags: --allow-natives-syntax
-%SetFlags("--gc-interval=1");
+%SetAllocationTimeout(1, 1);
var key = "Huckleberry Finn" + "Tom Sawyer";
var o = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-430201.js b/deps/v8/test/mjsunit/regress/regress-430201.js
index dfd1346a01..9443d1ddc4 100644
--- a/deps/v8/test/mjsunit/regress/regress-430201.js
+++ b/deps/v8/test/mjsunit/regress/regress-430201.js
@@ -26,10 +26,10 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc --nostress-incremental-marking
+// Flags: --stress-compaction
var array_1 = [];
-%SetFlags("--stress-compaction");
for (var a = 0; a < 10000; a++) { array_1[a * 100] = 0; }
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-430201b.js b/deps/v8/test/mjsunit/regress/regress-430201b.js
index 056504d1d7..7e890b7930 100644
--- a/deps/v8/test/mjsunit/regress/regress-430201b.js
+++ b/deps/v8/test/mjsunit/regress/regress-430201b.js
@@ -25,12 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc
+// Flags: --allow-natives-syntax --expose-gc --stress-compaction
(function() {
var array_1 = [];
- %SetFlags("--stress-compaction");
for (var a = 0; a < 10000; a++) { array_1[a * 100] = 0; }
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-4325.js b/deps/v8/test/mjsunit/regress/regress-4325.js
index e88bdd3b08..27d690d579 100644
--- a/deps/v8/test/mjsunit/regress/regress-4325.js
+++ b/deps/v8/test/mjsunit/regress/regress-4325.js
@@ -23,7 +23,7 @@ i1.p1 = 0.5;
// Let Inner.map1 die by migrating i2 to Inner.map2:
print(i2.p1);
gc();
-// o1.map's descriptor for "inner" is now a cleared WeakCell;
+// o1.map's descriptor for "inner" is now a cleared weak reference;
// o1.inner's actual map is Inner.map2.
// Prepare Inner.map3, deprecating Inner.map2.
i2.p2 = 0.5;
diff --git a/deps/v8/test/mjsunit/regress/regress-5085.js b/deps/v8/test/mjsunit/regress/regress-5085.js
index 0ed034dc2d..167bfa0f44 100644
--- a/deps/v8/test/mjsunit/regress/regress-5085.js
+++ b/deps/v8/test/mjsunit/regress/regress-5085.js
@@ -4,11 +4,71 @@
// Flags: --allow-natives-syntax
+g = async function () {
+ await 10;
+}
+assertEquals(undefined, g.prototype)
+g();
+assertEquals(undefined, g.prototype)
+
+gen = function* () {
+ yield 10;
+}
+assertTrue(gen.prototype != undefined && gen.prototype != null)
+gen()
+assertTrue(gen.prototype != undefined && gen.prototype != null)
+
+async_gen = async function* () {
+ yield 10;
+}
+assertTrue(async_gen.prototype != undefined && async_gen.prototype != null)
+async_gen()
+assertTrue(async_gen.prototype != undefined && async_gen.prototype != null)
+
function foo(x) {
return x instanceof Proxy;
}
-assertFalse(foo({}));
-assertFalse(foo({}));
+function test_for_exception() {
+ caught_exception = false;
+ try {
+ foo({});
+ } catch (e) {
+ caught_exception = true;
+ assertEquals(
+ 'Function has non-object prototype \'undefined\' in instanceof check',
+ e.message);
+ } finally {
+ assertTrue(caught_exception)
+ }
+}
+
+test_for_exception();
+test_for_exception();
%OptimizeFunctionOnNextCall(foo);
-assertFalse(foo({}));
+test_for_exception();
+
+Proxy.__proto__.prototype = Function.prototype;
+assertTrue((() => {}) instanceof Proxy);
+
+assertEquals(
+ new Proxy({}, {
+ get(o, s) {
+ return s
+ }
+ }).test,
+ 'test');
+
+Proxy.__proto__ = {
+ prototype: {b: 2},
+ a: 1
+};
+assertEquals(Proxy.prototype, {b: 2});
+
+(function testProxyCreationContext() {
+ let realm = Realm.create();
+ let p1 = new Proxy({}, {});
+ let p2 = Realm.eval(realm, "new Proxy({}, {})");
+ assertEquals(0, Realm.owner(p1));
+ assertEquals(1, Realm.owner(p2));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js b/deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js
index 3819233f99..9febb6545d 100644
--- a/deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js
+++ b/deps/v8/test/mjsunit/regress/regress-599414-array-concat-fast-path.js
@@ -7,5 +7,5 @@ var a = largeArray;
assertThrows(() => {
for (;;) {
- a = a.concat(a, a, a, a, a, a);
+ a = a.concat(a, a, a, a, a);
}}, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-6700.js b/deps/v8/test/mjsunit/regress/regress-6700.js
index 342cac485e..c20cefd02c 100644
--- a/deps/v8/test/mjsunit/regress/regress-6700.js
+++ b/deps/v8/test/mjsunit/regress/regress-6700.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+let kMinHeapSize = 4096;
+
(function TestLeftRight() {
function Module(stdlib, foreign, heap) {
"use asm";
@@ -14,7 +16,7 @@
}
return { f:f }
}
- var buffer = new ArrayBuffer(1024);
+ var buffer = new ArrayBuffer(kMinHeapSize);
var module = new Module(this, {}, buffer);
assertTrue(%IsAsmWasmCode(Module));
new Int32Array(buffer)[42] = 23;
@@ -31,7 +33,7 @@
}
return { f:f }
}
- var buffer = new ArrayBuffer(1024);
+ var buffer = new ArrayBuffer(kMinHeapSize);
var module = new Module(this, {}, buffer)
assertTrue(%IsAsmWasmCode(Module));
new Int32Array(buffer)[42 >> 4] = 23;
@@ -48,7 +50,7 @@
}
return { f:f }
}
- var buffer = new ArrayBuffer(1024);
+ var buffer = new ArrayBuffer(kMinHeapSize);
var module = new Module(this, {}, buffer)
assertFalse(%IsAsmWasmCode(Module));
new Int32Array(buffer)[42 & 0xfc] = 23;
@@ -65,7 +67,7 @@
}
return { f:f }
}
- var buffer = new ArrayBuffer(1024);
+ var buffer = new ArrayBuffer(kMinHeapSize);
var module = new Module(this, {}, buffer)
assertFalse(%IsAsmWasmCode(Module));
new Int32Array(buffer)[42 >> 3] = 23;
@@ -82,7 +84,7 @@
}
return { f:f }
}
- var buffer = new ArrayBuffer(1024);
+ var buffer = new ArrayBuffer(kMinHeapSize);
var module = new Module(this, {}, buffer)
assertFalse(%IsAsmWasmCode(Module));
new Int32Array(buffer)[42 << 2] = 23;
diff --git a/deps/v8/test/mjsunit/regress/regress-685.js b/deps/v8/test/mjsunit/regress/regress-685.js
deleted file mode 100644
index d77d61b8fb..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-685.js
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Regression test for http://code.google.com/p/v8/issues/detail?id=685.
-//
-// Test that keyed load IC generic stub uses unsigned comparison for
-// for the length field of arrays.
-//
-// The test passes if it does not crash.
-
-function test() {
- var N = 0xFFFFFFFF;
- var a = [];
- a[N - 1] = 0;
- a[N - 2] = 1;
- a.reverse();
-}
-
-test();
diff --git a/deps/v8/test/mjsunit/regress/regress-7716.js b/deps/v8/test/mjsunit/regress/regress-7716.js
index ac891ceec5..8ce83678d9 100644
--- a/deps/v8/test/mjsunit/regress/regress-7716.js
+++ b/deps/v8/test/mjsunit/regress/regress-7716.js
@@ -10,22 +10,22 @@ for (let i = 0; i < 100000; i++) {
proxy = new Proxy(proxy, {});
}
-// We get a stack overflow in all cases except for Reflect.apply, which here
-// happens to run in constant space: Call jumps into CallProxy and CallProxy
-// jumps into the next Call.
-assertDoesNotThrow(() => Reflect.apply(proxy, {}, []));
-assertThrows(() => Reflect.construct(proxy, []), RangeError);
-assertThrows(() => Reflect.defineProperty(proxy, "x", {}), RangeError);
-assertThrows(() => Reflect.deleteProperty(proxy, "x"), RangeError);
-assertThrows(() => Reflect.get(proxy, "x"), RangeError);
-assertThrows(() => Reflect.getOwnPropertyDescriptor(proxy, "x"), RangeError);
-assertThrows(() => Reflect.getPrototypeOf(proxy), RangeError);
-assertThrows(() => Reflect.has(proxy, "x"), RangeError);
-assertThrows(() => Reflect.isExtensible(proxy), RangeError);
-assertThrows(() => Reflect.ownKeys(proxy), RangeError);
-assertThrows(() => Reflect.preventExtensions(proxy), RangeError);
-assertThrows(() => Reflect.setPrototypeOf(proxy, {}), RangeError);
-assertThrows(() => Reflect.set(proxy, "x", {}), RangeError);
+// Ensure these nested calls don't segfault. They may not all throw exceptions
+// depending on whether the compiler is able to perform tail call optimization
+// on the affected routines.
+try { Reflect.apply(proxy, {}, []) } catch(_) {}
+try { Reflect.construct(proxy, []) } catch(_) {}
+try { Reflect.defineProperty(proxy, "x", {}) } catch(_) {}
+try { Reflect.deleteProperty(proxy, "x") } catch(_) {}
+try { Reflect.get(proxy, "x") } catch(_) {}
+try { Reflect.getOwnPropertyDescriptor(proxy, "x") } catch(_) {}
+try { Reflect.getPrototypeOf(proxy) } catch(_) {}
+try { Reflect.has(proxy, "x") } catch(_) {}
+try { Reflect.isExtensible(proxy) } catch(_) {}
+try { Reflect.ownKeys(proxy) } catch(_) {}
+try { Reflect.preventExtensions(proxy) } catch(_) {}
+try { Reflect.setPrototypeOf(proxy, {}) } catch(_) {}
+try { Reflect.set(proxy, "x", {}) } catch(_) {}
// Recursive handler.
diff --git a/deps/v8/test/mjsunit/regress/regress-797581.js b/deps/v8/test/mjsunit/regress/regress-797581.js
index 17ac0ea50d..eb87e67128 100644
--- a/deps/v8/test/mjsunit/regress/regress-797581.js
+++ b/deps/v8/test/mjsunit/regress/regress-797581.js
@@ -3,6 +3,11 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-dynamic-import
+// Resources: test/mjsunit/regress/modules-skip-regress-797581-1.js
+// Resources: test/mjsunit/regress/modules-skip-regress-797581-2.js
+// Resources: test/mjsunit/regress/modules-skip-regress-797581-3.js
+// Resources: test/mjsunit/regress/modules-skip-regress-797581-4.js
+// Resources: test/mjsunit/regress/modules-skip-regress-797581-5.js
function TryToLoadModule(filename, expect_error, token) {
let caught_error;
diff --git a/deps/v8/test/mjsunit/regress/regress-8033.js b/deps/v8/test/mjsunit/regress/regress-8033.js
new file mode 100644
index 0000000000..437ca2a0a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8033.js
@@ -0,0 +1,45 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("foo: if (true) do { continue foo } while (false)", SyntaxError);
+assertThrows("foo: if (true) while (false) { continue foo }", SyntaxError);
+assertThrows("foo: if (true) for (; false; ) { continue foo }", SyntaxError);
+assertThrows("foo: if (true) for (let x of []) { continue foo }", SyntaxError);
+assertThrows("foo: if (true) for (let x in []) { continue foo }", SyntaxError);
+
+assertThrows("foo: if (true) { do { continue foo } while (false) }", SyntaxError);
+assertThrows("foo: if (true) { while (false) { continue foo } }", SyntaxError);
+assertThrows("foo: if (true) { for (; false; ) { continue foo } }", SyntaxError);
+assertThrows("foo: if (true) { for (let x of []) { continue foo } }", SyntaxError);
+assertThrows("foo: if (true) { for (let x in []) { continue foo } }", SyntaxError);
+
+assertThrows("foo: goo: if (true) do { continue foo } while (false)", SyntaxError);
+assertThrows("foo: goo: if (true) while (false) { continue foo }", SyntaxError);
+assertThrows("foo: goo: if (true) for (; false; ) { continue foo }", SyntaxError);
+assertThrows("foo: goo: if (true) for (let x of []) { continue foo }", SyntaxError);
+assertThrows("foo: goo: if (true) for (let x in []) { continue foo }", SyntaxError);
+
+assertThrows("foo: goo: if (true) { do { continue foo } while (false) }", SyntaxError);
+assertThrows("foo: goo: if (true) { while (false) { continue foo } }", SyntaxError);
+assertThrows("foo: goo: if (true) { for (; false; ) { continue foo } }", SyntaxError);
+assertThrows("foo: goo: if (true) { for (let x of []) { continue foo } }", SyntaxError);
+assertThrows("foo: goo: if (true) { for (let x in []) { continue foo } }", SyntaxError);
+
+assertDoesNotThrow("if (true) foo: goo: do { continue foo } while (false)");
+assertDoesNotThrow("if (true) foo: goo: while (false) { continue foo }");
+assertDoesNotThrow("if (true) foo: goo: for (; false; ) { continue foo }");
+assertDoesNotThrow("if (true) foo: goo: for (let x of []) { continue foo }");
+assertDoesNotThrow("if (true) foo: goo: for (let x in []) { continue foo }");
+
+assertThrows("if (true) foo: goo: { do { continue foo } while (false) }", SyntaxError);
+assertThrows("if (true) foo: goo: { while (false) { continue foo } }", SyntaxError);
+assertThrows("if (true) foo: goo: { for (; false; ) { continue foo } }", SyntaxError);
+assertThrows("if (true) foo: goo: { for (let x of []) { continue foo } }", SyntaxError);
+assertThrows("if (true) foo: goo: { for (let x in []) { continue foo } }", SyntaxError);
+
+assertDoesNotThrow("if (true) { foo: goo: do { continue foo } while (false) }");
+assertDoesNotThrow("if (true) { foo: goo: while (false) { continue foo } }");
+assertDoesNotThrow("if (true) { foo: goo: for (; false; ) { continue foo } }");
+assertDoesNotThrow("if (true) { foo: goo: for (let x of []) { continue foo } }");
+assertDoesNotThrow("if (true) { foo: goo: for (let x in []) { continue foo } }");
diff --git a/deps/v8/test/mjsunit/regress/regress-865310.js b/deps/v8/test/mjsunit/regress/regress-865310.js
new file mode 100644
index 0000000000..57f976991a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-865310.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+check = function() {
+ assertEquals(null, check.caller);
+}
+
+var obj = {};
+obj.valueOf = check;
+
+function f() {
+ Number(obj);
+}
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-869735.js b/deps/v8/test/mjsunit/regress/regress-869735.js
new file mode 100644
index 0000000000..dfa7b8385b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-869735.js
@@ -0,0 +1,14 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ return arguments.length;
+}
+
+var a = [];
+%OptimizeFunctionOnNextCall(f);
+a.length = 81832;
+f(...a);
diff --git a/deps/v8/test/mjsunit/regress/regress-875493.js b/deps/v8/test/mjsunit/regress/regress-875493.js
new file mode 100644
index 0000000000..81fbac4319
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-875493.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test() {
+ const re = /./y;
+ re.lastIndex = 3;
+ const str = 'fg';
+ return re[Symbol.replace](str, '$');
+}
+
+%SetForceSlowPath(false);
+const fast = test();
+%SetForceSlowPath(true);
+const slow = test();
+%SetForceSlowPath(false);
+
+assertEquals(slow, fast);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-380671.js b/deps/v8/test/mjsunit/regress/regress-crbug-380671.js
index 3b03064eb9..e2909e0a43 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-380671.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-380671.js
@@ -10,4 +10,5 @@ assertEquals(0xc0000000, buffer.byteLength);
// mock allocator would allow us to allocate more than the physical memory
// available on 32bit platforms, leaving the internal counters in an invalid
// state.
+buffer = null;
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-759327.js b/deps/v8/test/mjsunit/regress/regress-crbug-759327.js
index 9b422176b8..4aed8a456a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-759327.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-759327.js
@@ -13,7 +13,7 @@ function Module(stdlib, env, heap) {
return { f: f };
}
function instantiate() {
- var buffer = new ArrayBuffer(0);
+ var buffer = new ArrayBuffer(4096);
Module(this, {}, buffer).f();
try {} finally {}
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-807096.js b/deps/v8/test/mjsunit/regress/regress-crbug-807096.js
index 845120db6a..c503fdad97 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-807096.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-807096.js
@@ -4,6 +4,8 @@
//
// Flags: --allow-natives-syntax --no-lazy
+load('test/mjsunit/test-async.js');
+
// For regression testing, it's important that these functions are:
// 1) toplevel
// 2) arrow functions with single-expression bodies
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-830565.js b/deps/v8/test/mjsunit/regress/regress-crbug-830565.js
index ee2c3bdaca..00f49bb9ff 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-830565.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-830565.js
@@ -4,6 +4,8 @@
// Flags: --allow-natives-syntax
+load('test/mjsunit/test-async.js');
+
testAsync(assert => {
assert.plan(1);
const error = new TypeError('Throwing');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-865312.js b/deps/v8/test/mjsunit/regress/regress-crbug-865312.js
new file mode 100644
index 0000000000..8df1bd92d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-865312.js
@@ -0,0 +1,34 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const intArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray
+];
+
+const floatArrayConstructors = [
+ Float32Array,
+ Float64Array
+];
+
+const typedArrayConstructors = [...intArrayConstructors,
+ ...floatArrayConstructors];
+
+for (let constructor of typedArrayConstructors) {
+ // Shadowing the length of a TypedArray should work for Array.p.fill,
+ // but not crash it.
+ let array = new constructor([2, 2]);
+ assertEquals(2, array.length);
+
+ Object.defineProperty(array, 'length', {value: 5});
+ Array.prototype.fill.call(array, 5);
+
+ assertArrayEquals([5, 5], [array[0], array[1]]);
+ assertEquals(undefined, array[2]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-865892.js b/deps/v8/test/mjsunit/regress/regress-crbug-865892.js
new file mode 100644
index 0000000000..f2d2f6ff71
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-865892.js
@@ -0,0 +1,20 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-async-hooks
+
+let ah = async_hooks.createHook(
+{
+ init(asyncId, type) {
+ if (type !== 'PROMISE') { return; }
+ assertThrows('asyncIds.push(asyncId);');
+ }
+});
+ah.enable();
+
+async function foo() {
+ let x = { toString() { return 'modules-skip-1.js' } };
+ assertThrows('await import(x);');
+}
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-866315.js b/deps/v8/test/mjsunit/regress/regress-crbug-866315.js
new file mode 100644
index 0000000000..4eb032d78e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-866315.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-async-hooks
+
+let num = 42;
+let ah = async_hooks.createHook({});
+
+num.__proto__.__proto__ = ah;
+assertThrows('num.enable()');
+assertThrows('num.disable()');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-871886.js b/deps/v8/test/mjsunit/regress/regress-crbug-871886.js
new file mode 100644
index 0000000000..011443145b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-871886.js
@@ -0,0 +1,11 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let arr = [1.5, 2.5];
+arr.slice(0,
+ { valueOf: function () {
+ arr.length = 0;
+ return 2;
+ }
+ });
diff --git a/deps/v8/test/mjsunit/regress/regress-set-flags-stress-compact.js b/deps/v8/test/mjsunit/regress/regress-set-flags-stress-compact.js
index 5bc59a7e11..f86148659f 100644
--- a/deps/v8/test/mjsunit/regress/regress-set-flags-stress-compact.js
+++ b/deps/v8/test/mjsunit/regress/regress-set-flags-stress-compact.js
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
-
-%SetFlags("--gc-interval=164 --stress-compaction");
+// Flags: --allow-natives-syntax --gc-interval=164 --stress-compaction
var a = [];
for (var i = 0; i < 10000; i++) { a[i * 100] = 0; }
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-8070.js b/deps/v8/test/mjsunit/regress/regress-v8-8070.js
new file mode 100644
index 0000000000..a75230da1f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8070.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(iterator) {
+ for (const entry of iterator) {}
+}
+
+%NeverOptimizeFunction(bar);
+
+function foo(a) {
+ const iterator = a.values();
+ bar(iterator);
+ return iterator.next().done;
+}
+
+const a = [1, 2, 3];
+assertTrue(foo(a));
+assertTrue(foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(a));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-776677.js b/deps/v8/test/mjsunit/regress/wasm/regress-776677.js
index 1b2357dcf2..87bf8fac7e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-776677.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-776677.js
@@ -16,7 +16,7 @@ function module(stdlib,foreign,buffer) {
var global = {Uint32Array:Uint32Array};
var env = {};
-memory = new WebAssembly.Memory({initial:200});
+memory = new WebAssembly.Memory({initial:128});
var buffer = memory.buffer;
evil_f = module(global,env,buffer);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-8059.js b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
new file mode 100644
index 0000000000..5c421c4ee4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-8059.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-wasm-disable-structured-cloning
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestPostModule() {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("add", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .exportFunc();
+
+ let module = builder.toModule();
+
+ let workerScript = `
+ onmessage = function(module) {
+ try {
+ let instance = new WebAssembly.Instance(module);
+ let result = instance.exports.add(40, 2);
+ postMessage(result);
+ } catch(e) {
+ postMessage('ERROR: ' + e);
+ }
+ }
+ `;
+
+ let realm = Realm.create();
+ Realm.shared = { m:module, s:workerScript };
+
+ let realmScript = `
+ let worker = new Worker(Realm.shared.s);
+ worker.postMessage(Realm.shared.m);
+ let message = worker.getMessage();
+ worker.terminate();
+ message;
+ `;
+ let message = Realm.eval(realm, realmScript);
+ assertEquals(42, message);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-873600.js b/deps/v8/test/mjsunit/regress/wasm/regress-873600.js
new file mode 100644
index 0000000000..b3f2d739a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-873600.js
@@ -0,0 +1,50 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function DoTest() {
+
+ var stdlib = this;
+ try {
+ var buffer = new ArrayBuffer((2097120) * 1024);
+ } catch (e) {
+ // Out of memory: soft pass because 2GiB is actually a lot!
+ print("OOM: soft pass");
+ return;
+ }
+ var foreign = {}
+
+ var m = (function Module(stdlib, foreign, heap) {
+ "use asm";
+ var MEM16 = new stdlib.Int16Array(heap);
+ function load(i) {
+ i = i|0;
+ i = MEM16[i >> 1]|0;
+ return i | 0;
+ }
+ function store(i, v) {
+ i = i|0;
+ v = v|0;
+ MEM16[i >> 1] = v;
+ }
+ function load8(i) {
+ i = i|0;
+ i = MEM16[i + 8 >> 1]|0;
+ return i | 0;
+ }
+ function store8(i, v) {
+ i = i|0;
+ v = v|0;
+ MEM16[i + 8 >> 1] = v;
+ }
+ return { load: load, store: store, load8: load8, store8: store8 };
+ })(stdlib, foreign, buffer);
+
+ assertEquals(0, m.load(-8));
+ assertEquals(0, m.load8(-16));
+ m.store(2014, 2, 30, 1, 0);
+ assertEquals(0, m.load8(-8));
+ m.store8(-8, 99);
+ assertEquals(99, m.load(0));
+ assertEquals(99, m.load8(-8));
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-875556.js b/deps/v8/test/mjsunit/regress/wasm/regress-875556.js
new file mode 100644
index 0000000000..e1ea426f87
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-875556.js
@@ -0,0 +1,19 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --expose-wasm --experimental-wasm-mv
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function() {
+ const builder = new WasmModuleBuilder();
+ // Generate function 1 (out of 2).
+ sig1 = makeSig([kWasmI32], []);
+ builder.addFunction("main", sig1).addBodyWithEnd([
+ // signature: v_i
+ // body:
+ kExprBlock,
+ ]);
+ assertThrows(function() { builder.instantiate(); }, WebAssembly.CompileError);
+})();
diff --git a/deps/v8/test/mjsunit/sparse-array-reverse.js b/deps/v8/test/mjsunit/sparse-array-reverse.js
deleted file mode 100644
index 45a6da4b5e..0000000000
--- a/deps/v8/test/mjsunit/sparse-array-reverse.js
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/**
- * @fileoverview Test reverse on small * and large arrays.
- */
-
-var VERYLARGE = 4000000000;
-
-// Nicer for firefox 1.5. Unless you uncomment the following line,
-// smjs will appear to hang on this file.
-//var VERYLARGE = 40000;
-
-
-// Simple test of reverse on sparse array.
-var a = [];
-a.length = 2000;
-a[15] = 'a';
-a[30] = 'b';
-Array.prototype[30] = 'B'; // Should be hidden by a[30].
-a[40] = 'c';
-a[50] = 'deleted';
-delete a[50]; // Should leave no trace once deleted.
-a[1959] = 'd'; // Swapped with a[40] when reversing.
-a[1999] = 'e';
-assertEquals("abcde", a.join(''));
-a.reverse();
-delete Array.prototype[30];
-assertEquals("edcba", a.join(''));
-
-
-
-var seed = 43;
-
-// CONG pseudo random number generator. Used for fuzzing the sparse array
-// reverse code.
-function DoOrDont() {
- seed = (69069 * seed + 1234567) % 0x100000000;
- return (seed & 0x100000) != 0;
-}
-
-var sizes = [140, 40000, VERYLARGE];
-var poses = [0, 10, 50, 69];
-
-
-// Fuzzing test of reverse on sparse array.
-for (var iterations = 0; iterations < 20; iterations++) {
- for (var size_pos = 0; size_pos < sizes.length; size_pos++) {
- var size = sizes[size_pos];
-
- var to_delete = [];
-
- var a;
- // Make sure we test both array-backed and hash-table backed
- // arrays.
- if (size < 1000) {
- a = new Array(size);
- } else {
- a = new Array();
- a.length = size;
- }
-
- var expected = '';
- var expected_reversed = '';
-
- for (var pos_pos = 0; pos_pos < poses.length; pos_pos++) {
- var pos = poses[pos_pos];
- var letter = String.fromCharCode(97 + pos_pos);
- if (DoOrDont()) {
- a[pos] = letter;
- expected += letter;
- expected_reversed = letter + expected_reversed;
- } else if (DoOrDont()) {
- Array.prototype[pos] = letter;
- expected += letter;
- expected_reversed = letter + expected_reversed;
- to_delete.push(pos);
- }
- }
- var expected2 = '';
- var expected_reversed2 = '';
- for (var pos_pos = poses.length - 1; pos_pos >= 0; pos_pos--) {
- var letter = String.fromCharCode(110 + pos_pos);
- var pos = size - poses[pos_pos] - 1;
- if (DoOrDont()) {
- a[pos] = letter;
- expected2 += letter;
- expected_reversed2 = letter + expected_reversed2;
- } else if (DoOrDont()) {
- Array.prototype[pos] = letter;
- expected2 += letter;
- expected_reversed2 = letter + expected_reversed2;
- to_delete.push(pos);
- }
- }
-
- assertEquals(expected + expected2, a.join(''), 'join' + size);
- a.reverse();
-
- while (to_delete.length != 0) {
- var pos = to_delete.pop();
- delete(Array.prototype[pos]);
- }
-
- assertEquals(expected_reversed2 + expected_reversed, a.join(''), 'reverse then join' + size);
- }
-}
diff --git a/deps/v8/test/mjsunit/string-pad.js b/deps/v8/test/mjsunit/string-pad.js
new file mode 100644
index 0000000000..836ab8160e
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-pad.js
@@ -0,0 +1,155 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class MyError {};
+const throwing = {toString() {throw new MyError}};
+const empties = ['', {toString() {return ''}}];
+
+{
+ const s = '';
+
+ assertThrows(_ => s.padStart(Symbol(), throwing), TypeError);
+ assertEquals(s, s.padStart(NaN, throwing));
+ assertEquals(s, s.padStart(-Infinity, throwing));
+ assertEquals(s, s.padStart(-9, throwing));
+ assertEquals(s, s.padStart(-1, throwing));
+ assertEquals(s, s.padStart(-0, throwing));
+ assertEquals(s, s.padStart(0, throwing));
+ assertThrows(_ => s.padStart(3, throwing), MyError);
+ assertThrows(_ => s.padStart(9, throwing), MyError);
+ assertThrows(_ => s.padStart(2**31-1, throwing), MyError);
+ assertThrows(_ => s.padStart(2**31, throwing), MyError);
+ assertThrows(_ => s.padStart(2**32-1, throwing), MyError);
+ assertThrows(_ => s.padStart(2**32, throwing), MyError);
+ assertThrows(_ => s.padStart(2**53-1, throwing), MyError);
+ assertThrows(_ => s.padStart(2**53, throwing), MyError);
+ assertThrows(_ => s.padStart(Infinity, throwing), MyError);
+
+ assertThrows(_ => s.padEnd(Symbol(), throwing), TypeError);
+ assertEquals(s, s.padEnd(NaN, throwing));
+ assertEquals(s, s.padEnd(-Infinity, throwing));
+ assertEquals(s, s.padEnd(-9, throwing));
+ assertEquals(s, s.padEnd(-1, throwing));
+ assertEquals(s, s.padEnd(-0, throwing));
+ assertEquals(s, s.padEnd(0, throwing));
+ assertThrows(_ => s.padEnd(3, throwing), MyError);
+ assertThrows(_ => s.padEnd(9, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**31-1, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**31, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**32-1, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**32, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**53-1, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**53, throwing), MyError);
+ assertThrows(_ => s.padEnd(Infinity, throwing), MyError);
+
+ for (const empty of empties) {
+ assertThrows(_ => s.padStart(Symbol(), empty), TypeError);
+ assertEquals(s, s.padStart(NaN, empty));
+ assertEquals(s, s.padStart(-Infinity, empty));
+ assertEquals(s, s.padStart(-9, empty));
+ assertEquals(s, s.padStart(-1, empty));
+ assertEquals(s, s.padStart(-0, empty));
+ assertEquals(s, s.padStart(0, empty));
+ assertEquals(s, s.padStart(3, empty));
+ assertEquals(s, s.padStart(9, empty));
+ assertEquals(s, s.padStart(2**31-1, empty));
+ assertEquals(s, s.padStart(2**31, empty));
+ assertEquals(s, s.padStart(2**32-1, empty));
+ assertEquals(s, s.padStart(2**32, empty));
+ assertEquals(s, s.padStart(2**53-1, empty));
+ assertEquals(s, s.padStart(2**53, empty));
+ assertEquals(s, s.padStart(Infinity, empty));
+
+ assertThrows(_ => s.padEnd(Symbol(), empty), TypeError);
+ assertEquals(s, s.padEnd(NaN, empty));
+ assertEquals(s, s.padEnd(-Infinity, empty));
+ assertEquals(s, s.padEnd(-9, empty));
+ assertEquals(s, s.padEnd(-1, empty));
+ assertEquals(s, s.padEnd(-0, empty));
+ assertEquals(s, s.padEnd(0, empty));
+ assertEquals(s, s.padEnd(3, empty));
+ assertEquals(s, s.padEnd(9, empty));
+ assertEquals(s, s.padEnd(2**31-1, empty));
+ assertEquals(s, s.padEnd(2**31, empty));
+ assertEquals(s, s.padEnd(2**32-1, empty));
+ assertEquals(s, s.padEnd(2**32, empty));
+ assertEquals(s, s.padEnd(2**53-1, empty));
+ assertEquals(s, s.padEnd(2**53, empty));
+ assertEquals(s, s.padEnd(Infinity, empty));
+ }
+}
+
+{
+ const s = 'hello';
+
+ assertThrows(_ => s.padStart(Symbol(), throwing), TypeError);
+ assertEquals(s, s.padStart(NaN, throwing));
+ assertEquals(s, s.padStart(-Infinity, throwing));
+ assertEquals(s, s.padStart(-9, throwing));
+ assertEquals(s, s.padStart(-1, throwing));
+ assertEquals(s, s.padStart(-0, throwing));
+ assertEquals(s, s.padStart(0, throwing));
+ assertEquals(s, s.padStart(3, throwing));
+ assertThrows(_ => s.padStart(9, throwing), MyError);
+ assertThrows(_ => s.padStart(2**31-1, throwing), MyError);
+ assertThrows(_ => s.padStart(2**31, throwing), MyError);
+ assertThrows(_ => s.padStart(2**32-1, throwing), MyError);
+ assertThrows(_ => s.padStart(2**32, throwing), MyError);
+ assertThrows(_ => s.padStart(2**53-1, throwing), MyError);
+ assertThrows(_ => s.padStart(2**53, throwing), MyError);
+ assertThrows(_ => s.padStart(Infinity, throwing), MyError);
+
+ assertThrows(_ => s.padEnd(Symbol(), throwing), TypeError);
+ assertEquals(s, s.padEnd(NaN, throwing));
+ assertEquals(s, s.padEnd(-Infinity, throwing));
+ assertEquals(s, s.padEnd(-9, throwing));
+ assertEquals(s, s.padEnd(-1, throwing));
+ assertEquals(s, s.padEnd(-0, throwing));
+ assertEquals(s, s.padEnd(0, throwing));
+ assertEquals(s, s.padEnd(3, throwing));
+ assertThrows(_ => s.padEnd(9, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**31-1, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**31, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**32-1, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**32, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**53-1, throwing), MyError);
+ assertThrows(_ => s.padEnd(2**53, throwing), MyError);
+ assertThrows(_ => s.padEnd(Infinity, throwing), MyError);
+
+ for (const empty of empties) {
+ assertThrows(_ => s.padStart(Symbol(), empty), TypeError);
+ assertEquals(s, s.padStart(NaN, empty));
+ assertEquals(s, s.padStart(-Infinity, empty));
+ assertEquals(s, s.padStart(-9, empty));
+ assertEquals(s, s.padStart(-1, empty));
+ assertEquals(s, s.padStart(-0, empty));
+ assertEquals(s, s.padStart(0, empty));
+ assertEquals(s, s.padStart(3, empty));
+ assertEquals(s, s.padStart(9, empty));
+ assertEquals(s, s.padStart(2**31-1, empty));
+ assertEquals(s, s.padStart(2**31, empty));
+ assertEquals(s, s.padStart(2**32-1, empty));
+ assertEquals(s, s.padStart(2**32, empty));
+ assertEquals(s, s.padStart(2**53-1, empty));
+ assertEquals(s, s.padStart(2**53, empty));
+ assertEquals(s, s.padStart(Infinity, empty));
+
+ assertThrows(_ => s.padEnd(Symbol(), empty), TypeError);
+ assertEquals(s, s.padEnd(NaN, empty));
+ assertEquals(s, s.padEnd(-Infinity, empty));
+ assertEquals(s, s.padEnd(-9, empty));
+ assertEquals(s, s.padEnd(-1, empty));
+ assertEquals(s, s.padEnd(-0, empty));
+ assertEquals(s, s.padEnd(0, empty));
+ assertEquals(s, s.padEnd(3, empty));
+ assertEquals(s, s.padEnd(9, empty));
+ assertEquals(s, s.padEnd(2**31-1, empty));
+ assertEquals(s, s.padEnd(2**31, empty));
+ assertEquals(s, s.padEnd(2**32-1, empty));
+ assertEquals(s, s.padEnd(2**32, empty));
+ assertEquals(s, s.padEnd(2**53-1, empty));
+ assertEquals(s, s.padEnd(2**53, empty));
+ assertEquals(s, s.padEnd(Infinity, empty));
+ }
+}
diff --git a/deps/v8/test/mjsunit/test-async.js b/deps/v8/test/mjsunit/test-async.js
new file mode 100644
index 0000000000..f8a11c5238
--- /dev/null
+++ b/deps/v8/test/mjsunit/test-async.js
@@ -0,0 +1,117 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Used for async tests. See definition below for more documentation.
+var testAsync;
+
+(function () { // Scope for utility functions.
+ /**
+ * This is to be used through the testAsync helper function defined
+ * below.
+ *
+ * This requires the --allow-natives-syntax flag to allow calling
+ * runtime functions.
+ *
+ * There must be at least one assertion in an async test. A test
+ * with no assertions will fail.
+ *
+ * @example
+ * testAsync(assert => {
+ * assert.plan(1) // There should be one assertion in this test.
+ * Promise.resolve(1)
+ * .then(val => assert.equals(1, val),
+ * assert.unreachable);
+ * })
+ */
+ class AsyncAssertion {
+ constructor(test, name) {
+ this.expectedAsserts_ = -1;
+ this.actualAsserts_ = 0;
+ this.test_ = test;
+ this.name_ = name || '';
+ }
+
+ /**
+ * Sets the number of expected asserts in the test. The test fails
+ * if the number of asserts computed after running the test is not
+ * equal to this specified value.
+ * @param {number} expectedAsserts
+ */
+ plan(expectedAsserts) {
+ this.expectedAsserts_ = expectedAsserts;
+ }
+
+ fail(expectedText, found) {
+ let message = formatFailureText(expectedText, found);
+ message += "\nin test:" + this.name_
+ message += "\n" + Function.prototype.toString.apply(this.test_);
+ %AbortJS(message);
+ }
+
+ equals(expected, found, name_opt) {
+ this.actualAsserts_++;
+ if (expected !== found) {
+ this.fail(prettyPrinted(expected), found, name_opt);
+ }
+ }
+
+ unreachable() {
+ let message = "Failure: unreachable in test: " + this.name_;
+ message += "\n" + Function.prototype.toString.apply(this.test_);
+ %AbortJS(message);
+ }
+
+ unexpectedRejection(details) {
+ return (error) => {
+ let message =
+ "Failure: unexpected Promise rejection in test: " + this.name_;
+ if (details) message += "\n @" + details;
+ if (error instanceof Error) {
+ message += "\n" + String(error.stack);
+ } else {
+ message += "\n" + String(error);
+ }
+ message += "\n\n" + Function.prototype.toString.apply(this.test_);
+ %AbortJS(message);
+ };
+ }
+
+ drainMicrotasks() {
+ %RunMicrotasks();
+ }
+
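+ /** Called by testAsync after the test body runs; aborts if no plan was
+ * set or if the actual assert count does not match the planned count. */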
+ done_() {
+ if (this.expectedAsserts_ === -1) {
+ let message = "Please call t.plan(count) to initialize test harness " +
+ "with correct assert count (Note: count > 0)";
+ %AbortJS(message);
+ }
+
+ if (this.expectedAsserts_ !== this.actualAsserts_) {
+ let message = "Expected asserts: " + this.expectedAsserts_;
+ message += ", Actual asserts: " + this.actualAsserts_;
+ message += "\nin test: " + this.name_;
+ message += "\n" + Function.prototype.toString.apply(this.test_);
+ %AbortJS(message);
+ }
+ }
+ }
+
+ /** This is used to test async functions and promises.
+ * @param {testCallback} test - test function
+ * @param {string} [name] - optional name of the test
+ *
+ *
+ * @callback testCallback
+ * @param {AsyncAssertion} assert
+ */
+ testAsync = function(test, name) {
+ let assert = new AsyncAssertion(test, name);
+ test(assert);
+ %RunMicrotasks();
+ assert.done_();
+ }
+})();
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index d31a189ba2..d843cfaf02 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -41,6 +41,22 @@ SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
NO_HARNESS_PATTERN = re.compile(r"^// NO HARNESS$", flags=re.MULTILINE)
+# Patterns for additional resource files on Android. Files that are not covered
+# by one of the other patterns below will be specified in the resources section.
+RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
+# Pattern to auto-detect files to push on Android for statements like:
+# load("path/to/file.js")
+LOAD_PATTERN = re.compile(
+ r"(?:load|readbuffer|read)\((?:'|\")([^'\"]*)(?:'|\")\)")
+# Pattern to auto-detect files to push on Android for statements like:
+# import "path/to/file.js"
+MODULE_RESOURCES_PATTERN_1 = re.compile(
+ r"(?:import|export)(?:\(| )(?:'|\")([^'\"]*)(?:'|\")")
+# Pattern to auto-detect files to push on Android for statements like:
+# import foobar from "path/to/file.js"
+MODULE_RESOURCES_PATTERN_2 = re.compile(
+ r"(?:import|export).*from (?:'|\")([^'\"]*)(?:'|\")")
+
# Flags known to misbehave when combining arbitrary mjsunit tests. Can also
# be compiled regular expressions.
COMBINE_TESTS_FLAGS_BLACKLIST = [
@@ -124,6 +140,47 @@ class TestCase(testcase.TestCase):
self._files_suffix = files_suffix
self._env = self._parse_source_env(source)
+ def _get_resources_for_file(self, file):
+ """Returns for a given file a list of absolute paths of files needed by the
+ given file.
+ """
+ with open(file) as f:
+ source = f.read()
+ result = []
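+ # Records the absolute, OS-specific form of a path found in the source.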
+ def add_path(path):
+ result.append(os.path.abspath(path.replace('/', os.path.sep)))
+ for match in RESOURCES_PATTERN.finditer(source):
+ # There are several resources per line. Relative to base dir.
+ for path in match.group(1).strip().split():
+ add_path(path)
+ for match in LOAD_PATTERN.finditer(source):
+ # Files in load statements are relative to base dir.
+ add_path(match.group(1))
+ for match in MODULE_RESOURCES_PATTERN_1.finditer(source):
+ # Imported files are side by side with the test case.
+ add_path(os.path.join(
+ self.suite.root, os.path.dirname(self.path), match.group(1)))
+ for match in MODULE_RESOURCES_PATTERN_2.finditer(source):
+ # Imported files are side by side with the test case.
+ add_path(os.path.join(
+ self.suite.root, os.path.dirname(self.path), match.group(1)))
+ return result
+
+ def _get_resources(self):
+ """Returns the list of files needed by a test case."""
+ result = set()
+ to_check = [self._get_source_path()]
+ # Recurse over all files until reaching a fixpoint.
+ while to_check:
+ next_resource = to_check.pop()
+ result.add(next_resource)
+ for resource in self._get_resources_for_file(next_resource):
+ # Only add files that exist on disc. The patterns we check for give some
+ # false positives otherwise.
+ if resource not in result and os.path.exists(resource):
+ to_check.append(resource)
+ return sorted(list(result))
+
def _parse_source_env(self, source):
env_match = ENV_PATTERN.search(source)
env = {}
diff --git a/deps/v8/test/mjsunit/tools/csvparser.js b/deps/v8/test/mjsunit/tools/csvparser.js
index ffca9dd463..91eb3e45b5 100644
--- a/deps/v8/test/mjsunit/tools/csvparser.js
+++ b/deps/v8/test/mjsunit/tools/csvparser.js
@@ -84,3 +84,7 @@ assertEquals(
assertEquals(
['code-creation','Function','0x42f0a0','163','""'],
parser.parseLine('code-creation,Function,0x42f0a0,163,""'));
+
+assertEquals(
+ ['foo C:\\Users\\someuser\\script.js:1:13'],
+ parser.parseLine('foo C:\\\\Users\\\\someuser\\\\script.js:1:13'));
diff --git a/deps/v8/test/mjsunit/tools/profviz.js b/deps/v8/test/mjsunit/tools/profviz.js
index fc0da5d4b0..f89a2adeb3 100644
--- a/deps/v8/test/mjsunit/tools/profviz.js
+++ b/deps/v8/test/mjsunit/tools/profviz.js
@@ -30,6 +30,8 @@
// Files: tools/consarray.js tools/profile.js tools/profile_view.js
// Files: tools/logreader.js tools/arguments.js tools/tickprocessor.js
// Files: tools/profviz/composer.js
+// Resources: test/mjsunit/tools/profviz-test.log
+// Resources: test/mjsunit/tools/profviz-test.default
// Env: TEST_FILE_NAME
assertEquals('string', typeof TEST_FILE_NAME);
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index cf38985e78..3247ddf145 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -29,6 +29,14 @@
// Files: tools/splaytree.js tools/codemap.js tools/csvparser.js
// Files: tools/consarray.js tools/profile.js tools/profile_view.js
// Files: tools/logreader.js tools/arguments.js tools/tickprocessor.js
+// Resources: test/mjsunit/tools/tickprocessor-test-func-info.log
+// Resources: test/mjsunit/tools/tickprocessor-test.default
+// Resources: test/mjsunit/tools/tickprocessor-test.func-info
+// Resources: test/mjsunit/tools/tickprocessor-test.gc-state
+// Resources: test/mjsunit/tools/tickprocessor-test.ignore-unknown
+// Resources: test/mjsunit/tools/tickprocessor-test.log
+// Resources: test/mjsunit/tools/tickprocessor-test.only-summary
+// Resources: test/mjsunit/tools/tickprocessor-test.separate-ic
// Env: TEST_FILE_NAME
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-imports.js b/deps/v8/test/mjsunit/wasm/asm-wasm-imports.js
new file mode 100644
index 0000000000..bfcb25b825
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-imports.js
@@ -0,0 +1,181 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+var stdlib = this;
+
+function assertValidAsm(func) {
+ assertTrue(%IsAsmWasmCode(func), "must be valid asm code");
+}
+
+function assertWasm(expected, func, ffi) {
+ print("Testing " + func.name + "...");
+ assertEquals(
+ expected, func(stdlib, ffi, new ArrayBuffer(1024)).caller());
+ assertValidAsm(func);
+}
+
+
+function TestForeignFunctions() {
+ function AsmModule(stdlib, foreign, buffer) {
+ "use asm";
+
+ var setVal = foreign.setVal;
+ var getVal = foreign.getVal;
+
+ function caller(initial_value, new_value) {
+ initial_value = initial_value|0;
+ new_value = new_value|0;
+ if ((getVal()|0) == (initial_value|0)) {
+ setVal(new_value|0);
+ return getVal()|0;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+ }
+
+ function ffi(initial_val) {
+ var val = initial_val;
+
+ function getVal() {
+ return val;
+ }
+
+ function setVal(new_val) {
+ val = new_val;
+ }
+
+ return {getVal:getVal, setVal:setVal};
+ }
+
+ var foreign = new ffi(23);
+
+ var module = AsmModule({Math: Math}, foreign, null);
+ assertValidAsm(AsmModule);
+
+ assertEquals(103, module.caller(23, 103));
+}
+
+print("TestForeignFunctions...");
+TestForeignFunctions();
+
+
+function TestForeignFunctionMultipleUse() {
+ function AsmModule(stdlib, foreign, buffer) {
+ "use asm";
+
+ var getVal = foreign.getVal;
+
+ function caller(int_val, double_val) {
+ int_val = int_val|0;
+ double_val = +double_val;
+ if ((getVal()|0) == (int_val|0)) {
+ if ((+getVal()) == (+double_val)) {
+ return 89;
+ }
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+ }
+
+ function ffi() {
+ function getVal() {
+ return 83.25;
+ }
+
+ return {getVal:getVal};
+ }
+
+ var foreign = new ffi();
+
+ var module_decl = eval('(' + AsmModule.toString() + ')');
+ var module = module_decl(stdlib, foreign, null);
+ assertValidAsm(module_decl);
+
+ assertEquals(89, module.caller(83, 83.25));
+}
+
+print("TestForeignFunctionMultipleUse...");
+TestForeignFunctionMultipleUse();
+
+function TestForeignVariables() {
+ function AsmModule(stdlib, foreign, buffer) {
+ "use asm";
+
+ var i1 = foreign.foo | 0;
+ var f1 = +foreign.bar;
+ var i2 = foreign.baz | 0;
+ var f2 = +foreign.baz;
+
+ function geti1() {
+ return i1|0;
+ }
+
+ function getf1() {
+ return +f1;
+ }
+
+ function geti2() {
+ return i2|0;
+ }
+
+ function getf2() {
+ return +f2;
+ }
+
+ return {geti1:geti1, getf1:getf1, geti2:geti2, getf2:getf2};
+ }
+
+ function TestCase(env, i1, f1, i2, f2) {
+ print("Testing foreign variables...");
+ var module_decl = eval('(' + AsmModule.toString() + ')');
+ var module = module_decl(stdlib, env);
+ assertValidAsm(module_decl);
+ assertEquals(i1, module.geti1());
+ assertEquals(f1, module.getf1());
+ assertEquals(i2, module.geti2());
+ assertEquals(f2, module.getf2());
+ }
+
+ // Check normal operation.
+ TestCase({foo: 123, bar: 234.5, baz: 345.7}, 123, 234.5, 345, 345.7);
+ // Check partial operation.
+ TestCase({baz: 345.7}, 0, NaN, 345, 345.7);
+ // Check that undefined values are converted to proper defaults.
+ TestCase({qux: 999}, 0, NaN, 0, NaN);
+ // Check that true values are converted properly.
+ TestCase({foo: true, bar: true, baz: true}, 1, 1.0, 1, 1.0);
+ // Check that false values are converted properly.
+ TestCase({foo: false, bar: false, baz: false}, 0, 0, 0, 0);
+ // Check that null values are converted properly.
+ TestCase({foo: null, bar: null, baz: null}, 0, 0, 0, 0);
+ // Check that string values are converted properly.
+ TestCase({foo: 'hi', bar: 'there', baz: 'dude'}, 0, NaN, 0, NaN);
+ TestCase({foo: '0xff', bar: '234', baz: '456.1'}, 255, 234, 456, 456.1);
+ // Check that function values are converted properly.
+ TestCase({foo: TestCase, bar: TestCase, qux: TestCase}, 0, NaN, 0, NaN);
+}
+
+print("TestForeignVariables...");
+TestForeignVariables();
+
+
+function TestGlobalBlock(stdlib, foreign, buffer) {
+ "use asm";
+
+ var x = foreign.x | 0, y = foreign.y | 0;
+
+ function test() {
+ return (x + y) | 0;
+ }
+
+ return {caller: test};
+}
+
+assertWasm(15, TestGlobalBlock, { x: 4, y: 11 });
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-memory.js b/deps/v8/test/mjsunit/wasm/asm-wasm-memory.js
new file mode 100644
index 0000000000..9c2ff77998
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-memory.js
@@ -0,0 +1,212 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+var stdlib = this;
+let kMinHeapSize = 4096;
+
+function assertValidAsm(func) {
+ assertTrue(%IsAsmWasmCode(func), "must be valid asm code");
+}
+
+function assertWasm(expected, func, ffi) {
+ print("Testing " + func.name + "...");
+ assertEquals(
+ expected, func(stdlib, ffi, new ArrayBuffer(kMinHeapSize)).caller());
+ assertValidAsm(func);
+}
+
+
+function TestInt32HeapAccess(stdlib, foreign, buffer) {
+ "use asm";
+
+ var m = new stdlib.Int32Array(buffer);
+ function caller() {
+ var i = 4;
+
+ m[0] = (i + 1) | 0;
+ m[i >> 2] = ((m[0]|0) + 1) | 0;
+ m[2] = ((m[i >> 2]|0) + 1) | 0;
+ return m[2] | 0;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(7, TestInt32HeapAccess);
+
+
+function TestInt32HeapAccessExternal() {
+ var memory = new ArrayBuffer(kMinHeapSize);
+ var memory_int32 = new Int32Array(memory);
+ var module_decl = eval('(' + TestInt32HeapAccess.toString() + ')');
+ var module = module_decl(stdlib, null, memory);
+ assertValidAsm(module_decl);
+ assertEquals(7, module.caller());
+ assertEquals(7, memory_int32[2]);
+}
+
+TestInt32HeapAccessExternal();
+
+
+function TestHeapAccessIntTypes() {
+ var types = [
+ [Int8Array, 'Int8Array', '>> 0'],
+ [Uint8Array, 'Uint8Array', '>> 0'],
+ [Int16Array, 'Int16Array', '>> 1'],
+ [Uint16Array, 'Uint16Array', '>> 1'],
+ [Int32Array, 'Int32Array', '>> 2'],
+ [Uint32Array, 'Uint32Array', '>> 2'],
+ ];
+ for (var i = 0; i < types.length; i++) {
+ var code = TestInt32HeapAccess.toString();
+ code = code.replace('Int32Array', types[i][1]);
+ code = code.replace(/>> 2/g, types[i][2]);
+ var memory = new ArrayBuffer(kMinHeapSize);
+ var memory_view = new types[i][0](memory);
+ var module_decl = eval('(' + code + ')');
+ var module = module_decl(stdlib, null, memory);
+ assertValidAsm(module_decl);
+ assertEquals(7, module.caller());
+ assertEquals(7, memory_view[2]);
+ assertValidAsm(module_decl);
+ }
+}
+
+TestHeapAccessIntTypes();
+
+
+function TestFloatHeapAccess(stdlib, foreign, buffer) {
+ "use asm";
+
+ var f32 = new stdlib.Float32Array(buffer);
+ var f64 = new stdlib.Float64Array(buffer);
+ var fround = stdlib.Math.fround;
+ function caller() {
+ var i = 8;
+ var j = 8;
+ var v = 6.0;
+
+ f64[2] = v + 1.0;
+ f64[i >> 3] = +f64[2] + 1.0;
+ f64[j >> 3] = +f64[j >> 3] + 1.0;
+ i = +f64[i >> 3] == 9.0;
+ return i|0;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(1, TestFloatHeapAccess);
+
+
+function TestFloatHeapAccessExternal() {
+ var memory = new ArrayBuffer(kMinHeapSize);
+ var memory_float64 = new Float64Array(memory);
+ var module_decl = eval('(' + TestFloatHeapAccess.toString() + ')');
+ var module = module_decl(stdlib, null, memory);
+ assertValidAsm(module_decl);
+ assertEquals(1, module.caller());
+ assertEquals(9.0, memory_float64[1]);
+}
+
+TestFloatHeapAccessExternal();
+
+
+(function() {
+ function TestByteHeapAccessCompat(stdlib, foreign, buffer) {
+ "use asm";
+
+ var HEAP8 = new stdlib.Uint8Array(buffer);
+ var HEAP32 = new stdlib.Int32Array(buffer);
+
+ function store(i, v) {
+ i = i | 0;
+ v = v | 0;
+ HEAP32[i >> 2] = v;
+ }
+
+ function storeb(i, v) {
+ i = i | 0;
+ v = v | 0;
+ HEAP8[i | 0] = v;
+ }
+
+ function load(i) {
+ i = i | 0;
+ return HEAP8[i] | 0;
+ }
+
+ function iload(i) {
+ i = i | 0;
+ return HEAP8[HEAP32[i >> 2] | 0] | 0;
+ }
+
+ return {load: load, iload: iload, store: store, storeb: storeb};
+ }
+
+ var memory = new ArrayBuffer(kMinHeapSize);
+ var module_decl = eval('(' + TestByteHeapAccessCompat.toString() + ')');
+ var m = module_decl(stdlib, null, memory);
+ assertValidAsm(module_decl);
+ m.store(0, 20);
+ m.store(4, 21);
+ m.store(8, 22);
+ m.storeb(20, 123);
+ m.storeb(21, 42);
+ m.storeb(22, 77);
+ assertEquals(123, m.load(20));
+ assertEquals(42, m.load(21));
+ assertEquals(77, m.load(22));
+ assertEquals(123, m.iload(0));
+ assertEquals(42, m.iload(4));
+ assertEquals(77, m.iload(8));
+})();
+
+
+function TestIntishAssignment(stdlib, foreign, heap) {
+ "use asm";
+ var HEAP32 = new stdlib.Int32Array(heap);
+ function func() {
+ var a = 1;
+ var b = 2;
+ HEAP32[0] = a + b;
+ return HEAP32[0] | 0;
+ }
+ return {caller: func};
+}
+
+assertWasm(3, TestIntishAssignment);
+
+
+function TestFloatishAssignment(stdlib, foreign, heap) {
+ "use asm";
+ var HEAPF32 = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
+ function func() {
+ var a = fround(1.0);
+ var b = fround(2.0);
+ HEAPF32[0] = a + b;
+ return +HEAPF32[0];
+ }
+ return {caller: func};
+}
+
+assertWasm(3, TestFloatishAssignment);
+
+
+function TestDoubleToFloatAssignment(stdlib, foreign, heap) {
+ "use asm";
+ var HEAPF32 = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
+ function func() {
+ var a = 1.23;
+ HEAPF32[0] = a;
+ return +HEAPF32[0];
+ }
+ return {caller: func};
+}
+
+assertWasm(Math.fround(1.23), TestDoubleToFloatAssignment);
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index 71c6b10490..97219f113b 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -5,6 +5,7 @@
// Flags: --validate-asm --allow-natives-syntax
var stdlib = this;
+let kMinHeapSize = 4096;
function assertValidAsm(func) {
assertTrue(%IsAsmWasmCode(func), "must be valid asm code");
@@ -13,7 +14,7 @@ function assertValidAsm(func) {
function assertWasm(expected, func, ffi) {
print("Testing " + func.name + "...");
assertEquals(
- expected, func(stdlib, ffi, new ArrayBuffer(1024)).caller());
+ expected, func(stdlib, ffi, new ArrayBuffer(kMinHeapSize)).caller());
assertValidAsm(func);
}
@@ -514,100 +515,6 @@ function TestMixedAdd() {
assertWasm(23, TestMixedAdd);
-function TestInt32HeapAccess(stdlib, foreign, buffer) {
- "use asm";
-
- var m = new stdlib.Int32Array(buffer);
- function caller() {
- var i = 4;
-
- m[0] = (i + 1) | 0;
- m[i >> 2] = ((m[0]|0) + 1) | 0;
- m[2] = ((m[i >> 2]|0) + 1) | 0;
- return m[2] | 0;
- }
-
- return {caller: caller};
-}
-
-assertWasm(7, TestInt32HeapAccess);
-
-
-function TestInt32HeapAccessExternal() {
- var memory = new ArrayBuffer(1024);
- var memory_int32 = new Int32Array(memory);
- var module_decl = eval('(' + TestInt32HeapAccess.toString() + ')');
- var module = module_decl(stdlib, null, memory);
- assertValidAsm(module_decl);
- assertEquals(7, module.caller());
- assertEquals(7, memory_int32[2]);
-}
-
-TestInt32HeapAccessExternal();
-
-
-function TestHeapAccessIntTypes() {
- var types = [
- [Int8Array, 'Int8Array', '>> 0'],
- [Uint8Array, 'Uint8Array', '>> 0'],
- [Int16Array, 'Int16Array', '>> 1'],
- [Uint16Array, 'Uint16Array', '>> 1'],
- [Int32Array, 'Int32Array', '>> 2'],
- [Uint32Array, 'Uint32Array', '>> 2'],
- ];
- for (var i = 0; i < types.length; i++) {
- var code = TestInt32HeapAccess.toString();
- code = code.replace('Int32Array', types[i][1]);
- code = code.replace(/>> 2/g, types[i][2]);
- var memory = new ArrayBuffer(1024);
- var memory_view = new types[i][0](memory);
- var module_decl = eval('(' + code + ')');
- var module = module_decl(stdlib, null, memory);
- assertValidAsm(module_decl);
- assertEquals(7, module.caller());
- assertEquals(7, memory_view[2]);
- assertValidAsm(module_decl);
- }
-}
-
-TestHeapAccessIntTypes();
-
-
-function TestFloatHeapAccess(stdlib, foreign, buffer) {
- "use asm";
-
- var f32 = new stdlib.Float32Array(buffer);
- var f64 = new stdlib.Float64Array(buffer);
- var fround = stdlib.Math.fround;
- function caller() {
- var i = 8;
- var j = 8;
- var v = 6.0;
-
- f64[2] = v + 1.0;
- f64[i >> 3] = +f64[2] + 1.0;
- f64[j >> 3] = +f64[j >> 3] + 1.0;
- i = +f64[i >> 3] == 9.0;
- return i|0;
- }
-
- return {caller: caller};
-}
-
-assertWasm(1, TestFloatHeapAccess);
-
-
-function TestFloatHeapAccessExternal() {
- var memory = new ArrayBuffer(1024);
- var memory_float64 = new Float64Array(memory);
- var module_decl = eval('(' + TestFloatHeapAccess.toString() + ')');
- var module = module_decl(stdlib, null, memory);
- assertValidAsm(module_decl);
- assertEquals(1, module.caller());
- assertEquals(9.0, memory_float64[1]);
-}
-
-TestFloatHeapAccessExternal();
function TestConvertI32() {
@@ -1031,219 +938,6 @@ assertEquals(31, module.caller(1, 0, 30, 11));
})();
-function TestForeignFunctions() {
- function AsmModule(stdlib, foreign, buffer) {
- "use asm";
-
- var setVal = foreign.setVal;
- var getVal = foreign.getVal;
-
- function caller(initial_value, new_value) {
- initial_value = initial_value|0;
- new_value = new_value|0;
- if ((getVal()|0) == (initial_value|0)) {
- setVal(new_value|0);
- return getVal()|0;
- }
- return 0;
- }
-
- return {caller:caller};
- }
-
- function ffi(initial_val) {
- var val = initial_val;
-
- function getVal() {
- return val;
- }
-
- function setVal(new_val) {
- val = new_val;
- }
-
- return {getVal:getVal, setVal:setVal};
- }
-
- var foreign = new ffi(23);
-
- var module = AsmModule({Math: Math}, foreign, null);
- assertValidAsm(AsmModule);
-
- assertEquals(103, module.caller(23, 103));
-}
-
-print("TestForeignFunctions...");
-TestForeignFunctions();
-
-
-function TestForeignFunctionMultipleUse() {
- function AsmModule(stdlib, foreign, buffer) {
- "use asm";
-
- var getVal = foreign.getVal;
-
- function caller(int_val, double_val) {
- int_val = int_val|0;
- double_val = +double_val;
- if ((getVal()|0) == (int_val|0)) {
- if ((+getVal()) == (+double_val)) {
- return 89;
- }
- }
- return 0;
- }
-
- return {caller:caller};
- }
-
- function ffi() {
- function getVal() {
- return 83.25;
- }
-
- return {getVal:getVal};
- }
-
- var foreign = new ffi();
-
- var module_decl = eval('(' + AsmModule.toString() + ')');
- var module = module_decl(stdlib, foreign, null);
- assertValidAsm(module_decl);
-
- assertEquals(89, module.caller(83, 83.25));
-}
-
-print("TestForeignFunctionMultipleUse...");
-TestForeignFunctionMultipleUse();
-
-function TestForeignVariables() {
- function AsmModule(stdlib, foreign, buffer) {
- "use asm";
-
- var i1 = foreign.foo | 0;
- var f1 = +foreign.bar;
- var i2 = foreign.baz | 0;
- var f2 = +foreign.baz;
-
- function geti1() {
- return i1|0;
- }
-
- function getf1() {
- return +f1;
- }
-
- function geti2() {
- return i2|0;
- }
-
- function getf2() {
- return +f2;
- }
-
- return {geti1:geti1, getf1:getf1, geti2:geti2, getf2:getf2};
- }
-
- function TestCase(env, i1, f1, i2, f2) {
- print("Testing foreign variables...");
- var module_decl = eval('(' + AsmModule.toString() + ')');
- var module = module_decl(stdlib, env);
- assertValidAsm(module_decl);
- assertEquals(i1, module.geti1());
- assertEquals(f1, module.getf1());
- assertEquals(i2, module.geti2());
- assertEquals(f2, module.getf2());
- }
-
- // Check normal operation.
- TestCase({foo: 123, bar: 234.5, baz: 345.7}, 123, 234.5, 345, 345.7);
- // Check partial operation.
- TestCase({baz: 345.7}, 0, NaN, 345, 345.7);
- // Check that undefined values are converted to proper defaults.
- TestCase({qux: 999}, 0, NaN, 0, NaN);
- // Check that true values are converted properly.
- TestCase({foo: true, bar: true, baz: true}, 1, 1.0, 1, 1.0);
- // Check that false values are converted properly.
- TestCase({foo: false, bar: false, baz: false}, 0, 0, 0, 0);
- // Check that null values are converted properly.
- TestCase({foo: null, bar: null, baz: null}, 0, 0, 0, 0);
- // Check that string values are converted properly.
- TestCase({foo: 'hi', bar: 'there', baz: 'dude'}, 0, NaN, 0, NaN);
- TestCase({foo: '0xff', bar: '234', baz: '456.1'}, 255, 234, 456, 456.1);
- // Check that function values are converted properly.
- TestCase({foo: TestCase, bar: TestCase, qux: TestCase}, 0, NaN, 0, NaN);
-}
-
-print("TestForeignVariables...");
-TestForeignVariables();
-
-
-(function() {
- function TestByteHeapAccessCompat(stdlib, foreign, buffer) {
- "use asm";
-
- var HEAP8 = new stdlib.Uint8Array(buffer);
- var HEAP32 = new stdlib.Int32Array(buffer);
-
- function store(i, v) {
- i = i | 0;
- v = v | 0;
- HEAP32[i >> 2] = v;
- }
-
- function storeb(i, v) {
- i = i | 0;
- v = v | 0;
- HEAP8[i | 0] = v;
- }
-
- function load(i) {
- i = i | 0;
- return HEAP8[i] | 0;
- }
-
- function iload(i) {
- i = i | 0;
- return HEAP8[HEAP32[i >> 2] | 0] | 0;
- }
-
- return {load: load, iload: iload, store: store, storeb: storeb};
- }
-
- var memory = new ArrayBuffer(1024);
- var module_decl = eval('(' + TestByteHeapAccessCompat.toString() + ')');
- var m = module_decl(stdlib, null, memory);
- assertValidAsm(module_decl);
- m.store(0, 20);
- m.store(4, 21);
- m.store(8, 22);
- m.storeb(20, 123);
- m.storeb(21, 42);
- m.storeb(22, 77);
- assertEquals(123, m.load(20));
- assertEquals(42, m.load(21));
- assertEquals(77, m.load(22));
- assertEquals(123, m.iload(0));
- assertEquals(42, m.iload(4));
- assertEquals(77, m.iload(8));
-})();
-
-
-function TestGlobalBlock(stdlib, foreign, buffer) {
- "use asm";
-
- var x = foreign.x | 0, y = foreign.y | 0;
-
- function test() {
- return (x + y) | 0;
- }
-
- return {caller: test};
-}
-
-assertWasm(15, TestGlobalBlock, { x: 4, y: 11 });
-
(function TestComma() {
function CommaModule() {
"use asm";
@@ -1322,52 +1016,6 @@ function TestXor() {
assertWasm(1, TestXor);
-function TestIntishAssignment(stdlib, foreign, heap) {
- "use asm";
- var HEAP32 = new stdlib.Int32Array(heap);
- function func() {
- var a = 1;
- var b = 2;
- HEAP32[0] = a + b;
- return HEAP32[0] | 0;
- }
- return {caller: func};
-}
-
-assertWasm(3, TestIntishAssignment);
-
-
-function TestFloatishAssignment(stdlib, foreign, heap) {
- "use asm";
- var HEAPF32 = new stdlib.Float32Array(heap);
- var fround = stdlib.Math.fround;
- function func() {
- var a = fround(1.0);
- var b = fround(2.0);
- HEAPF32[0] = a + b;
- return +HEAPF32[0];
- }
- return {caller: func};
-}
-
-assertWasm(3, TestFloatishAssignment);
-
-
-function TestDoubleToFloatAssignment(stdlib, foreign, heap) {
- "use asm";
- var HEAPF32 = new stdlib.Float32Array(heap);
- var fround = stdlib.Math.fround;
- function func() {
- var a = 1.23;
- HEAPF32[0] = a;
- return +HEAPF32[0];
- }
- return {caller: func};
-}
-
-assertWasm(Math.fround(1.23), TestDoubleToFloatAssignment);
-
-
function TestIntegerMultiplyBothWays(stdlib, foreign, heap) {
"use asm";
function func() {
diff --git a/deps/v8/test/mjsunit/wasm/atomics.js b/deps/v8/test/mjsunit/wasm/atomics.js
index 63d8eb0ca8..58d3d950d5 100644
--- a/deps/v8/test/mjsunit/wasm/atomics.js
+++ b/deps/v8/test/mjsunit/wasm/atomics.js
@@ -145,109 +145,109 @@ function Test8Op(operation, func) {
}
(function TestAtomicAdd() {
- print("TestAtomicAdd");
+ print(arguments.callee.name);
let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd, 2, 0);
Test32Op(Add, wasmAdd);
})();
(function TestAtomicAdd16U() {
- print("TestAtomicAdd16U");
+ print(arguments.callee.name);
let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd16U, 1, 0);
Test16Op(Add, wasmAdd);
})();
(function TestAtomicAdd8U() {
- print("TestAtomicAdd8U");
+ print(arguments.callee.name);
let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd8U, 0, 0);
Test8Op(Add, wasmAdd);
})();
(function TestAtomicSub() {
- print("TestAtomicSub");
+ print(arguments.callee.name);
let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub, 2, 0);
Test32Op(Sub, wasmSub);
})();
(function TestAtomicSub16U() {
- print("TestAtomicSub16U");
+ print(arguments.callee.name);
let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub16U, 1, 0);
Test16Op(Sub, wasmSub);
})();
(function TestAtomicSub8U() {
- print("TestAtomicSub8U");
+ print(arguments.callee.name);
let wasmSub = GetAtomicBinOpFunction(kExprI32AtomicSub8U, 0, 0);
Test8Op(Sub, wasmSub);
})();
(function TestAtomicAnd() {
- print("TestAtomicAnd");
+ print(arguments.callee.name);
let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd, 2, 0);
Test32Op(And, wasmAnd);
})();
(function TestAtomicAnd16U() {
- print("TestAtomicAnd16U");
+ print(arguments.callee.name);
let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd16U, 1, 0);
Test16Op(And, wasmAnd);
})();
(function TestAtomicAnd8U() {
- print("TestAtomicAnd8U");
+ print(arguments.callee.name);
let wasmAnd = GetAtomicBinOpFunction(kExprI32AtomicAnd8U, 0, 0);
Test8Op(And, wasmAnd);
})();
(function TestAtomicOr() {
- print("TestAtomicOr");
+ print(arguments.callee.name);
let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr, 2, 0);
Test32Op(Or, wasmOr);
})();
(function TestAtomicOr16U() {
- print("TestAtomicOr16U");
+ print(arguments.callee.name);
let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr16U, 1, 0);
Test16Op(Or, wasmOr);
})();
(function TestAtomicOr8U() {
- print("TestAtomicOr8U");
+ print(arguments.callee.name);
let wasmOr = GetAtomicBinOpFunction(kExprI32AtomicOr8U, 0, 0);
Test8Op(Or, wasmOr);
})();
(function TestAtomicXor() {
- print("TestAtomicXor");
+ print(arguments.callee.name);
let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor, 2, 0);
Test32Op(Xor, wasmXor);
})();
(function TestAtomicXor16U() {
- print("TestAtomicXor16U");
+ print(arguments.callee.name);
let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor16U, 1, 0);
Test16Op(Xor, wasmXor);
})();
(function TestAtomicXor8U() {
- print("TestAtomicXor8U");
+ print(arguments.callee.name);
let wasmXor = GetAtomicBinOpFunction(kExprI32AtomicXor8U, 0, 0);
Test8Op(Xor, wasmXor);
})();
(function TestAtomicExchange() {
- print("TestAtomicExchange");
+ print(arguments.callee.name);
let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange, 2, 0);
Test32Op(Exchange, wasmExchange);
})();
(function TestAtomicExchange16U() {
- print("TestAtomicExchange16U");
+ print(arguments.callee.name);
let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange16U, 1, 0);
Test16Op(Exchange, wasmExchange);
})();
(function TestAtomicExchange8U() {
- print("TestAtomicExchange8U");
+ print(arguments.callee.name);
let wasmExchange = GetAtomicBinOpFunction(kExprI32AtomicExchange8U, 0, 0);
Test8Op(Exchange, wasmExchange);
})();
@@ -268,7 +268,7 @@ function TestCmpExchange(func, buffer, params, size) {
}
(function TestAtomicCompareExchange() {
- print("TestAtomicCompareExchange");
+ print(arguments.callee.name);
let wasmCmpExchange =
GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange, 2, 0);
let i32 = new Uint32Array(memory.buffer);
@@ -277,7 +277,7 @@ function TestCmpExchange(func, buffer, params, size) {
})();
(function TestAtomicCompareExchange16U() {
- print("TestAtomicCompareExchange16U");
+ print(arguments.callee.name);
let wasmCmpExchange =
GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange16U, 1, 0);
let i16 = new Uint16Array(memory.buffer);
@@ -286,7 +286,7 @@ function TestCmpExchange(func, buffer, params, size) {
})();
(function TestAtomicCompareExchange8U() {
- print("TestAtomicCompareExchange8U");
+ print(arguments.callee.name);
let wasmCmpExchange =
GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange8U, 0, 0);
let i8 = new Uint8Array(memory.buffer);
@@ -303,7 +303,7 @@ function TestLoad(func, buffer, value, size) {
}
(function TestAtomicLoad() {
- print("TestAtomicLoad");
+ print(arguments.callee.name);
let wasmLoad = GetAtomicLoadFunction(kExprI32AtomicLoad, 2, 0);
let i32 = new Uint32Array(memory.buffer);
let value = 0xacedaced;
@@ -311,7 +311,7 @@ function TestLoad(func, buffer, value, size) {
})();
(function TestAtomicLoad16U() {
- print("TestAtomicLoad16U");
+ print(arguments.callee.name);
let wasmLoad = GetAtomicLoadFunction(kExprI32AtomicLoad16U, 1, 0);
let i16 = new Uint16Array(memory.buffer);
let value = 0xaced;
@@ -319,7 +319,7 @@ function TestLoad(func, buffer, value, size) {
})();
(function TestAtomicLoad8U() {
- print("TestAtomicLoad8U");
+ print(arguments.callee.name);
let wasmLoad = GetAtomicLoadFunction(kExprI32AtomicLoad8U, 0, 0);
let i8 = new Uint8Array(memory.buffer);
let value = 0xac;
@@ -335,7 +335,7 @@ function TestStore(func, buffer, value, size) {
}
(function TestAtomicStore() {
- print("TestAtomicStore");
+ print(arguments.callee.name);
let wasmStore = GetAtomicStoreFunction(kExprI32AtomicStore, 2, 0);
let i32 = new Uint32Array(memory.buffer);
let value = 0xacedaced;
@@ -343,7 +343,7 @@ function TestStore(func, buffer, value, size) {
})();
(function TestAtomicStore16U() {
- print("TestAtomicStore16U");
+ print(arguments.callee.name);
let wasmStore = GetAtomicStoreFunction(kExprI32AtomicStore16U, 1, 0);
let i16 = new Uint16Array(memory.buffer);
let value = 0xaced;
@@ -351,7 +351,7 @@ function TestStore(func, buffer, value, size) {
})();
(function TestAtomicStore8U() {
- print("TestAtomicStore8U");
+ print(arguments.callee.name);
let wasmStore = GetAtomicStoreFunction(kExprI32AtomicStore8U, 0, 0);
let i8 = new Uint8Array(memory.buffer);
let value = 0xac;
@@ -359,7 +359,7 @@ function TestStore(func, buffer, value, size) {
})();
(function TestAtomicLoadStoreOffset() {
- print("TestAtomicLoadStoreOffset");
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
let memory = new WebAssembly.Memory({
initial: 16, maximum: 128, shared: true});
@@ -390,7 +390,7 @@ function TestStore(func, buffer, value, size) {
})();
(function TestAtomicOpinLoop() {
- print("TestAtomicOpinLoop");
+ print(arguments.callee.name);
var builder = new WasmModuleBuilder();
let memory = new WebAssembly.Memory({
initial: 16, maximum: 128, shared: true});
@@ -415,3 +415,20 @@ function TestStore(func, buffer, value, size) {
{m: {imported_mem: memory}}));
assertEquals(20, instance.exports.main());
})();
+
+(function TestUnalignedAtomicAccesses() {
+ print(arguments.callee.name);
+ let wasmAdd = GetAtomicBinOpFunction(kExprI32AtomicAdd, 2, 17);
+ assertTraps(kTrapUnalignedAccess, () => wasmAdd(4, 1001));
+ let wasmLoad = GetAtomicLoadFunction(kExprI32AtomicLoad16U, 1, 0);
+ assertTraps(kTrapUnalignedAccess, () => wasmLoad(15));
+ let wasmStore = GetAtomicStoreFunction(kExprI32AtomicStore, 2, 0);
+ assertTraps(kTrapUnalignedAccess, () => wasmStore(22, 5));
+ let wasmCmpExchange =
+ GetAtomicCmpExchangeFunction(kExprI32AtomicCompareExchange, 2, 0x16);
+ assertTraps(kTrapUnalignedAccess, () => wasmCmpExchange(11, 6, 5));
+
+ // Building functions with bad alignment should fail to compile
+ assertThrows(() => GetAtomicBinOpFunction(kExprI32AtomicSub16U, 3, 0),
+ WebAssembly.CompileError);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
index 8643093db0..9308393da4 100644
--- a/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
+++ b/deps/v8/test/mjsunit/wasm/bounds-check-64bit.js
@@ -21,13 +21,14 @@ builder.addFunction('load', kSig_i_ii)
const module = builder.instantiate();
let start = 12;
let address = start;
-for (i = 1; i < 64; i++) {
+for (i = 0; i < 64; i++) {
// This is the address which will be accessed in the code. We cannot use
// shifts to calculate the address because JS shifts work on 32-bit integers.
- address = (address * 2) % 4294967296;
+ print(`address=${address}`);
if (address < kPageSize) {
assertEquals(0, module.exports.load(start, i));
} else {
assertTraps(kTrapMemOutOfBounds, _ => { module.exports.load(start, i);});
}
+ address = (address * 2) % 4294967296;
}
diff --git a/deps/v8/test/mjsunit/wasm/empirical_max_memory.js b/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
new file mode 100644
index 0000000000..262dfe10ef
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/empirical_max_memory.js
@@ -0,0 +1,85 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let k1MiB = 1 * 1024 * 1024;
+let k1GiB = 1 * 1024 * 1024 * 1024;
+let k4GiB = 4 * k1GiB;
+let kMaxMemory = 2 * k1GiB - kPageSize; // TODO(titzer): raise this to 4GiB
+
+(function Test() {
+ var memory;
+
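+ // Builds a fresh module that imports the shared memory and exports typed
+ // load/store helpers for the given value type and opcodes.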
+ function BuildAccessors(type, load_opcode, store_opcode) {
+ builder = new WasmModuleBuilder();
+ builder.addImportedMemory("i", "mem");
+ builder.addFunction("load", makeSig([kWasmI32], [type]))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ load_opcode, 0, 0, // --
+ ]) // --
+ .exportFunc();
+ builder.addFunction("store", makeSig([kWasmI32, type], []))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ store_opcode, 0, 0, // --
+ ]) // --
+ .exportFunc();
+ let i = builder.instantiate({i: {mem: memory}});
+ return {load: i.exports.load, store: i.exports.store};
+ }
+
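+ // Stores f(i) at every page-aligned offset up to kMaxMemory, then reads
+ // each value back and checks it.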
+ function probe(a, f) {
+ print("------------------------");
+ let stride = kPageSize;
+ let max = kMaxMemory;
+ for (let i = 0; i < max; i += stride) {
+ a.store(i, f(i));
+ }
+ for (let i = 0; i < max; i += stride) {
+ // print(`${i} = ${f(i)}`);
+ assertEquals(f(i), a.load(i));
+ }
+ }
+
+ try {
+ let kPages = kMaxMemory / kPageSize;
+ memory = new WebAssembly.Memory({initial: kPages, maximum: kPages});
+ } catch (e) {
+ print("OOM: sorry, best effort max memory size test.");
+ return;
+ }
+
+ assertEquals(kMaxMemory, memory.buffer.byteLength);
+
+ {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem, kExprI32StoreMem);
+ probe(a, i => (0xaabbccee ^ ((i >> 11) * 0x110005)) | 0);
+ }
+
+ {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem16U, kExprI32StoreMem16);
+ probe(a, i => (0xccee ^ ((i >> 11) * 0x110005)) & 0xFFFF);
+ }
+
+ {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem8U, kExprI32StoreMem8);
+ probe(a, i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
+ }
+
+ {
+ let a = BuildAccessors(kWasmF64, kExprF64LoadMem, kExprF64StoreMem);
+ probe(a, i => 0xaabbccee ^ ((i >> 11) * 0x110005));
+ }
+
+ {
+ let a = BuildAccessors(kWasmF32, kExprF32LoadMem, kExprF32StoreMem);
+ probe(a, i => Math.fround(0xaabbccee ^ ((i >> 11) * 0x110005)));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
index e7811d2b78..2b4cb9a4eb 100644
--- a/deps/v8/test/mjsunit/wasm/ffi-error.js
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -4,180 +4,222 @@
// Flags: --expose-wasm
-load("test/mjsunit/wasm/wasm-constants.js");
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-function instantiateWithFFI(ffi) {
- var builder = new WasmModuleBuilder();
-
- var sig_index = kSig_i_dd;
- builder.addImport("mod", "fun", sig_index);
- builder.addFunction("main", sig_index)
- .addBody([
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprCallFunction, 0, // --
- ]) // --
- .exportFunc();
-
- return builder.instantiate(ffi);
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function CreateDefaultBuilder() {
+ const builder = new WasmModuleBuilder();
+
+ const sig_index = kSig_i_dd;
+ builder.addImport('mod', 'fun', sig_index);
+ builder.addFunction('main', sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallFunction, 0, // --
+ ]) // --
+ .exportFunc();
+ return builder;
}
-// everything is good.
-(function() {
- var ffi = {"mod": {fun: function(a, b) { print(a, b); }}}
- instantiateWithFFI(ffi);
-})();
-
-
-// FFI object should be an object.
-assertThrows(function() {
- var ffi = 0;
- instantiateWithFFI(ffi);
-});
+function checkSuccessfulInstantiation(builder, ffi, handler) {
+ // Test synchronous instantiation.
+ const instance = builder.instantiate(ffi);
+ if (handler) handler(instance);
+ // Test asynchronous instantiation.
+ assertPromiseResult(builder.asyncInstantiate(ffi), handler);
+}
-// FFI object should have a "mod" property.
-assertThrows(function() {
- instantiateWithFFI({});
-});
+function checkFailingInstantiation(builder, ffi, error, message) {
+ // Test synchronous instantiation.
+ assertThrows(_ => builder.instantiate(ffi), error, message);
+ // Test asynchronous instantiation.
+ assertPromiseResult(builder.asyncInstantiate(ffi), assertUnreachable, e => {
+ assertInstanceof(e, error);
+ assertEquals(message, e.message);
+ });
+}
-// FFI object should have a "fun" property.
-assertThrows(function() {
- instantiateWithFFI({mod: {}});
-});
+(function testValidFFI() {
+ print(arguments.callee.name);
+ let ffi = {'mod': {fun: print}};
+ checkSuccessfulInstantiation(CreateDefaultBuilder(), ffi, undefined);
+})();
+(function testInvalidFFIs() {
+ print(arguments.callee.name);
+ checkFailingInstantiation(
+ CreateDefaultBuilder(), 17, TypeError,
+ 'WebAssembly Instantiation: Argument 1 must be an object');
+ checkFailingInstantiation(
+ CreateDefaultBuilder(), {}, TypeError,
+ 'WebAssembly Instantiation: Import #0 module="mod" error: module is not an object or function');
+ checkFailingInstantiation(
+ CreateDefaultBuilder(), {mod: {}}, WebAssembly.LinkError,
+ 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: function import requires a callable');
+ checkFailingInstantiation(
+ CreateDefaultBuilder(), {mod: {fun: {}}}, WebAssembly.LinkError,
+ 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: function import requires a callable');
+ checkFailingInstantiation(
+ CreateDefaultBuilder(), {mod: {fun: 0}}, WebAssembly.LinkError,
+ 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: function import requires a callable');
+})();
-// "fun" should be a JS function.
-assertThrows(function() {
- instantiateWithFFI({mod: {fun: new Object()}});
-});
+(function testImportWithInvalidSignature() {
+ print(arguments.callee.name);
+ // "fun" should have signature "i_dd"
+ let builder = new WasmModuleBuilder();
+
+ let sig_index = kSig_i_dd;
+ builder.addFunction('exp', kSig_i_i)
+ .addBody([
+ kExprGetLocal,
+ 0,
+ ]) // --
+ .exportFunc();
+ let exported = builder.instantiate().exports.exp;
+ checkFailingInstantiation(
+ CreateDefaultBuilder(), {mod: {fun: exported}}, WebAssembly.LinkError,
+ 'WebAssembly Instantiation: Import #0 module="mod" function="fun" error: imported function does not match the expected type');
+})();
-// "fun" should be a JS function.
-assertThrows(function() {
- instantiateWithFFI({mod: {fun: 0}});
-});
+(function regression870646() {
+ print(arguments.callee.name);
+ const ffi = {mod: {fun: function() {}}};
+ Object.defineProperty(ffi, 'mod', {
+ get: function() {
+ throw new Error('my_exception');
+ }
+ });
-// "fun" should have signature "i_dd"
-assertThrows(function () {
- var builder = new WasmModuleBuilder();
+ checkFailingInstantiation(CreateDefaultBuilder(), ffi, Error, 'my_exception');
+})();
- var sig_index = kSig_i_dd;
- builder.addFunction("exp", kSig_i_i)
- .addBody([
- kExprGetLocal, 0,
- ]) // --
- .exportFunc();
+// "fun" matches signature "i_dd"
+(function testImportWithValidSignature() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+
+ builder.addFunction('exp', kSig_i_dd)
+ .addBody([
+ kExprI32Const,
+ 33,
+ ]) // --
+ .exportFunc();
- var exported = builder.instantiate().exports.exp;
- instantiateWithFFI({mod: {fun: exported}});
-});
+ let exported = builder.instantiate().exports.exp;
-// "fun" matches signature "i_dd"
-(function () {
- var builder = new WasmModuleBuilder();
-
- builder.addFunction("exp", kSig_i_dd)
- .addBody([
- kExprI32Const, 33,
- ]) // --
- .exportFunc();
-
- var exported = builder.instantiate().exports.exp;
- var instance = instantiateWithFFI({mod: {fun: exported}});
- assertEquals(33, instance.exports.main());
+ checkSuccessfulInstantiation(
+ CreateDefaultBuilder(), {mod: {fun: exported}},
+ instance => assertEquals(33, instance.exports.main()));
})();
(function I64InSignatureThrows() {
- var builder = new WasmModuleBuilder();
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("function_with_invalid_signature", kSig_l_ll)
+ builder.addFunction('function_with_invalid_signature', kSig_l_ll)
.addBody([ // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
kExprI64Sub]) // --
.exportFunc()
- var module = builder.instantiate();
-
- assertThrows(function() {
- module.exports.function_with_invalid_signature(33, 88);
- }, TypeError);
+ checkSuccessfulInstantiation(
+ builder, undefined,
+ instance => assertThrows(function() {
+ instance.exports.function_with_invalid_signature(33, 88);
+ }, TypeError, 'wasm function signature contains illegal type'));
})();
(function I64ParamsInSignatureThrows() {
- var builder = new WasmModuleBuilder();
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("function_with_invalid_signature", kSig_i_l)
- .addBody([
- kExprGetLocal, 0,
- kExprI32ConvertI64
- ])
- .exportFunc()
+ builder.addFunction('function_with_invalid_signature', kSig_i_l)
+ .addBody([kExprGetLocal, 0, kExprI32ConvertI64])
+ .exportFunc();
- var module = builder.instantiate();
+ checkSuccessfulInstantiation(
+ builder, undefined,
+ instance => assertThrows(
+ _ => instance.exports.function_with_invalid_signature(12), TypeError,
+ 'wasm function signature contains illegal type'));
- assertThrows(function() {
- module.exports.function_with_invalid_signature(33);
- }, TypeError);
})();
(function I64JSImportThrows() {
- var builder = new WasmModuleBuilder();
- var sig_index = builder.addType(kSig_i_i);
- var sig_i64_index = builder.addType(kSig_i_l);
- var index = builder.addImport("", "func", sig_i64_index);
- builder.addFunction("main", sig_index)
- .addBody([
- kExprGetLocal, 0,
- kExprI64SConvertI32,
- kExprCallFunction, index // --
- ]) // --
- .exportFunc();
- var func = function() {return {};};
- var main = builder.instantiate({"": {func: func}}).exports.main;
- assertThrows(function() {
- main(13);
- }, TypeError);
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_index = builder.addType(kSig_i_i);
+ let sig_i64_index = builder.addType(kSig_i_l);
+ let index = builder.addImport('', 'func', sig_i64_index);
+ builder.addFunction('main', sig_index)
+ .addBody([
+ kExprGetLocal, 0, kExprI64SConvertI32, kExprCallFunction, index // --
+ ]) // --
+ .exportFunc();
+
+ checkSuccessfulInstantiation(
+ builder, {'': {func: _ => {}}},
+ instance => assertThrows(
+ instance.exports.main, TypeError,
+ 'wasm function signature contains illegal type'));
+
})();
(function ImportI64ParamWithF64ReturnThrows() {
+ print(arguments.callee.name);
// This tests that we generate correct code by using the correct return
// register. See bug 6096.
- var builder = new WasmModuleBuilder();
+ let builder = new WasmModuleBuilder();
builder.addImport('', 'f', makeSig([kWasmI64], [kWasmF64]));
builder.addFunction('main', kSig_v_v)
.addBody([kExprI64Const, 0, kExprCallFunction, 0, kExprDrop])
.exportFunc();
- var instance = builder.instantiate({'': {f: i => i}});
- assertThrows(() => instance.exports.main(), TypeError);
+ checkSuccessfulInstantiation(
+ builder, {'': {f: i => i}},
+ instance => assertThrows(
+ instance.exports.main, TypeError,
+ 'wasm function signature contains illegal type'));
+
})();
(function ImportI64Return() {
+ print(arguments.callee.name);
// This tests that we generate correct code by using the correct return
// register(s). See bug 6104.
- var builder = new WasmModuleBuilder();
+ let builder = new WasmModuleBuilder();
builder.addImport('', 'f', makeSig([], [kWasmI64]));
builder.addFunction('main', kSig_v_v)
.addBody([kExprCallFunction, 0, kExprDrop])
.exportFunc();
- var instance = builder.instantiate({'': {f: () => 1}});
- assertThrows(() => instance.exports.main(), TypeError);
+ checkSuccessfulInstantiation(
+ builder, {'': {f: _ => 1}},
+ instance => assertThrows(
+ instance.exports.main, TypeError,
+ 'wasm function signature contains illegal type'));
+
})();
(function ImportSymbolToNumberThrows() {
- var builder = new WasmModuleBuilder();
- var index = builder.addImport("", "func", kSig_i_v);
- builder.addFunction("main", kSig_i_v)
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let index = builder.addImport('', 'f', kSig_i_v);
+ builder.addFunction('main', kSig_i_v)
.addBody([kExprCallFunction, 0])
.exportFunc();
- var func = () => Symbol();
- var main = builder.instantiate({"": {func: func}}).exports.main;
- assertThrows(() => main(), TypeError);
+
+ checkSuccessfulInstantiation(
+ builder, {'': {f: _ => Symbol()}},
+ instance => assertThrows(
+ instance.exports.main, TypeError,
+ 'Cannot convert a Symbol value to a number'));
})();
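Aside (not part of the diff): the refactored tests above no longer instantiate and assert inline; they go through checkSuccessfulInstantiation(builder, ffi, handler) and CreateDefaultBuilder(), helpers defined earlier in import-function.js and not visible in this hunk. A minimal sketch of what such a helper plausibly does, assuming it only instantiates the module and forwards the instance (the body below is an illustration, not the actual helper from the file):

function checkSuccessfulInstantiation(builder, ffi, handler) {
  // Instantiate with the given imports; ffi may be undefined.
  const instance = builder.instantiate(ffi);
  // Run the per-test assertions against the fresh instance.
  if (handler) handler(instance);
}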
diff --git a/deps/v8/test/mjsunit/wasm/function-names.js b/deps/v8/test/mjsunit/wasm/function-names.js
index 9320c50789..fe7c401177 100644
--- a/deps/v8/test/mjsunit/wasm/function-names.js
+++ b/deps/v8/test/mjsunit/wasm/function-names.js
@@ -40,7 +40,7 @@ var module = builder.instantiate();
for (var i = 0; i < names.length; ++i) {
var line = lines[i].trim();
if (names[i] === null) continue;
- var printed_name = names[i] === undefined ? "<WASM UNNAMED>" : names[i]
+ var printed_name = names[i];
var expected_start = "at " + printed_name + " (";
assertTrue(line.startsWith(expected_start),
"should start with '" + expected_start + "': '" + line + "'");
diff --git a/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js b/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
new file mode 100644
index 0000000000..c40bf2f11d
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/graceful_shutdown_during_tierup.js
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-wait-for-wasm --wasm-tier-up
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function ShutdownDuringTierUp() {
+ // Create a big module.
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ for (i = 0; i < 100; i++) {
+ builder.addFunction("sub" + i, kSig_i_i)
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprI32Const, i % 61, // --
+ kExprI32Sub]) // --
+ .exportFunc()
+ }
+
+ var buffer = builder.toBuffer();
+  // Wait for compilation to finish, but then shut down while tier-up is still
+ // running.
+ assertPromiseResult(WebAssembly.compile(buffer));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
index 08cfc8c96d..6225220c8d 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
@@ -193,3 +193,27 @@ function redirectToInterpreter(
}
}
})();
+
+(function testInterpreterPreservedOnTierUp() {
+ print(arguments.callee.name);
+ var builder = new WasmModuleBuilder();
+ var fun_body = [kExprI32Const, 23];
+ var fun = builder.addFunction('fun', kSig_i_v).addBody(fun_body).exportFunc();
+ var instance = builder.instantiate();
+ var exp = instance.exports;
+
+ // Initially the interpreter is not being called.
+ var initial_interpreted = %WasmNumInterpretedCalls(instance);
+ assertEquals(23, exp.fun());
+ assertEquals(initial_interpreted + 0, %WasmNumInterpretedCalls(instance));
+
+ // Redirection will cause the interpreter to be called.
+ %RedirectToWasmInterpreter(instance, fun.index);
+ assertEquals(23, exp.fun());
+ assertEquals(initial_interpreted + 1, %WasmNumInterpretedCalls(instance));
+
+  // Requesting a tier-up still ensures the interpreter is being called.
+ %WasmTierUpFunction(instance, fun.index);
+ assertEquals(23, exp.fun());
+ assertEquals(initial_interpreted + 2, %WasmNumInterpretedCalls(instance));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index 0bf3f8610b..b887b40918 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -310,7 +310,7 @@ function checkStack(stack, expected_lines) {
if (!(e instanceof TypeError)) throw e;
checkStack(stripPath(e.stack), [
'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
- ' at indirect (wasm-function[2]:1)', // -
+ ' at indirect (wasm-function[2]:3)', // -
' at main (wasm-function[3]:3)', // -
/^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
/^ at interpreter.js:\d+:3$/
diff --git a/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
new file mode 100644
index 0000000000..f9593e84f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/memory_1gb_oob.js
@@ -0,0 +1,99 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-mem-pages=16384
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const k1MiB = 1 * 1024 * 1024;
+const k1GiB = 1 * 1024 * 1024 * 1024;
+const k2GiB = 2 * k1GiB;
+const k3GiB = 3 * k1GiB;
+const k4GiB = 4 * k1GiB;
+const kMaxMemory = k1GiB;
+
+// Indexes (and offsets) used to systematically probe the memory.
+const indexes = (() => {
+ const a = k1GiB, b = k2GiB, c = k3GiB, d = k4GiB;
+ return [
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, // near 0
+ a-8, a-4, a+0, a+1, a+2, a+3, a+4, a+5, a+7, a+8, a+9, // near 1GiB
+ b-8, b-4, b+0, b+1, b+2, b+3, b+4, b+5, b+7, b+8, b+9, // near 2GiB
+ c-8, c-4, c+0, c+1, c+2, c+3, c+4, c+5, c+7, c+8, c+9, // near 3GiB
+ d-9, d-8, d-7, d-5, d-4, d-3, d-2, d-1 // near 4GiB
+];
+})();
+
+(function Test() {
+ var memory;
+
+ function BuildAccessors(type, load_opcode, store_opcode, offset) {
+ builder = new WasmModuleBuilder();
+ builder.addImportedMemory("i", "mem");
+ const h = 0x80;
+ const m = 0x7f;
+ let offset_bytes = [h|((offset >>> 0) & m), // LEB encoding of offset
+ h|((offset >>> 7) & m),
+ h|((offset >>> 14) & m),
+ h|((offset >>> 21) & m),
+ 0|((offset >>> 28) & m)];
+ builder.addFunction("load", makeSig([kWasmI32], [type]))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ load_opcode, 0, ...offset_bytes, // --
+ ]) // --
+ .exportFunc();
+ builder.addFunction("store", makeSig([kWasmI32, type], []))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ store_opcode, 0, ...offset_bytes, // --
+ ]) // --
+ .exportFunc();
+ let i = builder.instantiate({i: {mem: memory}});
+ return {offset: offset, load: i.exports.load, store: i.exports.store};
+ }
+
+ function probe(a, size, offset, f) {
+ print(`size=${size} offset=${offset}`);
+ for (let i of indexes) {
+ let oob = (i + size + offset) > kMaxMemory;
+ if (oob) {
+// print(` ${i} + ${offset} OOB`);
+ assertThrows(() => a.store(i, f(i)));
+ assertThrows(() => a.load(i));
+ } else {
+// print(` ${i} = ${f(i)}`);
+ a.store(i, f(i));
+ assertEquals(f(i), a.load(i));
+ }
+ }
+ }
+
+ try {
+ const kPages = kMaxMemory / kPageSize;
+ memory = new WebAssembly.Memory({initial: kPages, maximum: kPages});
+ } catch (e) {
+ print("OOM: sorry, best effort max memory size test.");
+ return;
+ }
+
+ assertEquals(kMaxMemory, memory.buffer.byteLength);
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem, kExprI32StoreMem, offset);
+ probe(a, 4, offset, i => (0xaabbccee ^ ((i >> 11) * 0x110005)) | 0);
+ }
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem8U, kExprI32StoreMem8, offset);
+ probe(a, 1, offset, i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
+ }
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmF64, kExprF64LoadMem, kExprF64StoreMem, offset);
+ probe(a, 8, offset, i => 0xaabbccee ^ ((i >> 11) * 0x110005));
+ }
+})();
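Aside (not part of the diff): BuildAccessors above hand-writes the static memory offset as a fixed five-byte unsigned LEB128 sequence, forcing the continuation bit (0x80) on the first four bytes so that every offset up to 4GiB has the same encoded length. For comparison, a minimal variable-length encoder is sketched below; the name encodeLEB128 is illustrative, not a helper that exists in these tests.

function encodeLEB128(value) {
  // Unsigned LEB128: emit 7 bits per byte, least-significant group first,
  // with the continuation bit set on every byte except the last.
  const bytes = [];
  do {
    let byte = value & 0x7f;
    value = value >>> 7;
    if (value !== 0) byte |= 0x80;
    bytes.push(byte);
  } while (value !== 0);
  return bytes;
}

For example, encodeLEB128(k1GiB) yields [0x80, 0x80, 0x80, 0x80, 0x04], while encodeLEB128(0) is just [0x00]; the test pads to five bytes instead so the function body layout is identical for every probed offset.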
diff --git a/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
new file mode 100644
index 0000000000..db344f30f3
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/memory_2gb_oob.js
@@ -0,0 +1,99 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-mem-pages=32768
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const k1MiB = 1 * 1024 * 1024;
+const k1GiB = 1 * 1024 * 1024 * 1024;
+const k2GiB = 2 * k1GiB;
+const k3GiB = 3 * k1GiB;
+const k4GiB = 4 * k1GiB;
+const kMaxMemory = k2GiB;
+
+// Indexes (and offsets) used to systematically probe the memory.
+const indexes = (() => {
+ const a = k1GiB, b = k2GiB, c = k3GiB, d = k4GiB;
+ return [
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, // near 0
+ a-8, a-4, a+0, a+1, a+2, a+3, a+4, a+5, a+7, a+8, a+9, // near 1GiB
+ b-8, b-4, b+0, b+1, b+2, b+3, b+4, b+5, b+7, b+8, b+9, // near 2GiB
+ c-8, c-4, c+0, c+1, c+2, c+3, c+4, c+5, c+7, c+8, c+9, // near 3GiB
+ d-9, d-8, d-7, d-5, d-4, d-3, d-2, d-1 // near 4GiB
+];
+})();
+
+(function Test() {
+ var memory;
+
+ function BuildAccessors(type, load_opcode, store_opcode, offset) {
+ builder = new WasmModuleBuilder();
+ builder.addImportedMemory("i", "mem");
+ const h = 0x80;
+ const m = 0x7f;
+ let offset_bytes = [h|((offset >>> 0) & m), // LEB encoding of offset
+ h|((offset >>> 7) & m),
+ h|((offset >>> 14) & m),
+ h|((offset >>> 21) & m),
+ 0|((offset >>> 28) & m)];
+ builder.addFunction("load", makeSig([kWasmI32], [type]))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ load_opcode, 0, ...offset_bytes, // --
+ ]) // --
+ .exportFunc();
+ builder.addFunction("store", makeSig([kWasmI32, type], []))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ store_opcode, 0, ...offset_bytes, // --
+ ]) // --
+ .exportFunc();
+ let i = builder.instantiate({i: {mem: memory}});
+ return {offset: offset, load: i.exports.load, store: i.exports.store};
+ }
+
+ function probe(a, size, offset, f) {
+ print(`size=${size} offset=${offset}`);
+ for (let i of indexes) {
+ let oob = (i + size + offset) > kMaxMemory;
+ if (oob) {
+// print(` ${i} + ${offset} OOB`);
+ assertThrows(() => a.store(i, f(i)));
+ assertThrows(() => a.load(i));
+ } else {
+// print(` ${i} = ${f(i)}`);
+ a.store(i, f(i));
+ assertEquals(f(i), a.load(i));
+ }
+ }
+ }
+
+ try {
+ let kPages = kMaxMemory / kPageSize;
+ memory = new WebAssembly.Memory({initial: kPages, maximum: kPages});
+ } catch (e) {
+ print("OOM: sorry, best effort max memory size test.");
+ return;
+ }
+
+ assertEquals(kMaxMemory, memory.buffer.byteLength);
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem, kExprI32StoreMem, offset);
+ probe(a, 4, offset, i => (0xaabbccee ^ ((i >> 11) * 0x110005)) | 0);
+ }
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem8U, kExprI32StoreMem8, offset);
+ probe(a, 1, offset, i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
+ }
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmF64, kExprF64LoadMem, kExprF64StoreMem, offset);
+ probe(a, 8, offset, i => 0xaabbccee ^ ((i >> 11) * 0x110005));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js b/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
new file mode 100644
index 0000000000..d5cb006a79
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/memory_4gb_oob.js
@@ -0,0 +1,97 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const k1MiB = 1 * 1024 * 1024;
+const k1GiB = 1 * 1024 * 1024 * 1024;
+const k2GiB = 2 * k1GiB;
+const k3GiB = 3 * k1GiB;
+const k4GiB = 4 * k1GiB;
+const kMaxMemory = k4GiB;
+
+// Indexes (and offsets) used to systematically probe the memory.
+const indexes = (() => {
+ const a = k1GiB, b = k2GiB, c = k3GiB, d = k4GiB;
+ return [
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, // near 0
+ a-8, a-4, a+0, a+1, a+2, a+3, a+4, a+5, a+7, a+8, a+9, // near 1GiB
+ b-8, b-4, b+0, b+1, b+2, b+3, b+4, b+5, b+7, b+8, b+9, // near 2GiB
+ c-8, c-4, c+0, c+1, c+2, c+3, c+4, c+5, c+7, c+8, c+9, // near 3GiB
+ d-9, d-8, d-7, d-5, d-4, d-3, d-2, d-1 // near 4GiB
+];
+})();
+
+(function Test() {
+ var memory;
+
+ function BuildAccessors(type, load_opcode, store_opcode, offset) {
+ builder = new WasmModuleBuilder();
+ builder.addImportedMemory("i", "mem");
+ const h = 0x80;
+ const m = 0x7f;
+ let offset_bytes = [h|((offset >>> 0) & m), // LEB encoding of offset
+ h|((offset >>> 7) & m),
+ h|((offset >>> 14) & m),
+ h|((offset >>> 21) & m),
+ 0|((offset >>> 28) & m)];
+ builder.addFunction("load", makeSig([kWasmI32], [type]))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ load_opcode, 0, ...offset_bytes, // --
+ ]) // --
+ .exportFunc();
+ builder.addFunction("store", makeSig([kWasmI32, type], []))
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ store_opcode, 0, ...offset_bytes, // --
+ ]) // --
+ .exportFunc();
+ let i = builder.instantiate({i: {mem: memory}});
+ return {offset: offset, load: i.exports.load, store: i.exports.store};
+ }
+
+ function probe(a, size, offset, f) {
+ print(`size=${size} offset=${offset}`);
+ for (let i of indexes) {
+ let oob = (i + size + offset) > kMaxMemory;
+ if (oob) {
+// print(` ${i} + ${offset} OOB`);
+ assertThrows(() => a.store(i, f(i)));
+ assertThrows(() => a.load(i));
+ } else {
+// print(` ${i} = ${f(i)}`);
+ a.store(i, f(i));
+ assertEquals(f(i), a.load(i));
+ }
+ }
+ }
+
+ try {
+ let kPages = kMaxMemory / kPageSize;
+ memory = new WebAssembly.Memory({initial: kPages, maximum: kPages});
+ } catch (e) {
+ print("OOM: sorry, best effort max memory size test.");
+ return;
+ }
+
+ assertEquals(kMaxMemory, memory.buffer.byteLength);
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem, kExprI32StoreMem, offset);
+ probe(a, 4, offset, i => (0xaabbccee ^ ((i >> 11) * 0x110005)) | 0);
+ }
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmI32, kExprI32LoadMem8U, kExprI32StoreMem8, offset);
+ probe(a, 1, offset, i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
+ }
+
+ for (let offset of indexes) {
+ let a = BuildAccessors(kWasmF64, kExprF64LoadMem, kExprF64StoreMem, offset);
+ probe(a, 8, offset, i => 0xaabbccee ^ ((i >> 11) * 0x110005));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index de05dd4657..2ca5d9a725 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -160,13 +160,15 @@ function testOOBThrows() {
assertEquals(0, write());
}
-
+ // Note that this test might be run concurrently in multiple Isolates, which
+  // makes an exact comparison of the expected trap count unreliable. But it is
+ // still possible to check the lower bound for the expected trap count.
for (offset = 65534; offset < 66536; offset++) {
const trap_count = %GetWasmRecoveredTrapCount();
assertTraps(kTrapMemOutOfBounds, read);
assertTraps(kTrapMemOutOfBounds, write);
if (%IsWasmTrapHandlerEnabled()) {
- assertEquals(trap_count + 2, %GetWasmRecoveredTrapCount());
+ assertTrue(trap_count + 2 <= %GetWasmRecoveredTrapCount());
}
}
}
diff --git a/deps/v8/test/mjsunit/wasm/origin-trial-flags.js b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
new file mode 100644
index 0000000000..d41f581e13
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/origin-trial-flags.js
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noexperimental-wasm-threads --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function instantiateModuleWithThreads() {
+ // Build a WebAssembly module which uses threads-features.
+ const builder = new WasmModuleBuilder();
+ const shared = true;
+ builder.addMemory(2, 10, false, shared);
+ builder.addFunction('main', kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, kExprGetLocal, 1, kAtomicPrefix, kExprI32AtomicAdd, 2,
+ 0
+ ])
+ .exportFunc();
+
+ return builder.instantiate();
+}
+
+// Disable WebAssembly threads initially.
+%SetWasmThreadsEnabled(false);
+assertThrows(instantiateModuleWithThreads, WebAssembly.CompileError);
+
+// Enable WebAssembly threads.
+%SetWasmThreadsEnabled(true);
+assertInstanceof(instantiateModuleWithThreads(), WebAssembly.Instance);
+
+// Disable WebAssembly threads.
+%SetWasmThreadsEnabled(false);
+assertThrows(instantiateModuleWithThreads, WebAssembly.CompileError);
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index 8afe209e9b..f5aead9fb6 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -382,6 +382,7 @@ let kTrapFloatUnrepresentable = 5;
let kTrapFuncInvalid = 6;
let kTrapFuncSigMismatch = 7;
let kTrapTypeError = 8;
+let kTrapUnalignedAccess = 9;
let kTrapMsgs = [
"unreachable",
@@ -392,7 +393,8 @@ let kTrapMsgs = [
"float unrepresentable in integer range",
"invalid index into function table",
"function signature mismatch",
- "wasm function signature contains illegal type"
+ "wasm function signature contains illegal type",
+ "operation does not support unaligned accesses"
];
function assertTraps(trap, code) {
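Aside (not part of the diff): the two hunks above are coupled: kTrapUnalignedAccess is index 9, so kTrapMsgs needs a matching entry in position 9. A one-line sanity check, not present in the file and shown only to make that invariant explicit, would be:

assertEquals('operation does not support unaligned accesses',
             kTrapMsgs[kTrapUnalignedAccess]);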
diff --git a/deps/v8/test/mjsunit/wasm/worker-interpreter.js b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
new file mode 100644
index 0000000000..d730ed7a74
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/worker-interpreter.js
@@ -0,0 +1,63 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-wasm-disable-structured-cloning
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestPostInterpretedModule() {
+ let builder = new WasmModuleBuilder();
+ let add = builder.addFunction("add", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
+ .exportFunc();
+
+ let module = builder.toModule();
+ let instance = new WebAssembly.Instance(module);
+ let exp = instance.exports;
+
+ let workerScript = `
+ var instance = null;
+ onmessage = function(message) {
+ try {
+ if (message.command == 'module') {
+ instance = new WebAssembly.Instance(message.module);
+ postMessage('OK');
+ }
+ if (message.command == 'call') {
+ let result = instance.exports.add(40, 2);
+ postMessage(result);
+ }
+ } catch(e) {
+ postMessage('ERROR: ' + e);
+ }
+ }
+ `;
+ let worker = new Worker(workerScript);
+
+ // Call method without using the interpreter.
+ var initial_interpreted = %WasmNumInterpretedCalls(instance);
+ assertEquals(23, exp.add(20, 3));
+ assertEquals(initial_interpreted + 0, %WasmNumInterpretedCalls(instance));
+
+ // Send module to the worker, still not interpreting.
+ worker.postMessage({ command:'module', module:module });
+ assertEquals('OK', worker.getMessage());
+ worker.postMessage({ command:'call' });
+ assertEquals(42, worker.getMessage());
+ assertEquals(initial_interpreted + 0, %WasmNumInterpretedCalls(instance));
+
+ // Switch to the interpreter and call method.
+ %RedirectToWasmInterpreter(instance, add.index);
+ assertEquals(23, exp.add(20, 3));
+ assertEquals(initial_interpreted + 1, %WasmNumInterpretedCalls(instance));
+
+ // Let worker call interpreted function.
+ worker.postMessage({ command:'call' });
+ assertEquals(42, worker.getMessage());
+ assertEquals(initial_interpreted + 1, %WasmNumInterpretedCalls(instance));
+
+ // All done.
+ worker.terminate();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/worker-module.js b/deps/v8/test/mjsunit/wasm/worker-module.js
index 00615b8434..72645f8dbf 100644
--- a/deps/v8/test/mjsunit/wasm/worker-module.js
+++ b/deps/v8/test/mjsunit/wasm/worker-module.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-wasm-disable-structured-cloning
+// Flags: --wasm-shared-engine --no-wasm-disable-structured-cloning
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index d610bf228a..713e952378 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -16,7 +16,7 @@
namespace v8 {
static const char* kHeader =
- "# Copyright 2017 the V8 project authors. All rights reserved.\n"
+ "# Copyright 2018 the V8 project authors. All rights reserved.\n"
"# Use of this source code is governed by a BSD-style license that can\n"
"# be found in the LICENSE file.\n"
"\n"
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 59adae65fc..c581a9806c 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -419,10 +419,6 @@
'intl402/NumberFormat/prototype/format/format-fraction-digits': [FAIL],
'intl402/NumberFormat/prototype/format/format-significant-digits': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7480
- 'intl402/Collator/unicode-ext-seq-in-private-tag': [FAIL],
- 'intl402/Collator/unicode-ext-seq-with-attribute': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=7481
'intl402/NumberFormat/ignore-invalid-unicode-ext-values': [FAIL],
'intl402/DateTimeFormat/ignore-invalid-unicode-ext-values': [FAIL],
@@ -435,7 +431,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7669
'intl402/Intl/getCanonicalLocales/canonicalized-tags': [FAIL],
- 'intl402/Intl/getCanonicalLocales/preferred-grandfathered': [FAIL],
# Tests assume that the sort order of "same elements" (comparator returns 0)
# is deterministic.
@@ -467,12 +462,6 @@
'intl402/Locale/invalid-tag-throws': [FAIL],
'intl402/Locale/likely-subtags': [FAIL],
'intl402/Locale/likely-subtags-grandfathered': [FAIL],
- 'intl402/Locale/prototype/maximize/length': [FAIL],
- 'intl402/Locale/prototype/maximize/name': [FAIL],
- 'intl402/Locale/prototype/maximize/prop-desc': [FAIL],
- 'intl402/Locale/prototype/minimize/length': [FAIL],
- 'intl402/Locale/prototype/minimize/name': [FAIL],
- 'intl402/Locale/prototype/minimize/prop-desc': [FAIL],
'intl402/Locale/prototype/toStringTag/toStringTag': [FAIL],
'intl402/Locale/prototype/toStringTag/toString': [FAIL],
@@ -481,15 +470,9 @@
'intl402/RelativeTimeFormat/constructor/supportedLocalesOf/length': [FAIL],
'intl402/RelativeTimeFormat/constructor/supportedLocalesOf/name': [FAIL],
'intl402/RelativeTimeFormat/constructor/supportedLocalesOf/prop-desc': [FAIL],
- 'intl402/RelativeTimeFormat/prototype/format/length': [FAIL],
- 'intl402/RelativeTimeFormat/prototype/format/name': [FAIL],
- 'intl402/RelativeTimeFormat/prototype/format/prop-desc': [FAIL],
- 'intl402/RelativeTimeFormat/prototype/formatToParts/length': [FAIL],
- 'intl402/RelativeTimeFormat/prototype/formatToParts/name': [FAIL],
- 'intl402/RelativeTimeFormat/prototype/formatToParts/prop-desc': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=7813
- 'built-ins/Array/prototype/lastIndexOf/calls-only-has-on-prototype-after-length-zeroed': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=7993
+ 'intl402/RelativeTimeFormat/prototype/toStringTag/toStringTag': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=7814
'built-ins/Array/prototype/splice/property-traps-order-with-species': [FAIL],
@@ -534,7 +517,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=6538
'built-ins/Array/prototype/unshift/throws-if-integer-limit-exceeded': [SKIP],
- 'built-ins/Array/prototype/reverse/length-exceeding-integer-limit-with-proxy': [FAIL],
'built-ins/Array/prototype/splice/create-species-length-exceeding-integer-limit': [FAIL],
'built-ins/Array/prototype/splice/throws-if-integer-limit-exceeded': [SKIP],
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index e944b3ebec..7a1de38ce1 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -50,6 +50,7 @@ FEATURE_FLAGS = {
'String.prototype.matchAll': '--harmony-string-matchall',
'Symbol.matchAll': '--harmony-string-matchall',
'numeric-separator-literal': '--harmony-numeric-separator',
+ 'Intl.ListFormat': '--harmony-intl-list-format',
'Intl.Locale': '--harmony-locale',
'Intl.RelativeTimeFormat': '--harmony-intl-relative-time-format',
'Symbol.prototype.description': '--harmony-symbol-description',
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 1ed8e986b6..3c258607fc 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -98,10 +98,11 @@ module test {
}
macro LabelTestHelper4(flag: constexpr bool): never labels Label4, Label5 {
- if
- constexpr(flag) goto Label4;
- else
+ if constexpr(flag) {
+ goto Label4;
+ } else {
goto Label5;
+ }
}
macro CallLabelTestHelper4(flag: constexpr bool): bool {
@@ -120,10 +121,11 @@ module test {
let r1: bool = CallLabelTestHelper4(true);
let r2: bool = CallLabelTestHelper4(false);
- if (r1 && !r2)
+ if (r1 && !r2) {
return True;
- else
+ } else {
return False;
+ }
}
macro GenericMacroTest<T : type>(param: T): Object {
@@ -242,8 +244,17 @@ module test {
}
macro TestLocalConstBindings() {
- const kSmi: Smi = 3;
- check(kSmi == 3);
+ const x : constexpr int31 = 3;
+ const x_smi : Smi = x;
+ {
+ const x : Smi = x + from_constexpr<Smi>(1);
+ check(x == x_smi + 1);
+ const x_smi : Smi = x;
+ check(x == x_smi);
+ check(x == 4);
+ }
+ check(x_smi == 3);
+ check(x == x_smi);
}
struct TestStructA {
@@ -289,4 +300,150 @@ module test {
macro TestStruct4(): TestStructC {
return TestStructC{TestStruct2(), TestStruct2()};
}
+
+ // This macro tests different versions of the for-loop where some parts
+ // are (not) present.
+ macro TestForLoop() {
+ let sum: Smi = 0;
+ for (let i: Smi = 0; i < 5; ++i) sum += i;
+ check(sum == 10);
+
+ sum = 0;
+ let j: Smi = 0;
+ for (; j < 5; ++j) sum += j;
+ check(sum == 10);
+
+ sum = 0;
+ j = 0;
+ for (; j < 5;) sum += j++;
+ check(sum == 10);
+
+ // Check that break works. No test expression.
+ sum = 0;
+ for (let i: Smi = 0;; ++i) {
+ if (i == 5) break;
+ sum += i;
+ }
+ check(sum == 10);
+
+ sum = 0;
+ j = 0;
+ for (;;) {
+ if (j == 5) break;
+ sum += j;
+ j++;
+ }
+ check(sum == 10);
+
+ // The following tests are the same as above, but use continue to skip
+ // index 3.
+ sum = 0;
+ for (let i: Smi = 0; i < 5; ++i) {
+ if (i == 3) continue;
+ sum += i;
+ }
+ check(sum == 7);
+
+ sum = 0;
+ j = 0;
+ for (; j < 5; ++j) {
+ if (j == 3) continue;
+ sum += j;
+ }
+ check(sum == 7);
+
+ sum = 0;
+ j = 0;
+ for (; j < 5;) {
+ if (j == 3) {
+ j++;
+ continue;
+ }
+ sum += j;
+ j++;
+ }
+ check(sum == 7);
+
+ sum = 0;
+ for (let i: Smi = 0;; ++i) {
+ if (i == 3) continue;
+ if (i == 5) break;
+ sum += i;
+ }
+ check(sum == 7);
+
+ sum = 0;
+ j = 0;
+ for (;;) {
+ if (j == 3) {
+ j++;
+ continue;
+ }
+
+ if (j == 5) break;
+ sum += j;
+ j++;
+ }
+ check(sum == 7);
+ }
+
+ macro TestSubtyping(x : Smi) {
+ const foo : Object = x;
+ }
+
+ macro IncrementIfSmi<A : type>(x : A) : A {
+ typeswitch (x) {
+ case (x : Smi) {
+ return x + 1;
+ } case (o : A) {
+ return o;
+ }
+ }
+ }
+
+ macro TypeswitchExample(x : Number | FixedArray) : int32 {
+ let result : int32 = 0;
+ typeswitch (IncrementIfSmi<(Number|FixedArray)>(x)) {
+ case (x : FixedArray) {
+ result = result + 1;
+ } case (Number) {
+ result = result + 2;
+ }
+ }
+
+ result = result * 10;
+
+ typeswitch (IncrementIfSmi<(Number|FixedArray)>(x)) {
+ case (x : Smi) {
+ result = result + convert<int32>(x);
+ } case (a : FixedArray) {
+ result = result + convert<int32>(a.length);
+ } case (x : HeapNumber) {
+ result = result + 7;
+ }
+ }
+
+ return result;
+ }
+
+ macro TestTypeswitch() {
+ check(TypeswitchExample(from_constexpr<Smi>(5)) == 26);
+ const a : FixedArray = AllocateZeroedFixedArray(3);
+ check(TypeswitchExample(a) == 13);
+ check(TypeswitchExample(from_constexpr<Number>(0.5)) == 27);
+ }
+
+ macro ExampleGenericOverload<A: type>(o : Object) : A {
+ return o;
+ }
+ macro ExampleGenericOverload<A: type>(o : Smi) : A {
+ return o + 1;
+ }
+
+ macro TestGenericOverload() {
+ const x_smi : Smi = 5;
+ const x_object : Object = x_smi;
+ check(ExampleGenericOverload<Smi>(x_smi) == 6);
+ check(unsafe_cast<Smi>(ExampleGenericOverload<Object>(x_object)) == 5);
+ }
}
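Aside (not part of the diff): the expected values in TestTypeswitch follow directly from the macros above. For the Smi input 5, IncrementIfSmi returns 6; the first typeswitch takes the Number branch (result = 2), the multiplication gives 20, and the second typeswitch takes the Smi branch and adds 6, hence 26. For the FixedArray of length 3, the branches add 1, then multiply by 10, then add the length 3, hence 13. For the Number 0.5 (a HeapNumber, so IncrementIfSmi returns it unchanged), the branches add 2, then multiply by 10, then add 7, hence 27.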
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index a6cf82f163..606fe9c343 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -41,6 +41,7 @@ v8_source_set("unittests_sources") {
testonly = true
sources = [
+ "../../test/common/assembler-tester.h",
"../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
@@ -191,6 +192,7 @@ v8_source_set("unittests_sources") {
"test-helpers.h",
"test-utils.cc",
"test-utils.h",
+ "torque/earley-parser-unittest.cc",
"unicode-unittest.cc",
"utils-unittest.cc",
"value-serializer-unittest.cc",
@@ -222,21 +224,45 @@ v8_source_set("unittests_sources") {
}
if (v8_current_cpu == "arm") {
- sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-arm-unittest.cc",
+ "compiler/arm/instruction-selector-arm-unittest.cc",
+ ]
} else if (v8_current_cpu == "arm64") {
- sources += [ "compiler/arm64/instruction-selector-arm64-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-arm64-unittest.cc",
+ "compiler/arm64/instruction-selector-arm64-unittest.cc",
+ ]
} else if (v8_current_cpu == "x86") {
- sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-ia32-unittest.cc",
+ "compiler/ia32/instruction-selector-ia32-unittest.cc",
+ ]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
- sources += [ "compiler/mips/instruction-selector-mips-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-mips-unittest.cc",
+ "compiler/mips/instruction-selector-mips-unittest.cc",
+ ]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
- sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-mips64-unittest.cc",
+ "compiler/mips64/instruction-selector-mips64-unittest.cc",
+ ]
} else if (v8_current_cpu == "x64") {
- sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-x64-unittest.cc",
+ "compiler/x64/instruction-selector-x64-unittest.cc",
+ ]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
- sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-ppc-unittest.cc",
+ "compiler/ppc/instruction-selector-ppc-unittest.cc",
+ ]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
- sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ]
+ sources += [
+ "assembler/turbo-assembler-s390-unittest.cc",
+ "compiler/s390/instruction-selector-s390-unittest.cc",
+ ]
}
configs = [
diff --git a/deps/v8/test/unittests/api/remote-object-unittest.cc b/deps/v8/test/unittests/api/remote-object-unittest.cc
index 40754d50f4..5fa0646425 100644
--- a/deps/v8/test/unittests/api/remote-object-unittest.cc
+++ b/deps/v8/test/unittests/api/remote-object-unittest.cc
@@ -5,7 +5,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/handles.h"
#include "src/objects-inl.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
index 7430ce6b35..f17528977c 100644
--- a/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
@@ -63,12 +63,12 @@ class AsmTypeTest : public TestWithZone {
class FunctionTypeBuilder {
public:
- FunctionTypeBuilder(FunctionTypeBuilder&& b)
+ FunctionTypeBuilder(FunctionTypeBuilder&& b) V8_NOEXCEPT
: function_type_(b.function_type_) {
b.function_type_ = nullptr;
}
- FunctionTypeBuilder& operator=(FunctionTypeBuilder&& b) {
+ FunctionTypeBuilder& operator=(FunctionTypeBuilder&& b) V8_NOEXCEPT {
if (this != &b) {
function_type_ = b.function_type_;
b.function_type_ = nullptr;
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
new file mode 100644
index 0000000000..056bd1c2c6
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
@@ -0,0 +1,78 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/arm/assembler-arm-inl.h"
+#include "src/macro-assembler.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// If we are running on android and the output is not redirected (i.e. ends up
+// in the android log) then we cannot find the error message in the output. This
+// macro just returns the empty string in that case.
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define ERROR_MESSAGE(msg) ""
+#else
+#define ERROR_MESSAGE(msg) msg
+#endif
+
+// Test the arm assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ Move32BitImmediate(r1, Operand(17));
+ __ cmp(r0, r1); // 1st parameter is in {r0}.
+ __ Check(Condition::ne, AbortReason::kNoReason);
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
+}
+
+#undef __
+#undef ERROR_MESSAGE
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
new file mode 100644
index 0000000000..e354fb91d9
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
@@ -0,0 +1,78 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/arm64/macro-assembler-arm64-inl.h"
+#include "src/macro-assembler.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// If we are running on android and the output is not redirected (i.e. ends up
+// in the android log) then we cannot find the error message in the output. This
+// macro just returns the empty string in that case.
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define ERROR_MESSAGE(msg) ""
+#else
+#define ERROR_MESSAGE(msg) msg
+#endif
+
+// Test the arm64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason"));
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ Mov(w1, Immediate(17));
+ __ Cmp(w0, w1); // 1st parameter is in {w0}.
+ __ Check(Condition::ne, AbortReason::kNoReason);
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
+}
+
+#undef __
+#undef ERROR_MESSAGE
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
new file mode 100644
index 0000000000..ba3634314f
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
@@ -0,0 +1,62 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the ia32 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+TEST(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ mov(eax, 17);
+ __ cmp(eax, Operand(esp, 4)); // compare with 1st parameter.
+ __ Check(Condition::not_equal, AbortReason::kNoReason);
+ __ ret(0);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
new file mode 100644
index 0000000000..abba0ff30b
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the mips assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter (in {a0}) is 17.
+ __ Check(Condition::ne, AbortReason::kNoReason, a0, Operand(17));
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
new file mode 100644
index 0000000000..8d8bc0756c
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the mips64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter (in {a0}) is 17.
+ __ Check(Condition::ne, AbortReason::kNoReason, a0, Operand(17));
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
new file mode 100644
index 0000000000..8054eb1da5
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
@@ -0,0 +1,68 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the ppc assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ mov(r4, Operand(17));
+ __ cmp(r3, r4); // 1st parameter is in {r3}.
+ __ Check(Condition::ne, AbortReason::kNoReason);
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
new file mode 100644
index 0000000000..7d45ec907f
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
@@ -0,0 +1,68 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/s390/assembler-s390-inl.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the s390 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ lgfi(r3, Operand(17));
+ __ CmpP(r2, r3); // 1st parameter is in {r2}.
+ __ Check(Condition::ne, AbortReason::kNoReason);
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
new file mode 100644
index 0000000000..060060c762
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
@@ -0,0 +1,62 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+TEST(TurboAssemblerTest, TestHardAbort) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ auto f = GeneratedCode<void>::FromBuffer(nullptr, buffer);
+
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST(TurboAssemblerTest, TestCheck) {
+ size_t allocated;
+ byte* buffer = AllocateAssemblerBuffer(&allocated);
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, buffer,
+ static_cast<int>(allocated), CodeObjectRequired::kNo);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter is 17.
+ __ movl(rax, Immediate(17));
+ __ cmpl(rax, arg_reg_1);
+ __ Check(Condition::not_equal, AbortReason::kNoReason);
+ __ ret(0);
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ MakeAssemblerBufferExecutable(buffer, allocated);
+ auto f = GeneratedCode<void, int>::FromBuffer(nullptr, buffer);
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index 10f9f32c7d..b9295d49a0 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -74,7 +74,9 @@ TYPED_TEST(FunctionalTest, EqualToImpliesSameHashCode) {
this->rng()->NextBytes(values, sizeof(values));
TRACED_FOREACH(TypeParam, v1, values) {
TRACED_FOREACH(TypeParam, v2, values) {
- if (e(v1, v2)) EXPECT_EQ(h(v1), h(v2));
+ if (e(v1, v2)) {
+ EXPECT_EQ(h(v1), h(v2));
+ }
}
}
}
@@ -143,7 +145,9 @@ TYPED_TEST(FunctionalTest, BitEqualToImpliesSameBitHash) {
this->rng()->NextBytes(&values, sizeof(values));
TRACED_FOREACH(TypeParam, v1, values) {
TRACED_FOREACH(TypeParam, v2, values) {
- if (e(v1, v2)) EXPECT_EQ(h(v1), h(v2));
+ if (e(v1, v2)) {
+ EXPECT_EQ(h(v1), h(v2));
+ }
}
}
}
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index c523906027..45121aedb3 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -7,7 +7,7 @@
#include <sstream>
#include "include/v8-platform.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index 95052c9b75..b796e457d4 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/api-inl.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/semaphore.h"
#include "src/compiler.h"
diff --git a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
index f7fb335ac6..5a0e89326b 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc
@@ -5,7 +5,7 @@
#include <memory>
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 70e04043c4..011cc67c81 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1183,12 +1183,12 @@ TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
}
}
-
-TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
+TEST_P(InstructionSelectorShiftTest, Word32BitwiseNotWithParameters) {
const Shift shift = GetParam();
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+ m.Return(m.Word32BitwiseNot(
+ (m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
@@ -1197,12 +1197,11 @@ TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
+TEST_P(InstructionSelectorShiftTest, Word32BitwiseNotWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- m.Return(m.Word32Not(
+ m.Return(m.Word32BitwiseNot(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1214,13 +1213,14 @@ TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
}
}
-
-TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
+TEST_P(InstructionSelectorShiftTest,
+ Word32AndWithWord32BitwiseNotWithParameters) {
const Shift shift = GetParam();
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32(), MachineType::Int32());
- m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
- m.Parameter(1), m.Parameter(2)))));
+ m.Return(
+ m.Word32And(m.Parameter(0), m.Word32BitwiseNot((m.*shift.constructor)(
+ m.Parameter(1), m.Parameter(2)))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmBic, s[0]->arch_opcode());
@@ -1229,14 +1229,14 @@ TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
-
-TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
+TEST_P(InstructionSelectorShiftTest,
+ Word32AndWithWord32BitwiseNotWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0),
- m.Word32Not((m.*shift.constructor)(
+ m.Word32BitwiseNot((m.*shift.constructor)(
m.Parameter(1), m.Int32Constant(imm)))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2971,12 +2971,11 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
}
}
-
-TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
+TEST_F(InstructionSelectorTest, Word32AndWithWord32BitwiseNot) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+ m.Return(m.Word32And(m.Parameter(0), m.Word32BitwiseNot(m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmBic, s[0]->arch_opcode());
@@ -2987,7 +2986,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
{
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+ m.Return(m.Word32And(m.Word32BitwiseNot(m.Parameter(0)), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmBic, s[0]->arch_opcode());
@@ -3076,10 +3075,9 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
}
}
-
-TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+TEST_F(InstructionSelectorTest, Word32BitwiseNotWithParameter) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- m.Return(m.Word32Not(m.Parameter(0)));
+ m.Return(m.Word32BitwiseNot(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index be876c7cb3..aa54abe320 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -1327,6 +1327,70 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
}
}
+TEST_F(InstructionSelectorTest, TestAndBranch64EqualWhenCanCoverFalse) {
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = uint64_t{1} << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b, c;
+ Node* n = m.Word64And(m.Parameter(0), m.Int64Constant(mask));
+ m.Branch(m.Word64Equal(n, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Branch(m.Word64Equal(n, m.Int64Constant(3)), &b, &c);
+ m.Bind(&c);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArm64And, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(kArm64TestAndBranch, s[1]->arch_opcode());
+ EXPECT_EQ(kEqual, s[1]->flags_condition());
+ EXPECT_EQ(kArm64Cmp, s[2]->arch_opcode());
+ EXPECT_EQ(kEqual, s[2]->flags_condition());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, TestAndBranch64AndWhenCanCoverFalse) {
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = uint64_t{1} << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b, c;
+ m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, TestAndBranch32AndWhenCanCoverFalse) {
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = uint32_t{1} << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b, c;
+ m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(mask)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ }
+}
+
TEST_F(InstructionSelectorTest, Word32EqualZeroAndBranchWithOneBitMask) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
@@ -3746,8 +3810,8 @@ TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
{
StreamBuilder m(this, type, type, type);
if (type == MachineType::Int32()) {
- m.Return(
- (m.*inst.constructor)(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+ m.Return((m.*inst.constructor)(m.Parameter(0),
+ m.Word32BitwiseNot(m.Parameter(1))));
} else {
ASSERT_EQ(MachineType::Int64(), type);
m.Return(
@@ -3762,8 +3826,8 @@ TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
{
StreamBuilder m(this, type, type, type);
if (type == MachineType::Int32()) {
- m.Return(
- (m.*inst.constructor)(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+ m.Return((m.*inst.constructor)(m.Word32BitwiseNot(m.Parameter(0)),
+ m.Parameter(1)));
} else {
ASSERT_EQ(MachineType::Int64(), type);
m.Return(
@@ -3782,10 +3846,9 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorLogicalWithNotRHSTest,
::testing::ValuesIn(kLogicalWithNotRHSs));
-
-TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+TEST_F(InstructionSelectorTest, Word32BitwiseNotWithParameter) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- m.Return(m.Word32Not(m.Parameter(0)));
+ m.Return(m.Word32BitwiseNot(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Not32, s[0]->arch_opcode());
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index 086fa2ec7d..cb5b5fd806 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -29,7 +29,7 @@ class CommonOperatorReducerTest : public GraphTest {
Reduction Reduce(
AdvancedReducer::Editor* editor, Node* node,
MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
- JSHeapBroker broker(isolate());
+ JSHeapBroker broker(isolate(), zone());
MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
flags);
CommonOperatorReducer reducer(editor, graph(), &broker, common(), &machine,
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index 6780bf8500..464ee3a971 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -63,7 +63,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
public:
ConstantFoldingReducerTest()
: TypedGraphTest(3),
- js_heap_broker_(isolate()),
+ js_heap_broker_(isolate(), zone()),
simplified_(zone()),
deps_(isolate(), zone()) {}
~ConstantFoldingReducerTest() override {}
@@ -81,7 +81,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
}
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
- const JSHeapBroker* js_heap_broker() const { return &js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() { return &js_heap_broker_; }
private:
JSHeapBroker js_heap_broker_;
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index a731a8f1cb..af2c382f5b 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -16,9 +16,10 @@ namespace compiler {
GraphTest::GraphTest(int num_parameters)
: TestWithNativeContext(),
TestWithIsolateAndZone(),
+ canonical_(isolate()),
common_(zone()),
graph_(zone()),
- js_heap_broker_(isolate()),
+ js_heap_broker_(isolate(), zone()),
source_positions_(&graph_),
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 1a9c83bb8a..d9b9934770 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -10,6 +10,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/typer.h"
+#include "src/handles.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -17,8 +18,6 @@ namespace v8 {
namespace internal {
// Forward declarations.
-template <class T>
-class Handle;
class HeapObject;
namespace compiler {
@@ -62,9 +61,10 @@ class GraphTest : public virtual TestWithNativeContext,
Graph* graph() { return &graph_; }
SourcePositionTable* source_positions() { return &source_positions_; }
NodeOriginTable* node_origins() { return &node_origins_; }
- const JSHeapBroker* js_heap_broker() { return &js_heap_broker_; }
+ JSHeapBroker* js_heap_broker() { return &js_heap_broker_; }
private:
+ CanonicalHandleScope canonical_;
CommonOperatorBuilder common_;
Graph graph_;
JSHeapBroker js_heap_broker_;
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 5897187ae4..4df81d5d59 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -882,7 +882,7 @@ TEST_F(Int64LoweringTest, I64PhiWord32) {
}
TEST_F(Int64LoweringTest, I64ReverseBytes) {
- LowerGraph(graph()->NewNode(machine()->Word64ReverseBytes().placeholder(),
+ LowerGraph(graph()->NewNode(machine()->Word64ReverseBytes(),
Int64Constant(value(0))),
MachineRepresentation::kWord64);
EXPECT_THAT(
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 1f5e666eb2..53e3b48762 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -24,7 +24,7 @@ class JSCallReducerTest : public TypedGraphTest {
: TypedGraphTest(3),
javascript_(zone()),
deps_(isolate(), zone()),
- js_heap_broker(isolate()) {}
+ js_heap_broker(isolate(), zone()) {}
~JSCallReducerTest() override {}
protected:
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 8cf5bd3236..5c49468991 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -410,9 +410,10 @@ TEST_F(LoadEliminationTest, LoadFieldWithTypeMismatch) {
Node* load = graph()->NewNode(simplified()->LoadField(access), object, effect,
control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, IsTypeGuard(value, _), _, _));
Reduction r = load_elimination.Reduce(load);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
+ EXPECT_THAT(r.replacement(), IsTypeGuard(value, _));
}
TEST_F(LoadEliminationTest, LoadElementWithTypeMismatch) {
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/test/unittests/compiler/mips/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com \ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com \ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 34faec9690..15f5de7b2f 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -411,33 +411,35 @@ TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
}
TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
- {
- StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- Node* const p0 = m.Parameter(0);
- Node* const r =
- m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
- m.Return(r);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMipsSeb, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
- }
- {
- StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
- Node* const p0 = m.Parameter(0);
- Node* const r =
- m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
- m.Return(r);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMipsSeh, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)),
+ m.Int32Constant(24));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsSeb, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)),
+ m.Int32Constant(16));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsSeh, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
}
}
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
index 4ce9d7f91d..8bbcab4c2d 100644
--- a/deps/v8/test/unittests/compiler/mips64/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips64/OWNERS
@@ -1,3 +1,2 @@
-ivica.bogosavljevic@mips.com
-Miran.Karic@mips.com
-sreten.kovacevic@mips.com \ No newline at end of file
+ibogosavljevic@wavecomp.com
+skovacevic@wavecomp.com \ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index f0e463265e..7913d6398c 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -29,7 +29,7 @@ class SimplifiedOperatorReducerTest : public GraphTest {
protected:
Reduction Reduce(Node* node) {
- JSHeapBroker js_heap_broker(isolate());
+ JSHeapBroker js_heap_broker(isolate(), zone());
MachineOperatorBuilder machine(zone());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index d1283a8ad1..53459c314a 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -22,7 +22,7 @@ class TyperTest : public TypedGraphTest {
public:
TyperTest()
: TypedGraphTest(3),
- js_heap_broker_(isolate()),
+ js_heap_broker_(isolate(), zone()),
operation_typer_(isolate(), &js_heap_broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
diff --git a/deps/v8/test/unittests/counters-unittest.cc b/deps/v8/test/unittests/counters-unittest.cc
index d4772934d6..d137d68ee9 100644
--- a/deps/v8/test/unittests/counters-unittest.cc
+++ b/deps/v8/test/unittests/counters-unittest.cc
@@ -4,6 +4,7 @@
#include <vector>
+#include "src/api-inl.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/time.h"
#include "src/counters-inl.h"
@@ -570,7 +571,7 @@ TEST_F(RuntimeCallStatsTest, BasicJavaScript) {
{
NativeTimeScope native_timer_scope;
- RunJS("function f() { return 1; }");
+ RunJS("function f() { return 1; };");
}
EXPECT_EQ(1, counter->count());
int64_t time = counter->time().InMicroseconds();
@@ -578,7 +579,7 @@ TEST_F(RuntimeCallStatsTest, BasicJavaScript) {
{
NativeTimeScope native_timer_scope;
- RunJS("f()");
+ RunJS("f();");
}
EXPECT_EQ(2, counter->count());
EXPECT_LE(time, counter->time().InMicroseconds());
@@ -587,38 +588,43 @@ TEST_F(RuntimeCallStatsTest, BasicJavaScript) {
TEST_F(RuntimeCallStatsTest, FunctionLengthGetter) {
RuntimeCallCounter* getter_counter =
stats()->GetCounter(RuntimeCallCounterId::kFunctionLengthGetter);
- RuntimeCallCounter* js_counter =
- stats()->GetCounter(RuntimeCallCounterId::kJS_Execution);
EXPECT_EQ(0, getter_counter->count());
- EXPECT_EQ(0, js_counter->count());
+ EXPECT_EQ(0, js_counter()->count());
EXPECT_EQ(0, getter_counter->time().InMicroseconds());
- EXPECT_EQ(0, js_counter->time().InMicroseconds());
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
{
NativeTimeScope native_timer_scope;
- RunJS("function f(array) { return array.length; }");
+ RunJS("function f(array) { return array.length; };");
}
EXPECT_EQ(0, getter_counter->count());
- EXPECT_EQ(1, js_counter->count());
+ EXPECT_EQ(1, js_counter()->count());
EXPECT_EQ(0, getter_counter->time().InMicroseconds());
- int64_t js_time = js_counter->time().InMicroseconds();
+ int64_t js_time = js_counter()->time().InMicroseconds();
EXPECT_LT(0, js_time);
{
NativeTimeScope native_timer_scope;
- RunJS("f.length");
+ RunJS("f.length;");
}
EXPECT_EQ(1, getter_counter->count());
- EXPECT_EQ(2, js_counter->count());
+ EXPECT_EQ(2, js_counter()->count());
EXPECT_LE(0, getter_counter->time().InMicroseconds());
- EXPECT_LE(js_time, js_counter->time().InMicroseconds());
+ EXPECT_LE(js_time, js_counter()->time().InMicroseconds());
{
NativeTimeScope native_timer_scope;
- RunJS("for (let i = 0; i < 50; i++) { f.length }");
+ RunJS("for (let i = 0; i < 50; i++) { f.length };");
}
EXPECT_EQ(51, getter_counter->count());
- EXPECT_EQ(3, js_counter->count());
+ EXPECT_EQ(3, js_counter()->count());
+
+ {
+ NativeTimeScope native_timer_scope;
+ RunJS("for (let i = 0; i < 1000; i++) { f.length; };");
+ }
+ EXPECT_EQ(1051, getter_counter->count());
+ EXPECT_EQ(4, js_counter()->count());
}
namespace {
@@ -631,7 +637,10 @@ static void CustomCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
}
} // namespace
-TEST_F(RuntimeCallStatsTest, CustomCallback) {
+TEST_F(RuntimeCallStatsTest, CallbackFunction) {
+ RuntimeCallCounter* callback_counter =
+ stats()->GetCounter(RuntimeCallCounterId::kFunctionCallback);
+
current_test = this;
// Set up a function template with a custom callback.
v8::Isolate* isolate = v8_isolate();
@@ -645,9 +654,9 @@ TEST_F(RuntimeCallStatsTest, CustomCallback) {
object_template->NewInstance(v8_context()).ToLocalChecked();
SetGlobalProperty("custom_object", object);
- // TODO(cbruni): Check api accessor timer (one above the custom callback).
EXPECT_EQ(0, js_counter()->count());
EXPECT_EQ(0, counter()->count());
+ EXPECT_EQ(0, callback_counter->count());
EXPECT_EQ(0, counter2()->count());
{
RuntimeCallTimerScope scope(stats(), counter_id());
@@ -655,29 +664,105 @@ TEST_F(RuntimeCallStatsTest, CustomCallback) {
RunJS("custom_object.callback();");
}
EXPECT_EQ(1, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(1, callback_counter->count());
+ EXPECT_EQ(1, counter2()->count());
// Given that no native timers are used, only the two scopes explitly
// mentioned above will track the time.
EXPECT_EQ(0, js_counter()->time().InMicroseconds());
- EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
EXPECT_EQ(100, counter()->time().InMicroseconds());
- EXPECT_EQ(1, counter2()->count());
EXPECT_EQ(kCustomCallbackTime, counter2()->time().InMicroseconds());
- RunJS("for (let i = 0; i < 9; i++) { custom_object.callback() };");
+ RunJS("for (let i = 0; i < 9; i++) { custom_object.callback(); };");
EXPECT_EQ(2, js_counter()->count());
- EXPECT_EQ(0, js_counter()->time().InMicroseconds());
EXPECT_EQ(1, counter()->count());
- EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(10, callback_counter->count());
EXPECT_EQ(10, counter2()->count());
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
EXPECT_EQ(kCustomCallbackTime * 10, counter2()->time().InMicroseconds());
- RunJS("for (let i = 0; i < 4000; i++) { custom_object.callback() };");
+ RunJS("for (let i = 0; i < 4000; i++) { custom_object.callback(); };");
EXPECT_EQ(3, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(4010, callback_counter->count());
+ EXPECT_EQ(4010, counter2()->count());
EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(kCustomCallbackTime * 4010, counter2()->time().InMicroseconds());
+}
+
+TEST_F(RuntimeCallStatsTest, ApiGetter) {
+ RuntimeCallCounter* callback_counter =
+ stats()->GetCounter(RuntimeCallCounterId::kFunctionCallback);
+ current_test = this;
+ // Set up a function template with an api accessor.
+ v8::Isolate* isolate = v8_isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetAccessorProperty(
+ NewString("apiGetter"),
+ v8::FunctionTemplate::New(isolate, CustomCallback));
+ v8::Local<v8::Object> object =
+ object_template->NewInstance(v8_context()).ToLocalChecked();
+ SetGlobalProperty("custom_object", object);
+
+ // TODO(cbruni): Check api accessor timer (one above the custom callback).
+ EXPECT_EQ(0, js_counter()->count());
+ EXPECT_EQ(0, counter()->count());
+ EXPECT_EQ(0, callback_counter->count());
+ EXPECT_EQ(0, counter2()->count());
+
+ {
+ RuntimeCallTimerScope scope(stats(), counter_id());
+ Sleep(100);
+ RunJS("custom_object.apiGetter;");
+ }
+ PrintStats();
+
+ EXPECT_EQ(1, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(1, callback_counter->count());
+ EXPECT_EQ(1, counter2()->count());
+  // Given that no native timers are used, only the two scopes explicitly
+ // mentioned above will track the time.
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(kCustomCallbackTime, counter2()->time().InMicroseconds());
+
+ RunJS("for (let i = 0; i < 9; i++) { custom_object.apiGetter };");
+ PrintStats();
+
+ EXPECT_EQ(2, js_counter()->count());
EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(10, callback_counter->count());
+ EXPECT_EQ(10, counter2()->count());
+
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(kCustomCallbackTime * 10, counter2()->time().InMicroseconds());
+
+ RunJS("for (let i = 0; i < 4000; i++) { custom_object.apiGetter };");
+ PrintStats();
+
+ EXPECT_EQ(3, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(4010, callback_counter->count());
EXPECT_EQ(4010, counter2()->count());
+
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
EXPECT_EQ(kCustomCallbackTime * 4010, counter2()->time().InMicroseconds());
+
+ PrintStats();
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index e07fa1b327..ac2cb3e2ee 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/heap/embedder-tracing.h"
+#include "src/heap/heap.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -33,12 +34,11 @@ class MockEmbedderHeapTracer : public EmbedderHeapTracer {
MOCK_METHOD0(TracePrologue, void());
MOCK_METHOD0(TraceEpilogue, void());
MOCK_METHOD0(AbortTracing, void());
- MOCK_METHOD0(EnterFinalPause, void());
+ MOCK_METHOD1(EnterFinalPause, void(EmbedderHeapTracer::EmbedderStackState));
MOCK_METHOD0(IsTracingDone, bool());
MOCK_METHOD1(RegisterV8References,
void(const std::vector<std::pair<void*, void*> >&));
- MOCK_METHOD2(AdvanceTracing,
- bool(double deadline_in_ms, AdvanceTracingActions actions));
+ MOCK_METHOD1(AdvanceTracing, bool(double deadline_in_ms));
};
TEST(LocalEmbedderHeapTracer, InUse) {
@@ -55,10 +55,8 @@ TEST(LocalEmbedderHeapTracer, NoRemoteTracer) {
EXPECT_FALSE(local_tracer.InUse());
local_tracer.TracePrologue();
local_tracer.EnterFinalPause();
- bool more_work = local_tracer.Trace(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
- EXPECT_FALSE(more_work);
+ bool done = local_tracer.Trace(std::numeric_limits<double>::infinity());
+ EXPECT_TRUE(done);
local_tracer.TraceEpilogue();
}
@@ -100,7 +98,38 @@ TEST(LocalEmbedderHeapTracer, EnterFinalPauseForwards) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, EnterFinalPause());
+ EXPECT_CALL(remote_tracer, EnterFinalPause(_));
+ local_tracer.EnterFinalPause();
+}
+
+TEST(LocalEmbedderHeapTracer, EnterFinalPauseDefaultStackStateUnkown) {
+ StrictMock<MockEmbedderHeapTracer> remote_tracer;
+ LocalEmbedderHeapTracer local_tracer(nullptr);
+ local_tracer.SetRemoteTracer(&remote_tracer);
+  // The default stack state is expected to be unknown.
+ EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kUnknown));
+ local_tracer.EnterFinalPause();
+}
+
+TEST(LocalEmbedderHeapTracer, EnterFinalPauseStackStateIsForwarded) {
+ StrictMock<MockEmbedderHeapTracer> remote_tracer;
+ LocalEmbedderHeapTracer local_tracer(nullptr);
+ local_tracer.SetRemoteTracer(&remote_tracer);
+ local_tracer.SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::kEmpty);
+ EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ local_tracer.EnterFinalPause();
+}
+
+TEST(LocalEmbedderHeapTracer, EnterFinalPauseStackStateResets) {
+ StrictMock<MockEmbedderHeapTracer> remote_tracer;
+ LocalEmbedderHeapTracer local_tracer(nullptr);
+ local_tracer.SetRemoteTracer(&remote_tracer);
+ local_tracer.SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::kEmpty);
+ EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kEmpty));
+ local_tracer.EnterFinalPause();
+ EXPECT_CALL(remote_tracer, EnterFinalPause(EmbedderHeapTracer::kUnknown));
local_tracer.EnterFinalPause();
}
@@ -140,10 +169,8 @@ TEST(LocalEmbedderHeapTracer, TraceFinishes) {
EXPECT_EQ(1u, local_tracer.NumberOfCachedWrappersToTrace());
EXPECT_CALL(remote_tracer, RegisterV8References(_));
local_tracer.RegisterWrappersWithRemoteTracer();
- EXPECT_CALL(remote_tracer, AdvanceTracing(0, _)).WillOnce(Return(false));
- EXPECT_FALSE(local_tracer.Trace(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION)));
+ EXPECT_CALL(remote_tracer, AdvanceTracing(_)).WillOnce(Return(true));
+ EXPECT_TRUE(local_tracer.Trace(std::numeric_limits<double>::infinity()));
EXPECT_EQ(0u, local_tracer.NumberOfCachedWrappersToTrace());
}
@@ -155,10 +182,8 @@ TEST(LocalEmbedderHeapTracer, TraceDoesNotFinish) {
EXPECT_EQ(1u, local_tracer.NumberOfCachedWrappersToTrace());
EXPECT_CALL(remote_tracer, RegisterV8References(_));
local_tracer.RegisterWrappersWithRemoteTracer();
- EXPECT_CALL(remote_tracer, AdvanceTracing(0, _)).WillOnce(Return(true));
- EXPECT_TRUE(local_tracer.Trace(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION)));
+ EXPECT_CALL(remote_tracer, AdvanceTracing(_)).WillOnce(Return(false));
+ EXPECT_FALSE(local_tracer.Trace(1.0));
EXPECT_EQ(0u, local_tracer.NumberOfCachedWrappersToTrace());
}
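The mock above pins down the updated EmbedderHeapTracer interface in this V8 drop: EnterFinalPause now receives an EmbedderStackState, and AdvanceTracing takes only a deadline and reports completion. A hedged sketch of a concrete tracer written against those signatures; the work-list handling is illustrative only.

// Sketch, not part of the patch: signatures mirror the mock above.
#include <utility>
#include <vector>
#include "include/v8.h"

class SketchEmbedderTracer : public v8::EmbedderHeapTracer {
 public:
  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& refs) override {
    wrappers_.insert(wrappers_.end(), refs.begin(), refs.end());
  }
  void TracePrologue() override {}
  void EnterFinalPause(EmbedderStackState stack_state) override {
    // LocalEmbedderHeapTracer forwards kUnknown unless a state was set for
    // the next finalization, as the EnterFinalPause* tests above verify.
    stack_state_ = stack_state;
  }
  bool AdvanceTracing(double deadline_in_ms) override {
    // Trace wrappers until done or the deadline passes; true means finished.
    wrappers_.clear();
    return true;
  }
  bool IsTracingDone() override { return wrappers_.empty(); }
  void TraceEpilogue() override {}
  void AbortTracing() override { wrappers_.clear(); }

 private:
  std::vector<std::pair<void*, void*>> wrappers_;
  EmbedderStackState stack_state_ = kUnknown;
};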
diff --git a/deps/v8/test/unittests/heap/heap-controller-unittest.cc b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
index dc75820f64..b2446afa84 100644
--- a/deps/v8/test/unittests/heap/heap-controller-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
@@ -32,80 +32,82 @@ void CheckEqualRounded(double expected, double actual) {
EXPECT_DOUBLE_EQ(expected, actual);
}
-TEST(HeapController, HeapGrowingFactor) {
- CheckEqualRounded(HeapController::kMaxHeapGrowingFactor,
- HeapController::HeapGrowingFactor(34, 1, 4.0));
- CheckEqualRounded(3.553, HeapController::HeapGrowingFactor(45, 1, 4.0));
- CheckEqualRounded(2.830, HeapController::HeapGrowingFactor(50, 1, 4.0));
- CheckEqualRounded(1.478, HeapController::HeapGrowingFactor(100, 1, 4.0));
- CheckEqualRounded(1.193, HeapController::HeapGrowingFactor(200, 1, 4.0));
- CheckEqualRounded(1.121, HeapController::HeapGrowingFactor(300, 1, 4.0));
- CheckEqualRounded(HeapController::HeapGrowingFactor(300, 1, 4.0),
- HeapController::HeapGrowingFactor(600, 2, 4.0));
- CheckEqualRounded(HeapController::kMinHeapGrowingFactor,
- HeapController::HeapGrowingFactor(400, 1, 4.0));
+TEST_F(HeapControllerTest, HeapGrowingFactor) {
+ HeapController heap_controller(i_isolate()->heap());
+ double min_factor = heap_controller.kMinGrowingFactor;
+ double max_factor = heap_controller.kMaxGrowingFactor;
+
+ CheckEqualRounded(max_factor, heap_controller.GrowingFactor(34, 1, 4.0));
+ CheckEqualRounded(3.553, heap_controller.GrowingFactor(45, 1, 4.0));
+ CheckEqualRounded(2.830, heap_controller.GrowingFactor(50, 1, 4.0));
+ CheckEqualRounded(1.478, heap_controller.GrowingFactor(100, 1, 4.0));
+ CheckEqualRounded(1.193, heap_controller.GrowingFactor(200, 1, 4.0));
+ CheckEqualRounded(1.121, heap_controller.GrowingFactor(300, 1, 4.0));
+ CheckEqualRounded(heap_controller.GrowingFactor(300, 1, 4.0),
+ heap_controller.GrowingFactor(600, 2, 4.0));
+ CheckEqualRounded(min_factor, heap_controller.GrowingFactor(400, 1, 4.0));
}
-TEST(HeapController, MaxHeapGrowingFactor) {
- CheckEqualRounded(1.3, HeapController::MaxHeapGrowingFactor(
- HeapController::kMinOldGenerationSize * MB));
- CheckEqualRounded(1.600, HeapController::MaxHeapGrowingFactor(
- HeapController::kMaxOldGenerationSize / 2 * MB));
- CheckEqualRounded(1.999, HeapController::MaxHeapGrowingFactor(
- (HeapController::kMaxOldGenerationSize -
- Heap::kPointerMultiplier) *
- MB));
+TEST_F(HeapControllerTest, MaxHeapGrowingFactor) {
+ HeapController heap_controller(i_isolate()->heap());
CheckEqualRounded(
- 4.0,
- HeapController::MaxHeapGrowingFactor(
- static_cast<size_t>(HeapController::kMaxOldGenerationSize) * MB));
+ 1.3, heap_controller.MaxGrowingFactor(heap_controller.kMinSize * MB));
+ CheckEqualRounded(1.600, heap_controller.MaxGrowingFactor(
+ heap_controller.kMaxSize / 2 * MB));
+ CheckEqualRounded(
+ 1.999, heap_controller.MaxGrowingFactor(
+ (heap_controller.kMaxSize - Heap::kPointerMultiplier) * MB));
+ CheckEqualRounded(4.0,
+ heap_controller.MaxGrowingFactor(
+ static_cast<size_t>(heap_controller.kMaxSize) * MB));
}
TEST_F(HeapControllerTest, OldGenerationAllocationLimit) {
Heap* heap = i_isolate()->heap();
+ HeapController heap_controller(heap);
size_t old_gen_size = 128 * MB;
size_t max_old_generation_size = 512 * MB;
double gc_speed = 100;
double mutator_speed = 1;
size_t new_space_capacity = 16 * MB;
- double max_factor =
- HeapController::MaxHeapGrowingFactor(max_old_generation_size);
+ double max_factor = heap_controller.MaxGrowingFactor(max_old_generation_size);
double factor =
- HeapController::HeapGrowingFactor(gc_speed, mutator_speed, max_factor);
+ heap_controller.GrowingFactor(gc_speed, mutator_speed, max_factor);
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateOldGenerationAllocationLimit(
+ heap->heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
new_space_capacity, Heap::HeapGrowingMode::kDefault));
- factor = Min(factor, HeapController::kConservativeHeapGrowingFactor);
+ factor = Min(factor, heap_controller.kConservativeGrowingFactor);
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateOldGenerationAllocationLimit(
+ heap->heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
new_space_capacity, Heap::HeapGrowingMode::kSlow));
- factor = Min(factor, HeapController::kConservativeHeapGrowingFactor);
+ factor = Min(factor, heap_controller.kConservativeGrowingFactor);
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateOldGenerationAllocationLimit(
+ heap->heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
new_space_capacity, Heap::HeapGrowingMode::kConservative));
- factor = HeapController::kMinHeapGrowingFactor;
+ factor = heap_controller.kMinGrowingFactor;
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateOldGenerationAllocationLimit(
+ heap->heap_controller()->CalculateAllocationLimit(
old_gen_size, max_old_generation_size, gc_speed, mutator_speed,
new_space_capacity, Heap::HeapGrowingMode::kMinimal));
}
-TEST(HeapController, MaxOldGenerationSize) {
+TEST_F(HeapControllerTest, MaxOldGenerationSize) {
+ HeapController heap_controller(i_isolate()->heap());
uint64_t configurations[][2] = {
- {0, HeapController::kMinOldGenerationSize},
- {512, HeapController::kMinOldGenerationSize},
+ {0, heap_controller.kMinSize},
+ {512, heap_controller.kMinSize},
{1 * GB, 256 * Heap::kPointerMultiplier},
{2 * static_cast<uint64_t>(GB), 512 * Heap::kPointerMultiplier},
- {4 * static_cast<uint64_t>(GB), HeapController::kMaxOldGenerationSize},
- {8 * static_cast<uint64_t>(GB), HeapController::kMaxOldGenerationSize}};
+ {4 * static_cast<uint64_t>(GB), heap_controller.kMaxSize},
+ {8 * static_cast<uint64_t>(GB), heap_controller.kMaxSize}};
for (auto configuration : configurations) {
ASSERT_EQ(configuration[1],
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index 13c0297489..d81b7e1413 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
@@ -51,6 +52,69 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
delete compaction_space;
}
+TEST_F(SpacesTest, WriteBarrierFromHeapObject) {
+ constexpr Address address1 = Page::kPageSize;
+ HeapObject* object1 = reinterpret_cast<HeapObject*>(address1);
+ MemoryChunk* chunk1 = MemoryChunk::FromHeapObject(object1);
+ heap_internals::MemoryChunk* slim_chunk1 =
+ heap_internals::MemoryChunk::FromHeapObject(object1);
+ EXPECT_EQ(static_cast<void*>(chunk1), static_cast<void*>(slim_chunk1));
+ constexpr Address address2 = 2 * Page::kPageSize - 1;
+ HeapObject* object2 = reinterpret_cast<HeapObject*>(address2);
+ MemoryChunk* chunk2 = MemoryChunk::FromHeapObject(object2);
+ heap_internals::MemoryChunk* slim_chunk2 =
+ heap_internals::MemoryChunk::FromHeapObject(object2);
+ EXPECT_EQ(static_cast<void*>(chunk2), static_cast<void*>(slim_chunk2));
+}
+
+TEST_F(SpacesTest, WriteBarrierIsMarking) {
+ char memory[256];
+ memset(&memory, 0, sizeof(memory));
+ MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+ heap_internals::MemoryChunk* slim_chunk =
+ reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+ EXPECT_FALSE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+ EXPECT_FALSE(slim_chunk->IsMarking());
+ chunk->SetFlag(MemoryChunk::INCREMENTAL_MARKING);
+ EXPECT_TRUE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+ EXPECT_TRUE(slim_chunk->IsMarking());
+ chunk->ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
+ EXPECT_FALSE(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING));
+ EXPECT_FALSE(slim_chunk->IsMarking());
+}
+
+TEST_F(SpacesTest, WriteBarrierInNewSpaceToSpace) {
+ char memory[256];
+ memset(&memory, 0, sizeof(memory));
+ MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+ heap_internals::MemoryChunk* slim_chunk =
+ reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+ EXPECT_FALSE(chunk->InNewSpace());
+ EXPECT_FALSE(slim_chunk->InNewSpace());
+ chunk->SetFlag(MemoryChunk::IN_TO_SPACE);
+ EXPECT_TRUE(chunk->InNewSpace());
+ EXPECT_TRUE(slim_chunk->InNewSpace());
+ chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
+ EXPECT_FALSE(chunk->InNewSpace());
+ EXPECT_FALSE(slim_chunk->InNewSpace());
+}
+
+TEST_F(SpacesTest, WriteBarrierInNewSpaceFromSpace) {
+ char memory[256];
+ memset(&memory, 0, sizeof(memory));
+ MemoryChunk* chunk = reinterpret_cast<MemoryChunk*>(&memory);
+ heap_internals::MemoryChunk* slim_chunk =
+ reinterpret_cast<heap_internals::MemoryChunk*>(&memory);
+ EXPECT_FALSE(chunk->InNewSpace());
+ EXPECT_FALSE(slim_chunk->InNewSpace());
+ chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
+ EXPECT_TRUE(chunk->InNewSpace());
+ EXPECT_TRUE(slim_chunk->InNewSpace());
+ chunk->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+ EXPECT_FALSE(chunk->InNewSpace());
+ EXPECT_FALSE(slim_chunk->InNewSpace());
+}
+
TEST_F(SpacesTest, CodeRangeAddressReuse) {
CodeRangeAddressHint hint;
// Create code ranges.
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 1ae636eceb..5030d3897d 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -376,7 +376,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateArrayLiteral(0, 0, 0)
.CreateEmptyArrayLiteral(0)
.CreateObjectLiteral(0, 0, 0, reg)
- .CreateEmptyObjectLiteral();
+ .CreateEmptyObjectLiteral()
+ .CloneObject(reg, 0, 0);
// Emit load and store operations for module variables.
builder.LoadModuleVariable(-1, 42)
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/object-unittest.cc
index 0b603298ad..ad8d631961 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/object-unittest.cc
@@ -6,6 +6,7 @@
#include <iostream>
#include <limits>
+#include "src/api-inl.h"
#include "src/compiler.h"
#include "src/objects-inl.h"
#include "src/objects.h"
diff --git a/deps/v8/test/unittests/parser/preparser-unittest.cc b/deps/v8/test/unittests/parser/preparser-unittest.cc
index a9f77b4b7a..f20fbb2cee 100644
--- a/deps/v8/test/unittests/parser/preparser-unittest.cc
+++ b/deps/v8/test/unittests/parser/preparser-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/objects-inl.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index ee601da900..c771906dc2 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -42,7 +42,8 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
// Ensure that the function can be compiled lazily.
shared->set_uncompiled_data(
*isolate->factory()->NewUncompiledDataWithoutPreParsedScope(
- 0, source->length(), function_literal_id));
+ ReadOnlyRoots(isolate).empty_string_handle(), 0, source->length(),
+ function_literal_id));
// Make sure we have an outer scope info, even though it's empty
shared->set_raw_outer_scope_info_or_feedback_metadata(
ScopeInfo::Empty(isolate));
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index d19c337239..2b099e0ea5 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -6,7 +6,7 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/platform/time.h"
#include "src/flags.h"
#include "src/isolate.h"
@@ -67,17 +67,19 @@ Local<Value> TestWithIsolate::RunJS(const char* source) {
TestWithContext::TestWithContext()
: context_(Context::New(isolate())), context_scope_(context_) {}
-
TestWithContext::~TestWithContext() {}
+v8::Local<v8::String> TestWithContext::NewString(const char* string) {
+ return v8::String::NewFromUtf8(v8_isolate(), string,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+}
+
void TestWithContext::SetGlobalProperty(const char* name,
v8::Local<v8::Value> value) {
- v8::Local<v8::String> property_name =
- v8::String::NewFromUtf8(v8_isolate(), name, v8::NewStringType::kNormal)
- .ToLocalChecked();
CHECK(v8_context()
->Global()
- ->Set(v8_context(), property_name, value)
+ ->Set(v8_context(), NewString(name), value)
.FromJust());
}
@@ -89,6 +91,10 @@ TestWithIsolateAndZone::~TestWithIsolateAndZone() {}
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
+Handle<Object> TestWithIsolate::RunJSInternal(const char* source) {
+ return Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
+}
+
base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
return isolate()->random_number_generator();
}
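The refactor above factors UTF-8 string creation into TestWithContext::NewString and funnels handle conversion through RunJSInternal. A hedged sketch of a hypothetical test leaning on those helpers; the property name and script are illustrative, and the test is assumed to live alongside the other v8:: unittests.

// Sketch only: exercising the helpers added above.
TEST_F(TestWithContext, GlobalRoundTrip_Sketch) {
  SetGlobalProperty("answer", v8::Integer::New(v8_isolate(), 42));
  v8::Local<v8::Value> result = RunJS("answer + 1;");
  EXPECT_TRUE(result->StrictEquals(v8::Integer::New(v8_isolate(), 43)));
  EXPECT_TRUE(NewString("hello")->IsString());
}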
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 17a5eb7c21..c361810219 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -8,7 +8,6 @@
#include <vector>
#include "include/v8.h"
-#include "src/api.h"
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
#include "src/handles.h"
@@ -61,6 +60,7 @@ class TestWithContext : public virtual v8::TestWithIsolate {
const Local<Context>& context() const { return v8_context(); }
const Local<Context>& v8_context() const { return context_; }
+ v8::Local<v8::String> NewString(const char* string);
void SetGlobalProperty(const char* name, v8::Local<v8::Value> value);
private:
@@ -85,10 +85,9 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
Isolate* isolate() const { return i_isolate(); }
template <typename T = Object>
Handle<T> RunJS(const char* source) {
- Handle<Object> result =
- Utils::OpenHandle(*::v8::TestWithIsolate::RunJS(source));
- return Handle<T>::cast(result);
+ return Handle<T>::cast(RunJSInternal(source));
}
+ Handle<Object> RunJSInternal(const char* source);
base::RandomNumberGenerator* random_number_generator() const;
private:
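Call sites keep the terse templated form while the new non-template RunJSInternal does the Local-to-Handle conversion once. A hedged sketch of both flavors, assumed to sit inside a TestWithIsolate-derived test body.

// Sketch only: templated and default uses of RunJS from above.
Handle<Object> sum = RunJS("1 + 2");                          // T = Object
Handle<JSFunction> fn = RunJS<JSFunction>("(function f() { return 1; })");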
diff --git a/deps/v8/test/unittests/torque/earley-parser-unittest.cc b/deps/v8/test/unittests/torque/earley-parser-unittest.cc
new file mode 100644
index 0000000000..9718a404c9
--- /dev/null
+++ b/deps/v8/test/unittests/torque/earley-parser-unittest.cc
@@ -0,0 +1,84 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/earley-parser.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+namespace {
+
+template <int op(int, int)>
+base::Optional<ParseResult> MakeBinop(ParseResultIterator* child_results) {
+ // Ideally, we would want to use int as a result type here instead of
+ // std::string. This is possible, but requires adding int to the list of
+ // supported ParseResult types in torque-parser.cc. To avoid changing that
+ // code, we use std::string here, which is already used in the Torque parser.
+ auto a = child_results->NextAs<std::string>();
+ auto b = child_results->NextAs<std::string>();
+ return ParseResult{std::to_string(op(std::stoi(a), std::stoi(b)))};
+}
+
+int plus(int a, int b) { return a + b; }
+int minus(int a, int b) { return a - b; }
+int mul(int a, int b) { return a * b; }
+
+} // namespace
+
+struct SimpleArithmeticGrammar : Grammar {
+ static bool MatchWhitespace(InputPosition* pos) {
+ while (MatchChar(std::isspace, pos)) {
+ }
+ return true;
+ }
+
+ static bool MatchInteger(InputPosition* pos) {
+ InputPosition current = *pos;
+ MatchString("-", &current);
+ if (MatchChar(std::isdigit, &current)) {
+ while (MatchChar(std::isdigit, &current)) {
+ }
+ *pos = current;
+ return true;
+ }
+ return false;
+ }
+
+ SimpleArithmeticGrammar() : Grammar(&sum_expression) {
+ SetWhitespace(MatchWhitespace);
+ }
+
+ Symbol integer = {Rule({Pattern(MatchInteger)}, YieldMatchedInput)};
+
+ Symbol atomic_expression = {Rule({&integer}),
+ Rule({Token("("), &sum_expression, Token(")")})};
+
+ Symbol mul_expression = {
+ Rule({&atomic_expression}),
+ Rule({&mul_expression, Token("*"), &atomic_expression}, MakeBinop<mul>)};
+
+ Symbol sum_expression = {
+ Rule({&mul_expression}),
+ Rule({&sum_expression, Token("+"), &mul_expression}, MakeBinop<plus>),
+ Rule({&sum_expression, Token("-"), &mul_expression}, MakeBinop<minus>)};
+};
+
+TEST(EarleyParser, SimpleArithmetic) {
+ SimpleArithmeticGrammar grammar;
+ SourceFileMap::Scope source_file_map;
+ CurrentSourceFile::Scope current_source_file{
+ SourceFileMap::AddSource("dummy_filename")};
+ std::string result1 =
+ grammar.Parse("-5 - 5 + (3 + 5) * 2")->Cast<std::string>();
+ ASSERT_EQ("6", result1);
+ std::string result2 = grammar.Parse("((-1 + (1) * 2 + 3 - 4 * 5 + -6 * 7))")
+ ->Cast<std::string>();
+ ASSERT_EQ("-58", result2);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
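The new grammar above builds Symbols out of Rules over Tokens and match functions. A hedged sketch of how the same pattern would admit another operator at '*' precedence; the `div` helper is hypothetical and mirrors plus/minus/mul.

// Sketch, not part of the patch: one extra Rule on mul_expression.
int div(int a, int b) { return a / b; }  // no zero check; illustration only

Symbol mul_expression = {
    Rule({&atomic_expression}),
    Rule({&mul_expression, Token("*"), &atomic_expression}, MakeBinop<mul>),
    Rule({&mul_expression, Token("/"), &atomic_expression}, MakeBinop<div>)};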
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index 92603b588a..77f609052a 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -8,7 +8,7 @@
#include <string>
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api-inl.h"
#include "src/base/build_config.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-objects.h"
@@ -1689,12 +1689,12 @@ TEST_F(ValueSerializerTest, RoundTripTypedArray) {
// Check that the right type comes out the other side for every kind of typed
// array.
Local<Value> value;
-#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype, size) \
- value = RoundTripTest("new " #Type "Array(2)"); \
- ASSERT_TRUE(value->Is##Type##Array()); \
- EXPECT_EQ(2u * size, TypedArray::Cast(*value)->ByteLength()); \
- EXPECT_EQ(2u, TypedArray::Cast(*value)->Length()); \
- ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
+#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype) \
+ value = RoundTripTest("new " #Type "Array(2)"); \
+ ASSERT_TRUE(value->Is##Type##Array()); \
+ EXPECT_EQ(2u * sizeof(ctype), TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length()); \
+ ExpectScriptTrue("Object.getPrototypeOf(result) === " #Type \
"Array.prototype");
TYPED_ARRAYS(TYPED_ARRAY_ROUND_TRIP_TEST)
@@ -2514,7 +2514,8 @@ TEST_F(ValueSerializerTestWithWasm, DefaultSerializationDelegate) {
Local<Message> message = InvalidEncodeTest(MakeWasm());
size_t msg_len = static_cast<size_t>(message->Get()->Length());
std::unique_ptr<char[]> buff(new char[msg_len + 1]);
- message->Get()->WriteOneByte(reinterpret_cast<uint8_t*>(buff.get()));
+ message->Get()->WriteOneByte(isolate(),
+ reinterpret_cast<uint8_t*>(buff.get()));
// the message ends with the custom error string
size_t custom_msg_len = strlen(kUnsupportedSerialization);
ASSERT_GE(msg_len, custom_msg_len);
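The hunk above moves to the String::WriteOneByte overload that takes an Isolate* explicitly, matching the signature change in this V8 release. A hedged sketch of the calling pattern, with `message` and `isolate()` as in the surrounding test.

// Sketch only: copying the message text into a one-byte buffer.
v8::Local<v8::String> text = message->Get();
size_t len = static_cast<size_t>(text->Length());
std::unique_ptr<char[]> buf(new char[len + 1]);
text->WriteOneByte(isolate(), reinterpret_cast<uint8_t*>(buf.get()));
// buf now holds the message bytes; WriteOneByte null-terminates by default.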
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 3b25056160..771c61e237 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -92,6 +92,9 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
class FunctionBodyDecoderTest : public TestWithZone {
public:
typedef std::pair<uint32_t, ValueType> LocalsDecl;
+ // All features are disabled by default and must be activated with
+ // a WASM_FEATURE_SCOPE in individual tests.
+ WasmFeatures enabled_features_;
FunctionBodyDecoderTest() : module(nullptr), local_decls(zone()) {}
@@ -133,8 +136,11 @@ class FunctionBodyDecoderTest : public TestWithZone {
PrepareBytecode(&start, &end, append_end);
// Verify the code.
+ FunctionBody body(sig, 0, start, end);
+ WasmFeatures unused_detected_features;
DecodeResult result =
- VerifyWasmCode(zone()->allocator(), module, sig, start, end);
+ VerifyWasmCode(zone()->allocator(), enabled_features_, module,
+ &unused_detected_features, body);
uint32_t pc = result.error_offset();
std::ostringstream str;
@@ -198,6 +204,17 @@ class FunctionBodyDecoderTest : public TestWithZone {
namespace {
+class EnableBoolScope {
+ public:
+ bool prev_;
+ bool* ptr_;
+ explicit EnableBoolScope(bool* ptr) : prev_(*ptr), ptr_(ptr) { *ptr = true; }
+ ~EnableBoolScope() { *ptr_ = prev_; }
+};
+
+#define WASM_FEATURE_SCOPE(feat) \
+ EnableBoolScope feat##_scope(&this->enabled_features_.feat);
+
constexpr size_t kMaxByteSizedLeb128 = 127;
// A helper for tests that require a module environment for functions,
@@ -263,7 +280,7 @@ TEST_F(FunctionBodyDecoderTest, Int32Const1) {
}
TEST_F(FunctionBodyDecoderTest, RefNull) {
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
byte code[] = {kExprRefNull};
EXPECT_VERIFIES_C(r_v, code);
}
@@ -1236,8 +1253,8 @@ TEST_F(FunctionBodyDecoderTest, MacrosInt64) {
}
TEST_F(FunctionBodyDecoderTest, AllSimpleExpressions) {
- EXPERIMENTAL_FLAG_SCOPE(se);
- EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(se);
+ WASM_FEATURE_SCOPE(anyref);
// Test all simple expressions which are described by a signature.
#define DECODE_TEST(name, opcode, sig) \
{ \
@@ -1476,7 +1493,7 @@ TEST_F(FunctionBodyDecoderTest, CallsWithMismatchedSigs3) {
}
TEST_F(FunctionBodyDecoderTest, MultiReturn) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
ValueType storage[] = {kWasmI32, kWasmI32};
FunctionSig sig_ii_v(2, 0, storage);
FunctionSig sig_v_ii(0, 2, storage);
@@ -1492,7 +1509,7 @@ TEST_F(FunctionBodyDecoderTest, MultiReturn) {
}
TEST_F(FunctionBodyDecoderTest, MultiReturnType) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
for (size_t a = 0; a < arraysize(kValueTypes); a++) {
for (size_t b = 0; b < arraysize(kValueTypes); b++) {
for (size_t c = 0; c < arraysize(kValueTypes); c++) {
@@ -1612,7 +1629,7 @@ TEST_F(FunctionBodyDecoderTest, IncompleteStore) {
}
TEST_F(FunctionBodyDecoderTest, IncompleteS8x16Shuffle) {
- EXPERIMENTAL_FLAG_SCOPE(simd);
+ WASM_FEATURE_SCOPE(simd);
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
builder.InitializeMemory();
@@ -2383,7 +2400,7 @@ TEST_F(FunctionBodyDecoderTest, Select_TypeCheck) {
}
TEST_F(FunctionBodyDecoderTest, Throw) {
- EXPERIMENTAL_FLAG_SCOPE(eh);
+ WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
@@ -2403,7 +2420,7 @@ TEST_F(FunctionBodyDecoderTest, Throw) {
TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
// TODO(titzer): unreachable code after throw should validate.
- EXPERIMENTAL_FLAG_SCOPE(eh);
+ WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
@@ -2420,7 +2437,7 @@ TEST_F(FunctionBodyDecoderTest, ThrowUnreachable) {
#define WASM_CATCH(index) kExprCatch, static_cast<byte>(index)
TEST_F(FunctionBodyDecoderTest, TryCatch) {
- EXPERIMENTAL_FLAG_SCOPE(eh);
+ WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
module = builder.module();
@@ -2445,7 +2462,7 @@ TEST_F(FunctionBodyDecoderTest, TryCatch) {
#undef WASM_CATCH
TEST_F(FunctionBodyDecoderTest, MultiValBlock1) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
@@ -2461,7 +2478,7 @@ TEST_F(FunctionBodyDecoderTest, MultiValBlock1) {
}
TEST_F(FunctionBodyDecoderTest, MultiValBlock2) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
@@ -2479,7 +2496,7 @@ TEST_F(FunctionBodyDecoderTest, MultiValBlock2) {
}
TEST_F(FunctionBodyDecoderTest, MultiValBlockBr) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
@@ -2491,7 +2508,7 @@ TEST_F(FunctionBodyDecoderTest, MultiValBlockBr) {
}
TEST_F(FunctionBodyDecoderTest, MultiValLoop1) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
@@ -2507,7 +2524,7 @@ TEST_F(FunctionBodyDecoderTest, MultiValLoop1) {
}
TEST_F(FunctionBodyDecoderTest, MultiValIf) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f0 = builder.AddSignature(sigs.ii_v());
@@ -2570,7 +2587,7 @@ TEST_F(FunctionBodyDecoderTest, MultiValIf) {
}
TEST_F(FunctionBodyDecoderTest, BlockParam) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
@@ -2596,7 +2613,7 @@ TEST_F(FunctionBodyDecoderTest, BlockParam) {
}
TEST_F(FunctionBodyDecoderTest, LoopParam) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
@@ -2622,7 +2639,7 @@ TEST_F(FunctionBodyDecoderTest, LoopParam) {
}
TEST_F(FunctionBodyDecoderTest, LoopParamBr) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
@@ -2644,7 +2661,7 @@ TEST_F(FunctionBodyDecoderTest, LoopParamBr) {
}
TEST_F(FunctionBodyDecoderTest, IfParam) {
- EXPERIMENTAL_FLAG_SCOPE(mv);
+ WASM_FEATURE_SCOPE(mv);
TestModuleBuilder builder;
module = builder.module();
byte f1 = builder.AddSignature(sigs.i_i());
@@ -2678,8 +2695,11 @@ TEST_F(FunctionBodyDecoderTest, Regression709741) {
PrepareBytecode(&start, &end, kAppendEnd);
for (const byte* i = start; i < end; i++) {
+ FunctionBody body(sigs.v_v(), 0, start, i);
+ WasmFeatures unused_detected_features;
DecodeResult result =
- VerifyWasmCode(zone()->allocator(), nullptr, sigs.v_v(), start, i);
+ VerifyWasmCode(zone()->allocator(), kAllWasmFeatures, nullptr,
+ &unused_detected_features, body);
if (result.ok()) {
std::ostringstream str;
str << "Expected verification to fail";
@@ -2999,6 +3019,7 @@ typedef ZoneVector<ValueType> TypesOfLocals;
class LocalDeclDecoderTest : public TestWithZone {
public:
v8::internal::AccountingAllocator allocator;
+ WasmFeatures enabled_features_;
size_t ExpectRun(TypesOfLocals map, size_t pos, ValueType expected,
size_t count) {
@@ -3007,6 +3028,11 @@ class LocalDeclDecoderTest : public TestWithZone {
}
return pos;
}
+
+ bool DecodeLocalDecls(BodyLocalDecls* decls, const byte* start,
+ const byte* end) {
+ return i::wasm::DecodeLocalDecls(enabled_features_, decls, start, end);
+ }
};
TEST_F(LocalDeclDecoderTest, EmptyLocals) {
@@ -3024,7 +3050,7 @@ TEST_F(LocalDeclDecoderTest, NoLocals) {
}
TEST_F(LocalDeclDecoderTest, OneLocal) {
- EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueType type = kValueTypes[i];
const byte data[] = {1, 1,
@@ -3040,7 +3066,7 @@ TEST_F(LocalDeclDecoderTest, OneLocal) {
}
TEST_F(LocalDeclDecoderTest, FiveLocals) {
- EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueType type = kValueTypes[i];
const byte data[] = {1, 5,
@@ -3180,6 +3206,7 @@ TEST_F(BytecodeIteratorTest, WithLocalDecls) {
EXPECT_FALSE(iter.has_next());
}
+#undef WASM_FEATURE_SCOPE
#undef B1
#undef B2
#undef B3
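The WASM_FEATURE_SCOPE macro that replaces EXPERIMENTAL_FLAG_SCOPE throughout this file is an RAII toggle over one boolean member of the fixture's per-test WasmFeatures set, instead of a global --experimental-wasm-* flag; its definition for this file sits above the hunks shown here, but it follows the same EnableBoolScope pattern spelled out for module-decoder-unittest.cc below. A minimal standalone sketch of that pattern (names ending in "Sketch" are illustrative, not the V8 declarations):

// Sketch of an RAII feature-toggle scope; compiles on its own and mirrors
// the EnableBoolScope / WASM_FEATURE_SCOPE shape used by these tests.
struct WasmFeaturesSketch {   // hypothetical stand-in for wasm::WasmFeatures
  bool anyref = false;
  bool mv = false;
  bool eh = false;
};

class ScopedFeature {
 public:
  explicit ScopedFeature(bool* flag, bool value = true)
      : flag_(flag), previous_(*flag) {
    *flag_ = value;                         // enable (or disable) for this scope
  }
  ~ScopedFeature() { *flag_ = previous_; }  // restore on scope exit

 private:
  bool* flag_;
  bool previous_;
};

// Usage, mirroring WASM_FEATURE_SCOPE(mv) inside a test body:
//   WasmFeaturesSketch enabled_features_;
//   ScopedFeature mv_scope(&enabled_features_.mv);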
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 26b4f74a7a..3507f897f9 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -7,6 +7,7 @@
#include "src/handles.h"
#include "src/objects-inl.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/common/wasm/flag-utils.h"
@@ -146,6 +147,8 @@ struct ValueTypePair {
class WasmModuleVerifyTest : public TestWithIsolateAndZone {
public:
+ WasmFeatures enabled_features_;
+
ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
// Add the wasm magic and version number automatically.
size_t size = static_cast<size_t>(module_end - module_start);
@@ -154,18 +157,39 @@ class WasmModuleVerifyTest : public TestWithIsolateAndZone {
auto temp = new byte[total];
memcpy(temp, header, sizeof(header));
memcpy(temp + sizeof(header), module_start, size);
- ModuleResult result =
- SyncDecodeWasmModule(isolate(), temp, temp + total, false, kWasmOrigin);
+ ModuleResult result = DecodeWasmModule(
+ enabled_features_, temp, temp + total, false, kWasmOrigin,
+ isolate()->counters(), isolate()->allocator());
delete[] temp;
return result;
}
ModuleResult DecodeModuleNoHeader(const byte* module_start,
const byte* module_end) {
- return SyncDecodeWasmModule(isolate(), module_start, module_end, false,
- kWasmOrigin);
+ return DecodeWasmModule(enabled_features_, module_start, module_end, false,
+ kWasmOrigin, isolate()->counters(),
+ isolate()->allocator());
+ }
+};
+
+namespace {
+class EnableBoolScope {
+ public:
+ bool prev_;
+ bool* ptr_;
+ explicit EnableBoolScope(bool* ptr, bool val = true)
+ : prev_(*ptr), ptr_(ptr) {
+ *ptr = val;
}
+ ~EnableBoolScope() { *ptr_ = prev_; }
};
+#define WASM_FEATURE_SCOPE(feat) \
+ EnableBoolScope feat##_scope(&this->enabled_features_.feat)
+
+#define WASM_FEATURE_SCOPE_VAL(feat, val) \
+ EnableBoolScope feat##_scope(&this->enabled_features_.feat, val)
+} // namespace
+
TEST_F(WasmModuleVerifyTest, WrongMagic) {
for (uint32_t x = 1; x; x <<= 1) {
const byte data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
@@ -217,7 +241,7 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
}
TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
- EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
SECTION(Global, 5), // --
1,
@@ -243,7 +267,7 @@ TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
}
TEST_F(WasmModuleVerifyTest, AnyRefGlobalWithGlobalInit) {
- EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
SECTION(Import, 8), // section header
1, // number of imports
@@ -313,6 +337,7 @@ TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
}
TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
+ WASM_FEATURE_SCOPE(mut_global);
{
static const byte data[] = {
SECTION(Global, 6), // --
@@ -444,7 +469,7 @@ TEST_F(WasmModuleVerifyTest, ZeroExceptions) {
};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
- EXPERIMENTAL_FLAG_SCOPE(eh);
+ WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(0u, result.val->exceptions.size());
@@ -458,7 +483,7 @@ TEST_F(WasmModuleVerifyTest, OneI32Exception) {
};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
- EXPERIMENTAL_FLAG_SCOPE(eh);
+ WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(1u, result.val->exceptions.size());
@@ -476,7 +501,7 @@ TEST_F(WasmModuleVerifyTest, TwoExceptions) {
1, kLocalI32};
FAIL_IF_NO_EXPERIMENTAL_EH(data);
- EXPERIMENTAL_FLAG_SCOPE(eh);
+ WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
EXPECT_EQ(2u, result.val->exceptions.size());
@@ -495,7 +520,7 @@ TEST_F(WasmModuleVerifyTest, Exception_invalid_type) {
FAIL_IF_NO_EXPERIMENTAL_EH(data);
// Should fail decoding exception section.
- EXPERIMENTAL_FLAG_SCOPE(eh);
+ WASM_FEATURE_SCOPE(eh);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
}
@@ -939,7 +964,7 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
// Test that if we have multiple tables, in the element section we can target
// and initialize all tables.
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -969,7 +994,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
// Test that if we have multiple tables, both imported and module-defined, in
// the element section we can target and initialize all tables.
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -1026,7 +1051,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
// Test that the order in which tables are targeted in the element section
// can be arbitrary.
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -1060,7 +1085,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
// Test that the order in which tables are targeted in the element section can
// be arbitrary. In this test, tables can be both imported and module-defined.
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -1117,7 +1142,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefTable) {
// Test that tables of type 'AnyRef' cannot be initialized by the element
// section.
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -1147,7 +1172,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefTable) {
TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
// Test that imported tables of type AnyRef cannot be initialized in the
// elements section.
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
@@ -1231,7 +1256,7 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithoutFlag) {
}
TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, true);
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
SECTION(Table, 7), // table section
ENTRY_COUNT(2), // 2 tables
@@ -1257,22 +1282,18 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
class WasmSignatureDecodeTest : public TestWithZone {
public:
- WasmSignatureDecodeTest()
- // In the following tests we turn on support for AnyRef by default. There
- // is a test (Fail_anyref_without_flag) which explicitly turns off support
- // for AnyRef.
- : flag_scope(&FLAG_experimental_wasm_anyref, true) {}
-
- private:
- FlagScope<bool> flag_scope;
+ WasmFeatures enabled_features_;
+
+ FunctionSig* DecodeSig(const byte* start, const byte* end) {
+ return DecodeWasmSignatureForTesting(enabled_features_, zone(), start, end);
+ }
};
TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
static const byte data[] = {SIG_ENTRY_v_v};
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(&zone, data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
@@ -1280,11 +1301,11 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
}
TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair ret_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_x(ret_type.code)};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(0u, sig->parameter_count());
@@ -1294,11 +1315,11 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
}
TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair param_type = kValueTypes[i];
const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(1u, sig->parameter_count());
@@ -1308,13 +1329,13 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
}
TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
+ WASM_FEATURE_SCOPE(anyref);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair ret_type = kValueTypes[i];
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueTypePair param_type = kValueTypes[j];
const byte data[] = {SIG_ENTRY_x_x(ret_type.code, param_type.code)};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(1u, sig->parameter_count());
@@ -1326,14 +1347,15 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
}
TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
+ WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(mv);
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueTypePair p0_type = kValueTypes[i];
for (size_t j = 0; j < arraysize(kValueTypes); j++) {
ValueTypePair p1_type = kValueTypes[j];
const byte data[] = {
SIG_ENTRY_x_xx(kLocalI32, p0_type.code, p1_type.code)};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
EXPECT_EQ(2u, sig->parameter_count());
@@ -1344,25 +1366,45 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
}
}
+TEST_F(WasmSignatureDecodeTest, Ok_tt_tt) {
+ WASM_FEATURE_SCOPE(anyref);
+ WASM_FEATURE_SCOPE(mv);
+ for (size_t i = 0; i < arraysize(kValueTypes); i++) {
+ ValueTypePair p0_type = kValueTypes[i];
+ for (size_t j = 0; j < arraysize(kValueTypes); j++) {
+ ValueTypePair p1_type = kValueTypes[j];
+ const byte data[] = {SIG_ENTRY_xx_xx(p0_type.code, p1_type.code,
+ p0_type.code, p1_type.code)};
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(2u, sig->parameter_count());
+ EXPECT_EQ(2u, sig->return_count());
+ EXPECT_EQ(p0_type.type, sig->GetParam(0));
+ EXPECT_EQ(p1_type.type, sig->GetParam(1));
+ EXPECT_EQ(p0_type.type, sig->GetReturn(0));
+ EXPECT_EQ(p1_type.type, sig->GetReturn(1));
+ }
+ }
+}
+
TEST_F(WasmSignatureDecodeTest, TooManyParams) {
static const byte data[] = {kWasmFunctionTypeCode,
WASM_I32V_3(kV8MaxWasmFunctionParams + 1),
kLocalI32, 0};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_FALSE(sig != nullptr);
}
TEST_F(WasmSignatureDecodeTest, TooManyReturns) {
for (int i = 0; i < 2; i++) {
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_mv, i != 0);
+ bool enable_mv = i != 0;
+ WASM_FEATURE_SCOPE_VAL(mv, enable_mv);
const int max_return_count = static_cast<int>(
- FLAG_experimental_wasm_mv ? kV8MaxWasmFunctionMultiReturns
- : kV8MaxWasmFunctionReturns);
+ enable_mv ? kV8MaxWasmFunctionMultiReturns : kV8MaxWasmFunctionReturns);
byte data[] = {kWasmFunctionTypeCode, 0, WASM_I32V_3(max_return_count + 1),
kLocalI32};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
}
@@ -1375,7 +1417,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
for (int i = 0; i < p + 1; i++) {
// Should fall off the end for all signatures.
- FunctionSig* sig = DecodeWasmSignatureForTesting(zone(), data, data + i);
+ FunctionSig* sig = DecodeSig(data, data + i);
EXPECT_EQ(nullptr, sig);
}
}
@@ -1383,14 +1425,13 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
TEST_F(WasmSignatureDecodeTest, Fail_anyref_without_flag) {
// Disable AnyRef support and check that decoding fails.
- FlagScope<bool> flag_scope(&FLAG_experimental_wasm_anyref, false);
+ WASM_FEATURE_SCOPE_VAL(anyref, false);
byte ref_types[] = {kLocalAnyFunc, kLocalAnyRef};
for (byte invalid_type : ref_types) {
for (size_t i = 0; i < SIZEOF_SIG_ENTRY_x_xx; i++) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
data[i] = invalid_type;
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
}
@@ -1401,41 +1442,43 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
for (size_t i = 0; i < SIZEOF_SIG_ENTRY_x_xx; i++) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
data[i] = kInvalidType;
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_ret_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kLocalVoid, kLocalI32)};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type1) {
static const byte data[] = {SIG_ENTRY_x_x(kLocalI32, kLocalVoid)};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
static const byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalVoid)};
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
+ FunctionSig* sig = DecodeSig(data, data + sizeof(data));
EXPECT_EQ(nullptr, sig);
}
class WasmFunctionVerifyTest : public TestWithIsolateAndZone {
public:
- WasmFunctionVerifyTest() {}
- virtual ~WasmFunctionVerifyTest() {}
-
+ WasmFeatures enabled_features_;
WasmModule module;
Vector<const byte> bytes;
- DISALLOW_COPY_AND_ASSIGN(WasmFunctionVerifyTest);
+
+ FunctionResult DecodeWasmFunction(const ModuleWireBytes& wire_bytes,
+ const WasmModule* module,
+ const byte* function_start,
+ const byte* function_end) {
+ return DecodeWasmFunctionForTesting(enabled_features_, zone(), wire_bytes,
+ module, function_start, function_end,
+ isolate()->counters());
+ }
};
TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
@@ -1453,8 +1496,8 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
kExprEnd // body
};
- FunctionResult result = SyncDecodeWasmFunction(
- isolate(), zone(), bytes, &module, data, data + sizeof(data));
+ FunctionResult result =
+ DecodeWasmFunction(bytes, &module, data, data + sizeof(data));
EXPECT_OK(result);
if (result.val && result.ok()) {
@@ -1583,6 +1626,7 @@ TEST_F(WasmModuleVerifyTest, ImportTable_nosigs1) {
}
TEST_F(WasmModuleVerifyTest, ImportTable_mutable_global) {
+ WASM_FEATURE_SCOPE(mut_global);
{
static const byte data[] = {
SECTION(Import, 8), // section header
@@ -1614,6 +1658,7 @@ TEST_F(WasmModuleVerifyTest, ImportTable_mutable_global) {
}
TEST_F(WasmModuleVerifyTest, ImportTable_mutability_malformed) {
+ WASM_FEATURE_SCOPE(mut_global);
static const byte data[] = {
SECTION(Import, 8),
1, // --
@@ -2021,61 +2066,68 @@ TEST_F(WasmModuleVerifyTest, Regression684855) {
EXPECT_VERIFIES(data);
}
-#define EXPECT_INIT_EXPR(Type, type, value, ...) \
- { \
- static const byte data[] = {__VA_ARGS__, kExprEnd}; \
- WasmInitExpr expr = \
- DecodeWasmInitExprForTesting(data, data + sizeof(data)); \
- EXPECT_EQ(WasmInitExpr::k##Type##Const, expr.kind); \
- EXPECT_EQ(value, expr.val.type##_const); \
+class WasmInitExprDecodeTest : public TestWithZone {
+ public:
+ WasmInitExprDecodeTest() {}
+
+ WasmFeatures enabled_features_;
+
+ WasmInitExpr DecodeInitExpr(const byte* start, const byte* end) {
+ return DecodeWasmInitExprForTesting(enabled_features_, start, end);
+ }
+};
+
+#define EXPECT_INIT_EXPR(Type, type, value, ...) \
+ { \
+ static const byte data[] = {__VA_ARGS__, kExprEnd}; \
+ WasmInitExpr expr = DecodeInitExpr(data, data + sizeof(data)); \
+ EXPECT_EQ(WasmInitExpr::k##Type##Const, expr.kind); \
+ EXPECT_EQ(value, expr.val.type##_const); \
}
-TEST_F(WasmModuleVerifyTest, InitExpr_i32) {
+#define EXPECT_INIT_EXPR_FAIL(...) \
+ { \
+ static const byte data[] = {__VA_ARGS__, kExprEnd}; \
+ WasmInitExpr expr = DecodeInitExpr(data, data + sizeof(data)); \
+ EXPECT_EQ(WasmInitExpr::kNone, expr.kind); \
+ }
+
+TEST_F(WasmInitExprDecodeTest, InitExpr_i32) {
EXPECT_INIT_EXPR(I32, i32, 33, WASM_I32V_1(33));
EXPECT_INIT_EXPR(I32, i32, -21, WASM_I32V_1(-21));
EXPECT_INIT_EXPR(I32, i32, 437, WASM_I32V_2(437));
EXPECT_INIT_EXPR(I32, i32, 77777, WASM_I32V_3(77777));
}
-TEST_F(WasmModuleVerifyTest, InitExpr_f32) {
+TEST_F(WasmInitExprDecodeTest, InitExpr_f32) {
EXPECT_INIT_EXPR(F32, f32, static_cast<float>(13.1), WASM_F32(13.1));
EXPECT_INIT_EXPR(F32, f32, static_cast<float>(-21.1), WASM_F32(-21.1));
EXPECT_INIT_EXPR(F32, f32, static_cast<float>(437.2), WASM_F32(437.2));
EXPECT_INIT_EXPR(F32, f32, static_cast<float>(77777.3), WASM_F32(77777.3));
}
-TEST_F(WasmModuleVerifyTest, InitExpr_i64) {
+TEST_F(WasmInitExprDecodeTest, InitExpr_i64) {
EXPECT_INIT_EXPR(I64, i64, 33, WASM_I64V_1(33));
EXPECT_INIT_EXPR(I64, i64, -21, WASM_I64V_2(-21));
EXPECT_INIT_EXPR(I64, i64, 437, WASM_I64V_5(437));
EXPECT_INIT_EXPR(I64, i64, 77777, WASM_I64V_7(77777));
}
-TEST_F(WasmModuleVerifyTest, InitExpr_f64) {
+TEST_F(WasmInitExprDecodeTest, InitExpr_f64) {
EXPECT_INIT_EXPR(F64, f64, 83.22, WASM_F64(83.22));
EXPECT_INIT_EXPR(F64, f64, -771.3, WASM_F64(-771.3));
EXPECT_INIT_EXPR(F64, f64, 43703.0, WASM_F64(43703.0));
EXPECT_INIT_EXPR(F64, f64, 77999.1, WASM_F64(77999.1));
}
-TEST_F(WasmModuleVerifyTest, InitExpr_AnyRef) {
- EXPERIMENTAL_FLAG_SCOPE(anyref);
+TEST_F(WasmInitExprDecodeTest, InitExpr_AnyRef) {
+ WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {kExprRefNull, kExprEnd};
- WasmInitExpr expr = DecodeWasmInitExprForTesting(data, data + sizeof(data));
+ WasmInitExpr expr = DecodeInitExpr(data, data + sizeof(data));
EXPECT_EQ(WasmInitExpr::kAnyRefConst, expr.kind);
}
-#undef EXPECT_INIT_EXPR
-
-#define EXPECT_INIT_EXPR_FAIL(...) \
- { \
- static const byte data[] = {__VA_ARGS__, kExprEnd}; \
- WasmInitExpr expr = \
- DecodeWasmInitExprForTesting(data, data + sizeof(data)); \
- EXPECT_EQ(WasmInitExpr::kNone, expr.kind); \
- }
-
-TEST_F(WasmModuleVerifyTest, InitExpr_illegal) {
+TEST_F(WasmInitExprDecodeTest, InitExpr_illegal) {
EXPECT_INIT_EXPR_FAIL(WASM_I32V_1(0), WASM_I32V_1(0));
EXPECT_INIT_EXPR_FAIL(WASM_GET_LOCAL(0));
EXPECT_INIT_EXPR_FAIL(WASM_SET_LOCAL(0, WASM_I32V_1(0)));
@@ -2083,8 +2135,6 @@ TEST_F(WasmModuleVerifyTest, InitExpr_illegal) {
EXPECT_INIT_EXPR_FAIL(WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO));
}
-#undef EXPECT_INIT_EXPR_FAIL
-
TEST_F(WasmModuleVerifyTest, Multiple_Named_Sections) {
static const byte data[] = {
SECTION(Unknown, 4), 1, 'X', 17, 18, // --
@@ -2173,6 +2223,10 @@ TEST_F(WasmModuleCustomSectionTest, TwoKnownTwoUnknownSections) {
CheckSections(data, data + sizeof(data), expected, arraysize(expected));
}
+#undef WASM_FEATURE_SCOPE
+#undef WASM_FEATURE_SCOPE_VAL
+#undef EXPECT_INIT_EXPR
+#undef EXPECT_INIT_EXPR_FAIL
#undef WASM_INIT_EXPR_I32V_1
#undef WASM_INIT_EXPR_I32V_2
#undef WASM_INIT_EXPR_I32V_3
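The recurring change in module-decoder-unittest.cc is that the isolate-bound Sync* entry points are replaced by decoder calls that take the enabled WasmFeatures explicitly; each fixture therefore carries an enabled_features_ member (all features off by default) and wraps the decoder call, so individual tests differ only in which WASM_FEATURE_SCOPE they open. A hedged, self-contained sketch of that wrapper shape (the real prototypes are the DecodeWasmModule / DecodeWasmSignatureForTesting / DecodeWasmInitExprForTesting calls in the hunks above; everything named "*Sketch" here is illustrative):

#include <cstdint>

struct FeatureSetSketch { bool anyref = false; };  // illustrative only

// Stand-in for a feature-aware decoder entry point such as
// DecodeWasmSignatureForTesting(enabled_features, zone, start, end).
inline bool DecodeWithFeaturesSketch(const FeatureSetSketch& features,
                                     const uint8_t* start,
                                     const uint8_t* end) {
  for (const uint8_t* p = start; p != end; ++p) {
    // 0x6f is used here as the anyref value-type byte; reject it when the
    // feature is off, which is what Fail_anyref_without_flag exercises.
    if (*p == 0x6f && !features.anyref) return false;
  }
  return start != end;  // "decoded" anything non-empty
}

class DecodeFixtureSketch {
 public:
  FeatureSetSketch enabled_features_;  // toggled per test via scopes

  // Wrapper mirroring DecodeSig()/DecodeModule()/DecodeInitExpr() above:
  // forwards the fixture's feature set so tests never touch global flags.
  bool Decode(const uint8_t* start, const uint8_t* end) {
    return DecodeWithFeaturesSketch(enabled_features_, start, end);
  }
};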
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index c05a13c431..cc66f14d9c 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -8,6 +8,7 @@
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-memory.h"
namespace v8 {
namespace internal {
@@ -157,7 +158,7 @@ class WasmCodeManagerTest : public TestWithContext,
public:
static constexpr uint32_t kNumFunctions = 10;
static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
- kNumFunctions * JumpTableAssembler::kJumpTableSlotSize);
+ JumpTableAssembler::SizeForNumberOfSlots(kNumFunctions));
using NativeModulePtr = std::unique_ptr<NativeModule>;
@@ -166,10 +167,10 @@ class WasmCodeManagerTest : public TestWithContext,
std::shared_ptr<WasmModule> module(new WasmModule);
module->num_declared_functions = kNumFunctions;
bool can_request_more = style == Growable;
- wasm::ModuleEnv env(module.get(), UseTrapHandler::kNoTrapHandler,
- RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
- return manager->NewNativeModule(i_isolate(), size, can_request_more,
- std::move(module), env);
+ ModuleEnv env(module.get(), UseTrapHandler::kNoTrapHandler,
+ RuntimeExceptionSupport::kNoRuntimeExceptionSupport);
+ return manager->NewNativeModule(i_isolate(), kAllWasmFeatures, size,
+ can_request_more, std::move(module), env);
}
WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
@@ -183,6 +184,11 @@ class WasmCodeManagerTest : public TestWithContext,
}
size_t page() const { return AllocatePageSize(); }
+
+ WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
+
+ private:
+ WasmMemoryTracker memory_tracker_;
};
INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
@@ -190,7 +196,7 @@ INSTANTIATE_TEST_CASE_P(Parameterized, WasmCodeManagerTest,
PrintWasmCodeManageTestParam);
TEST_P(WasmCodeManagerTest, EmptyCase) {
- WasmCodeManager manager(0 * page());
+ WasmCodeManager manager(memory_tracker(), 0 * page());
CHECK_EQ(0, manager.remaining_uncommitted_code_space());
ASSERT_DEATH_IF_SUPPORTED(AllocModule(&manager, 1 * page(), GetParam()),
@@ -198,7 +204,7 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
}
TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
- WasmCodeManager manager(1 * page());
+ WasmCodeManager manager(memory_tracker(), 1 * page());
CHECK_EQ(1 * page(), manager.remaining_uncommitted_code_space());
NativeModulePtr native_module = AllocModule(&manager, 1 * page(), GetParam());
CHECK(native_module);
@@ -223,7 +229,7 @@ TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
}
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
- WasmCodeManager manager(3 * page());
+ WasmCodeManager manager(memory_tracker(), 3 * page());
NativeModulePtr nm1 = AllocModule(&manager, 2 * page(), GetParam());
NativeModulePtr nm2 = AllocModule(&manager, 2 * page(), GetParam());
CHECK(nm1);
@@ -235,8 +241,8 @@ TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
}
TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
- WasmCodeManager manager1(1 * page());
- WasmCodeManager manager2(2 * page());
+ WasmCodeManager manager1(memory_tracker(), 1 * page());
+ WasmCodeManager manager2(memory_tracker(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
CHECK(nm1);
@@ -249,7 +255,7 @@ TEST_P(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
}
TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
- WasmCodeManager manager(3 * page());
+ WasmCodeManager manager(memory_tracker(), 3 * page());
NativeModulePtr nm = AllocModule(&manager, 1 * page(), GetParam());
size_t module_size = GetParam() == Fixed ? kMaxWasmCodeMemory : 1 * page();
size_t remaining_space_in_module = module_size - kJumpTableSize;
@@ -268,7 +274,7 @@ TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
}
TEST_P(WasmCodeManagerTest, CommitIncrements) {
- WasmCodeManager manager(10 * page());
+ WasmCodeManager manager(memory_tracker(), 10 * page());
NativeModulePtr nm = AllocModule(&manager, 3 * page(), GetParam());
WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
CHECK_NOT_NULL(code);
@@ -282,7 +288,7 @@ TEST_P(WasmCodeManagerTest, CommitIncrements) {
}
TEST_P(WasmCodeManagerTest, Lookup) {
- WasmCodeManager manager(2 * page());
+ WasmCodeManager manager(memory_tracker(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), GetParam());
@@ -321,8 +327,8 @@ TEST_P(WasmCodeManagerTest, Lookup) {
}
TEST_P(WasmCodeManagerTest, MultiManagerLookup) {
- WasmCodeManager manager1(2 * page());
- WasmCodeManager manager2(2 * page());
+ WasmCodeManager manager1(memory_tracker(), 2 * page());
+ WasmCodeManager manager2(memory_tracker(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), GetParam());
NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), GetParam());
@@ -344,7 +350,7 @@ TEST_P(WasmCodeManagerTest, MultiManagerLookup) {
}
TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
- WasmCodeManager manager(2 * page());
+ WasmCodeManager manager(memory_tracker(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), GetParam());
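The WasmCodeManagerTest hunks show two constructor-level shifts: WasmCodeManager now takes a WasmMemoryTracker* ahead of its committed-code-space limit, and NewNativeModule takes the enabled feature set (kAllWasmFeatures in these tests). The tracker is shared, too: DifferentHeapsApplyLimitsIndependently passes the same memory_tracker() to both managers while each keeps its own limit. A self-contained sketch of that injected-accounting shape (all names below are sketch-only, not the V8 classes):

#include <cstddef>

class MemoryTrackerSketch {
 public:
  void OnReserve(size_t bytes) { reserved_ += bytes; }
  size_t reserved() const { return reserved_; }

 private:
  size_t reserved_ = 0;  // accounting shared by every manager using this tracker
};

class CodeManagerSketch {
 public:
  CodeManagerSketch(MemoryTrackerSketch* tracker, size_t max_committed)
      : tracker_(tracker), remaining_(max_committed) {}

  bool Reserve(size_t bytes) {
    if (bytes > remaining_) return false;  // per-manager limit, as in the tests
    remaining_ -= bytes;
    tracker_->OnReserve(bytes);            // cross-manager accounting
    return true;
  }

 private:
  MemoryTrackerSketch* tracker_;  // injected, not owned
  size_t remaining_;
};

// Two managers can apply their limits independently while reporting to one
// tracker, matching the shape of DifferentHeapsApplyLimitsIndependently:
//   MemoryTrackerSketch tracker;
//   CodeManagerSketch manager1(&tracker, 1 * 4096);
//   CodeManagerSketch manager2(&tracker, 2 * 4096);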
diff --git a/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
index 0883dd9538..d982c1e0ec 100644
--- a/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
+++ b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
@@ -280,5 +280,77 @@ TEST(ZoneChunkList, PushBackPopBackSize) {
CHECK_EQ(size_t(0), zone_chunk_list.size());
}
+TEST(ZoneChunkList, AdvanceZeroTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ auto iterator_advance = zone_chunk_list.begin();
+
+ iterator_advance.Advance(0);
+
+ CHECK_EQ(iterator_advance, zone_chunk_list.begin());
+}
+
+TEST(ZoneChunkList, AdvancePartwayTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ auto iterator_advance = zone_chunk_list.begin();
+ auto iterator_one_by_one = zone_chunk_list.begin();
+
+ iterator_advance.Advance(kItemCount / 2);
+ for (size_t i = 0; i < kItemCount / 2; ++i) {
+ ++iterator_one_by_one;
+ }
+
+ CHECK_EQ(iterator_advance, iterator_one_by_one);
+}
+
+TEST(ZoneChunkList, AdvanceEndTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ auto iterator_advance = zone_chunk_list.begin();
+
+ iterator_advance.Advance(kItemCount);
+
+ CHECK_EQ(iterator_advance, zone_chunk_list.end());
+}
+
+TEST(ZoneChunkList, FindOverChunkBoundary) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<int> zone_chunk_list(&zone);
+
+ // Make sure we get two chunks.
+ int chunk_size = static_cast<int>(ZoneChunkList<int>::StartMode::kSmall);
+ for (int i = 0; i < chunk_size + 1; ++i) {
+ zone_chunk_list.push_back(i);
+ }
+
+ for (int i = 0; i < chunk_size + 1; ++i) {
+ CHECK_EQ(i, *zone_chunk_list.Find(i));
+ }
+}
+
} // namespace internal
} // namespace v8
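The new ZoneChunkList tests pin down two iterator properties: Advance(n) must match n single-step increments, including landing exactly on end() when n equals the element count, and Find(i) must keep working once the list has grown past one chunk. Since Zone and ZoneChunkList are V8-internal, here is a compilable analogue of the Advance checks written against std::list and std::advance (assumption: the V8 iterator is intended to behave this way, which is precisely what AdvancePartwayTest and AdvanceEndTest assert):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>

int main() {
  constexpr size_t kItemCount = 1024;  // arbitrary; the V8 tests use their own constant
  std::list<uintptr_t> items;
  for (size_t i = 0; i < kItemCount; ++i) items.push_back(i);

  auto advanced = items.begin();
  std::advance(advanced, kItemCount / 2);  // analogue of Advance(kItemCount / 2)

  auto one_by_one = items.begin();
  for (size_t i = 0; i < kItemCount / 2; ++i) ++one_by_one;
  assert(advanced == one_by_one);  // same position reached either way

  std::advance(advanced, kItemCount - kItemCount / 2);
  assert(advanced == items.end());  // advancing by the full count lands on end()
  return 0;
}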
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 4bc2a4dba9..1dd1c7217b 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-9c9ffb1170cc729f41c661299aa2dbfbac6a118a \ No newline at end of file
+7e2cdbd60dd1d10db95fb61b491ebf7b9f2c69e6 \ No newline at end of file
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 5f2489546f..412784d252 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -116,10 +116,10 @@
}], # arch == arm64 and msan
##############################################################################
-['(variant == nooptimization or variant == stress) and (arch == arm or arch == arm64) and simulator_run', {
+['(variant == nooptimization or variant == stress or variant == no_liftoff) and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
'dfg-double-vote-fuzz': [SKIP],
-}], # (variant == nooptimization or variant == stress) and (arch == arm or arch == arm64) and simulator_run
+}], # (variant == nooptimization or variant == stress or variant == no_liftoff) and (arch == arm or arch == arm64) and simulator_run
##############################################################################
['gcov_coverage', {
diff --git a/deps/v8/third_party/antlr4/BUILD.gn b/deps/v8/third_party/antlr4/BUILD.gn
deleted file mode 100644
index 65e2a78eb4..0000000000
--- a/deps/v8/third_party/antlr4/BUILD.gn
+++ /dev/null
@@ -1,342 +0,0 @@
-# Copyright 2018 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-config("antlr-compatibility") {
- if (!is_clang && !is_win) {
- cflags = [
- # Avoid warnings in generated Antlr code
- "-Wno-unused-but-set-variable",
- ]
- }
- if (is_aix) {
- cflags += [ "-fdollars-in-identifiers" ]
- }
-}
-
-source_set("antlr4") {
- defines = [ "ANTLR4CPP_STATIC" ]
-
- include_dirs = [ "runtime/Cpp/runtime/src" ]
-
- sources = [
- "runtime/Cpp/runtime/src/ANTLRErrorListener.cpp",
- "runtime/Cpp/runtime/src/ANTLRErrorListener.h",
- "runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp",
- "runtime/Cpp/runtime/src/ANTLRErrorStrategy.h",
- "runtime/Cpp/runtime/src/ANTLRFileStream.cpp",
- "runtime/Cpp/runtime/src/ANTLRFileStream.h",
- "runtime/Cpp/runtime/src/ANTLRInputStream.cpp",
- "runtime/Cpp/runtime/src/ANTLRInputStream.h",
- "runtime/Cpp/runtime/src/BailErrorStrategy.cpp",
- "runtime/Cpp/runtime/src/BailErrorStrategy.h",
- "runtime/Cpp/runtime/src/BaseErrorListener.cpp",
- "runtime/Cpp/runtime/src/BaseErrorListener.h",
- "runtime/Cpp/runtime/src/BufferedTokenStream.cpp",
- "runtime/Cpp/runtime/src/BufferedTokenStream.h",
- "runtime/Cpp/runtime/src/CharStream.cpp",
- "runtime/Cpp/runtime/src/CharStream.h",
- "runtime/Cpp/runtime/src/CommonToken.cpp",
- "runtime/Cpp/runtime/src/CommonToken.h",
- "runtime/Cpp/runtime/src/CommonTokenFactory.cpp",
- "runtime/Cpp/runtime/src/CommonTokenFactory.h",
- "runtime/Cpp/runtime/src/CommonTokenStream.cpp",
- "runtime/Cpp/runtime/src/CommonTokenStream.h",
- "runtime/Cpp/runtime/src/ConsoleErrorListener.cpp",
- "runtime/Cpp/runtime/src/ConsoleErrorListener.h",
- "runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp",
- "runtime/Cpp/runtime/src/DefaultErrorStrategy.h",
- "runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp",
- "runtime/Cpp/runtime/src/DiagnosticErrorListener.h",
- "runtime/Cpp/runtime/src/Exceptions.cpp",
- "runtime/Cpp/runtime/src/Exceptions.h",
- "runtime/Cpp/runtime/src/FailedPredicateException.cpp",
- "runtime/Cpp/runtime/src/FailedPredicateException.h",
- "runtime/Cpp/runtime/src/InputMismatchException.cpp",
- "runtime/Cpp/runtime/src/InputMismatchException.h",
- "runtime/Cpp/runtime/src/IntStream.cpp",
- "runtime/Cpp/runtime/src/IntStream.h",
- "runtime/Cpp/runtime/src/InterpreterRuleContext.cpp",
- "runtime/Cpp/runtime/src/InterpreterRuleContext.h",
- "runtime/Cpp/runtime/src/Lexer.cpp",
- "runtime/Cpp/runtime/src/Lexer.h",
- "runtime/Cpp/runtime/src/LexerInterpreter.cpp",
- "runtime/Cpp/runtime/src/LexerInterpreter.h",
- "runtime/Cpp/runtime/src/LexerNoViableAltException.cpp",
- "runtime/Cpp/runtime/src/LexerNoViableAltException.h",
- "runtime/Cpp/runtime/src/ListTokenSource.cpp",
- "runtime/Cpp/runtime/src/ListTokenSource.h",
- "runtime/Cpp/runtime/src/NoViableAltException.cpp",
- "runtime/Cpp/runtime/src/NoViableAltException.h",
- "runtime/Cpp/runtime/src/Parser.cpp",
- "runtime/Cpp/runtime/src/Parser.h",
- "runtime/Cpp/runtime/src/ParserInterpreter.cpp",
- "runtime/Cpp/runtime/src/ParserInterpreter.h",
- "runtime/Cpp/runtime/src/ParserRuleContext.cpp",
- "runtime/Cpp/runtime/src/ParserRuleContext.h",
- "runtime/Cpp/runtime/src/ProxyErrorListener.cpp",
- "runtime/Cpp/runtime/src/ProxyErrorListener.h",
- "runtime/Cpp/runtime/src/RecognitionException.cpp",
- "runtime/Cpp/runtime/src/RecognitionException.h",
- "runtime/Cpp/runtime/src/Recognizer.cpp",
- "runtime/Cpp/runtime/src/Recognizer.h",
- "runtime/Cpp/runtime/src/RuleContext.cpp",
- "runtime/Cpp/runtime/src/RuleContext.h",
- "runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp",
- "runtime/Cpp/runtime/src/RuleContextWithAltNum.h",
- "runtime/Cpp/runtime/src/RuntimeMetaData.cpp",
- "runtime/Cpp/runtime/src/RuntimeMetaData.h",
- "runtime/Cpp/runtime/src/Token.cpp",
- "runtime/Cpp/runtime/src/Token.h",
- "runtime/Cpp/runtime/src/TokenFactory.h",
- "runtime/Cpp/runtime/src/TokenSource.cpp",
- "runtime/Cpp/runtime/src/TokenSource.h",
- "runtime/Cpp/runtime/src/TokenStream.cpp",
- "runtime/Cpp/runtime/src/TokenStream.h",
- "runtime/Cpp/runtime/src/TokenStreamRewriter.cpp",
- "runtime/Cpp/runtime/src/TokenStreamRewriter.h",
- "runtime/Cpp/runtime/src/UnbufferedCharStream.cpp",
- "runtime/Cpp/runtime/src/UnbufferedCharStream.h",
- "runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp",
- "runtime/Cpp/runtime/src/UnbufferedTokenStream.h",
- "runtime/Cpp/runtime/src/Vocabulary.cpp",
- "runtime/Cpp/runtime/src/Vocabulary.h",
- "runtime/Cpp/runtime/src/WritableToken.cpp",
- "runtime/Cpp/runtime/src/WritableToken.h",
- "runtime/Cpp/runtime/src/antlr4-common.h",
- "runtime/Cpp/runtime/src/antlr4-runtime.h",
- "runtime/Cpp/runtime/src/atn/ATN.cpp",
- "runtime/Cpp/runtime/src/atn/ATN.h",
- "runtime/Cpp/runtime/src/atn/ATNConfig.cpp",
- "runtime/Cpp/runtime/src/atn/ATNConfig.h",
- "runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp",
- "runtime/Cpp/runtime/src/atn/ATNConfigSet.h",
- "runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp",
- "runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h",
- "runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp",
- "runtime/Cpp/runtime/src/atn/ATNDeserializer.h",
- "runtime/Cpp/runtime/src/atn/ATNSerializer.cpp",
- "runtime/Cpp/runtime/src/atn/ATNSerializer.h",
- "runtime/Cpp/runtime/src/atn/ATNSimulator.cpp",
- "runtime/Cpp/runtime/src/atn/ATNSimulator.h",
- "runtime/Cpp/runtime/src/atn/ATNState.cpp",
- "runtime/Cpp/runtime/src/atn/ATNState.h",
- "runtime/Cpp/runtime/src/atn/ATNType.h",
- "runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp",
- "runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h",
- "runtime/Cpp/runtime/src/atn/ActionTransition.cpp",
- "runtime/Cpp/runtime/src/atn/ActionTransition.h",
- "runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp",
- "runtime/Cpp/runtime/src/atn/AmbiguityInfo.h",
- "runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp",
- "runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h",
- "runtime/Cpp/runtime/src/atn/AtomTransition.cpp",
- "runtime/Cpp/runtime/src/atn/AtomTransition.h",
- "runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp",
- "runtime/Cpp/runtime/src/atn/BasicBlockStartState.h",
- "runtime/Cpp/runtime/src/atn/BasicState.cpp",
- "runtime/Cpp/runtime/src/atn/BasicState.h",
- "runtime/Cpp/runtime/src/atn/BlockEndState.cpp",
- "runtime/Cpp/runtime/src/atn/BlockEndState.h",
- "runtime/Cpp/runtime/src/atn/BlockStartState.cpp",
- "runtime/Cpp/runtime/src/atn/BlockStartState.h",
- "runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp",
- "runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h",
- "runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp",
- "runtime/Cpp/runtime/src/atn/DecisionEventInfo.h",
- "runtime/Cpp/runtime/src/atn/DecisionInfo.cpp",
- "runtime/Cpp/runtime/src/atn/DecisionInfo.h",
- "runtime/Cpp/runtime/src/atn/DecisionState.cpp",
- "runtime/Cpp/runtime/src/atn/DecisionState.h",
- "runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp",
- "runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h",
- "runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp",
- "runtime/Cpp/runtime/src/atn/EpsilonTransition.h",
- "runtime/Cpp/runtime/src/atn/ErrorInfo.cpp",
- "runtime/Cpp/runtime/src/atn/ErrorInfo.h",
- "runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp",
- "runtime/Cpp/runtime/src/atn/LL1Analyzer.h",
- "runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp",
- "runtime/Cpp/runtime/src/atn/LexerATNConfig.h",
- "runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp",
- "runtime/Cpp/runtime/src/atn/LexerATNSimulator.h",
- "runtime/Cpp/runtime/src/atn/LexerAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerAction.h",
- "runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp",
- "runtime/Cpp/runtime/src/atn/LexerActionExecutor.h",
- "runtime/Cpp/runtime/src/atn/LexerActionType.h",
- "runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerChannelAction.h",
- "runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerCustomAction.h",
- "runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h",
- "runtime/Cpp/runtime/src/atn/LexerModeAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerModeAction.h",
- "runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerMoreAction.h",
- "runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerPopModeAction.h",
- "runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerPushModeAction.h",
- "runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerSkipAction.h",
- "runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp",
- "runtime/Cpp/runtime/src/atn/LexerTypeAction.h",
- "runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp",
- "runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h",
- "runtime/Cpp/runtime/src/atn/LoopEndState.cpp",
- "runtime/Cpp/runtime/src/atn/LoopEndState.h",
- "runtime/Cpp/runtime/src/atn/NotSetTransition.cpp",
- "runtime/Cpp/runtime/src/atn/NotSetTransition.h",
- "runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp",
- "runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h",
- "runtime/Cpp/runtime/src/atn/ParseInfo.cpp",
- "runtime/Cpp/runtime/src/atn/ParseInfo.h",
- "runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp",
- "runtime/Cpp/runtime/src/atn/ParserATNSimulator.h",
- "runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp",
- "runtime/Cpp/runtime/src/atn/PlusBlockStartState.h",
- "runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp",
- "runtime/Cpp/runtime/src/atn/PlusLoopbackState.h",
- "runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp",
- "runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h",
- "runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp",
- "runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h",
- "runtime/Cpp/runtime/src/atn/PredicateTransition.cpp",
- "runtime/Cpp/runtime/src/atn/PredicateTransition.h",
- "runtime/Cpp/runtime/src/atn/PredictionContext.cpp",
- "runtime/Cpp/runtime/src/atn/PredictionContext.h",
- "runtime/Cpp/runtime/src/atn/PredictionMode.cpp",
- "runtime/Cpp/runtime/src/atn/PredictionMode.h",
- "runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp",
- "runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h",
- "runtime/Cpp/runtime/src/atn/RangeTransition.cpp",
- "runtime/Cpp/runtime/src/atn/RangeTransition.h",
- "runtime/Cpp/runtime/src/atn/RuleStartState.cpp",
- "runtime/Cpp/runtime/src/atn/RuleStartState.h",
- "runtime/Cpp/runtime/src/atn/RuleStopState.cpp",
- "runtime/Cpp/runtime/src/atn/RuleStopState.h",
- "runtime/Cpp/runtime/src/atn/RuleTransition.cpp",
- "runtime/Cpp/runtime/src/atn/RuleTransition.h",
- "runtime/Cpp/runtime/src/atn/SemanticContext.cpp",
- "runtime/Cpp/runtime/src/atn/SemanticContext.h",
- "runtime/Cpp/runtime/src/atn/SetTransition.cpp",
- "runtime/Cpp/runtime/src/atn/SetTransition.h",
- "runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp",
- "runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h",
- "runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp",
- "runtime/Cpp/runtime/src/atn/StarBlockStartState.h",
- "runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp",
- "runtime/Cpp/runtime/src/atn/StarLoopEntryState.h",
- "runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp",
- "runtime/Cpp/runtime/src/atn/StarLoopbackState.h",
- "runtime/Cpp/runtime/src/atn/TokensStartState.cpp",
- "runtime/Cpp/runtime/src/atn/TokensStartState.h",
- "runtime/Cpp/runtime/src/atn/Transition.cpp",
- "runtime/Cpp/runtime/src/atn/Transition.h",
- "runtime/Cpp/runtime/src/atn/WildcardTransition.cpp",
- "runtime/Cpp/runtime/src/atn/WildcardTransition.h",
- "runtime/Cpp/runtime/src/dfa/DFA.cpp",
- "runtime/Cpp/runtime/src/dfa/DFA.h",
- "runtime/Cpp/runtime/src/dfa/DFASerializer.cpp",
- "runtime/Cpp/runtime/src/dfa/DFASerializer.h",
- "runtime/Cpp/runtime/src/dfa/DFAState.cpp",
- "runtime/Cpp/runtime/src/dfa/DFAState.h",
- "runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp",
- "runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h",
- "runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp",
- "runtime/Cpp/runtime/src/misc/InterpreterDataReader.h",
- "runtime/Cpp/runtime/src/misc/Interval.cpp",
- "runtime/Cpp/runtime/src/misc/Interval.h",
- "runtime/Cpp/runtime/src/misc/IntervalSet.cpp",
- "runtime/Cpp/runtime/src/misc/IntervalSet.h",
- "runtime/Cpp/runtime/src/misc/MurmurHash.cpp",
- "runtime/Cpp/runtime/src/misc/MurmurHash.h",
- "runtime/Cpp/runtime/src/misc/Predicate.cpp",
- "runtime/Cpp/runtime/src/misc/Predicate.h",
- "runtime/Cpp/runtime/src/support/Any.cpp",
- "runtime/Cpp/runtime/src/support/Any.h",
- "runtime/Cpp/runtime/src/support/Arrays.cpp",
- "runtime/Cpp/runtime/src/support/Arrays.h",
- "runtime/Cpp/runtime/src/support/BitSet.h",
- "runtime/Cpp/runtime/src/support/CPPUtils.cpp",
- "runtime/Cpp/runtime/src/support/CPPUtils.h",
- "runtime/Cpp/runtime/src/support/Declarations.h",
- "runtime/Cpp/runtime/src/support/StringUtils.cpp",
- "runtime/Cpp/runtime/src/support/StringUtils.h",
- "runtime/Cpp/runtime/src/support/guid.cpp",
- "runtime/Cpp/runtime/src/support/guid.h",
- "runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h",
- "runtime/Cpp/runtime/src/tree/ErrorNode.cpp",
- "runtime/Cpp/runtime/src/tree/ErrorNode.h",
- "runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp",
- "runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h",
- "runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp",
- "runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h",
- "runtime/Cpp/runtime/src/tree/ParseTree.cpp",
- "runtime/Cpp/runtime/src/tree/ParseTree.h",
- "runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp",
- "runtime/Cpp/runtime/src/tree/ParseTreeListener.h",
- "runtime/Cpp/runtime/src/tree/ParseTreeProperty.h",
- "runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp",
- "runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h",
- "runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp",
- "runtime/Cpp/runtime/src/tree/ParseTreeWalker.h",
- "runtime/Cpp/runtime/src/tree/TerminalNode.cpp",
- "runtime/Cpp/runtime/src/tree/TerminalNode.h",
- "runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp",
- "runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h",
- "runtime/Cpp/runtime/src/tree/Trees.cpp",
- "runtime/Cpp/runtime/src/tree/Trees.h",
- "runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/Chunk.h",
- "runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h",
- "runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h",
- "runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h",
- "runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h",
- "runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/TagChunk.h",
- "runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/TextChunk.h",
- "runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp",
- "runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPath.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPath.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathElement.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h",
- "runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp",
- "runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h",
- ]
-
- configs -= [
- "//build/config/compiler:chromium_code",
- "//build/config/compiler:no_rtti",
- "//build/config/compiler:no_exceptions",
- ]
-
- configs += [
- "//build/config/compiler:no_chromium_code",
- "//build/config/compiler:rtti",
- "//build/config/compiler:exceptions",
- ":antlr-compatibility",
- ]
-}
diff --git a/deps/v8/third_party/antlr4/LICENSE.txt b/deps/v8/third_party/antlr4/LICENSE.txt
deleted file mode 100644
index 2042d1bda6..0000000000
--- a/deps/v8/third_party/antlr4/LICENSE.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-[The "BSD 3-clause license"]
-Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. Neither the name of the copyright holder nor the names of its contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-=====
-
-MIT License for codepointat.js from https://git.io/codepointat
-MIT License for fromcodepoint.js from https://git.io/vDW1m
-
-Copyright Mathias Bynens <https://mathiasbynens.be/>
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/README.md b/deps/v8/third_party/antlr4/runtime/Cpp/README.md
deleted file mode 100644
index fbcfadd51e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# C++ target for ANTLR 4
-
-This folder contains the C++ runtime support for ANTLR. See [the canonical antlr4 repository](https://github.com/antlr/antlr4) for in depth detail about how to use ANTLR 4.
-
-## Authors and major contributors
-
-ANTLR 4 is the result of substantial effort of the following people:
-
-* [Terence Parr](http://www.cs.usfca.edu/~parrt/), parrt@cs.usfca.edu
- ANTLR project lead and supreme dictator for life
- [University of San Francisco](http://www.usfca.edu/)
-* [Sam Harwell](http://tunnelvisionlabs.com/)
- Tool co-author, Java and C# target)
-
-The C++ target has been the work of the following people:
-
-* Dan McLaughlin, dan.mclaughlin@gmail.com (initial port, got code to compile)
-* David Sisson, dsisson@google.com (initial port, made the runtime C++ tests runnable)
-* [Mike Lischke](www.soft-gems.net), mike@lischke-online.de (brought the initial port to a working library, made most runtime tests passing)
-
-## Other contributors
-
-* Marcin Szalowicz, mszalowicz@mailplus.pl (cmake build setup)
-* Tim O'Callaghan, timo@linux.com (additional superbuild cmake pattern script)
-
-## Project Status
-
-* Building on OS X, Windows, and Linux
-* No errors and warnings
-* Library linking
-* Some unit tests in the OSX project, for important base classes with almost 100% code coverage.
-* All memory allocations checked
-* Simple command line demo application working on all supported platforms.
-* All runtime tests pass.
-
-### Build + Usage Notes
-
-The minimum C++ version to compile the ANTLR C++ runtime with is C++11. The supplied projects can built the runtime either as static or dynamic library, as both 32bit and 64bit arch. The OSX project contains a target for iOS and can also be built using cmake (instead of XCode).
-
-Include the antlr4-runtime.h umbrella header in your target application to get everything needed to use the library.
-
-If you are compiling with cmake, the minimum version required is cmake 2.8.
-
-#### Compiling on Windows
-Simply open the VS solution (VS 2013+) and build it.
-
-#### Compiling on OSX
-Either open the included XCode project and build that or use the cmake compilation as described for linux.
-
-#### Compiling on Linux
-- cd <antlr4-dir>/runtime/Cpp (this is where this readme is located)
-- mkdir build && mkdir run && cd build
-- cmake .. -DANTLR_JAR_LOCATION=full/path/to/antlr4-4.5.4-SNAPSHOT.jar -DWITH_DEMO=True
-- make
-- DESTDIR=<antlr4-dir>/runtime/Cpp/run make install
-
-If you don't want to build the demo then simply run cmake without parameters.
-There is another cmake script available in the subfolder cmake/ for those who prefer the superbuild cmake pattern.
-
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/VERSION b/deps/v8/third_party/antlr4/runtime/Cpp/VERSION
deleted file mode 100644
index 7c66fca579..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-4.7.1
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp
deleted file mode 100644
index b8efcdd71a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRErrorListener.h"
-
-antlr4::ANTLRErrorListener::~ANTLRErrorListener() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.h
deleted file mode 100644
index 5fa6f04608..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorListener.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-
-namespace antlrcpp {
-class BitSet;
-}
-
-namespace antlr4 {
-
-/// How to emit recognition errors (an interface in Java).
-class ANTLR4CPP_PUBLIC ANTLRErrorListener {
- public:
- virtual ~ANTLRErrorListener();
-
- /// <summary>
- /// Upon syntax error, notify any interested parties. This is not how to
- /// recover from errors or compute error messages. <seealso
- /// cref="ANTLRErrorStrategy"/> specifies how to recover from syntax errors
- /// and how to compute error messages. This listener's job is simply to emit a
- /// computed message, though it has enough information to create its own
- /// message in many cases. <p/> The <seealso cref="RecognitionException"/> is
- /// non-null for all syntax errors except when we discover mismatched token
- /// errors that we can recover from in-line, without returning from the
- /// surrounding rule (via the single token insertion and deletion mechanism).
- /// </summary>
- /// <param name="recognizer">
- /// What parser got the error. From this
- /// object, you can access the context as well
- /// as the input stream. </param>
- /// <param name="offendingSymbol">
- /// The offending token in the input token
- /// stream, unless recognizer is a lexer (then it's null). If
- /// no viable alternative error, {@code e} has token at which we
- /// started production for the decision. </param>
- /// <param name="line">
- /// The line number in the input where the error occurred.
- /// </param> <param name="charPositionInLine"> The character
- /// position within that line where the error occurred. </param> <param
- /// name="msg"> The message to emit. </param> <param name="e">
- /// The exception generated by the parser that led to
- /// the reporting of an error. It is null in the case where
- /// the parser was able to recover in line without exiting the
- /// surrounding rule. </param>
- virtual void syntaxError(Recognizer* recognizer, Token* offendingSymbol,
- size_t line, size_t charPositionInLine,
- const std::string& msg, std::exception_ptr e) = 0;
-
- /**
- * This method is called by the parser when a full-context prediction
- * results in an ambiguity.
- *
- * <p>Each full-context prediction which does not result in a syntax error
- * will call either {@link #reportContextSensitivity} or
- * {@link #reportAmbiguity}.</p>
- *
- * <p>When {@code ambigAlts} is not null, it contains the set of potentially
- * viable alternatives identified by the prediction algorithm. When
- * {@code ambigAlts} is null, use {@link ATNConfigSet#getAlts} to obtain the
- * represented alternatives from the {@code configs} argument.</p>
- *
- * <p>When {@code exact} is {@code true}, <em>all</em> of the potentially
- * viable alternatives are truly viable, i.e. this is reporting an exact
- * ambiguity. When {@code exact} is {@code false}, <em>at least two</em> of
- * the potentially viable alternatives are viable for the current input, but
- * the prediction algorithm terminated as soon as it determined that at
- * least the <em>minimum</em> potentially viable alternative is truly
- * viable.</p>
- *
- * <p>When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
- * mode is used, the parser is required to identify exact ambiguities so
- * {@code exact} will always be {@code true}.</p>
- *
- * <p>This method is not used by lexers.</p>
- *
- * @param recognizer the parser instance
- * @param dfa the DFA for the current decision
- * @param startIndex the input index where the decision started
- * @param stopIndex the input index where the ambiguity was identified
- * @param exact {@code true} if the ambiguity is exactly known, otherwise
- * {@code false}. This is always {@code true} when
- * {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
- * @param ambigAlts the potentially ambiguous alternatives, or {@code null}
- * to indicate that the potentially ambiguous alternatives are the complete
- * set of represented alternatives in {@code configs}
- * @param configs the ATN configuration set where the ambiguity was
- * identified
- */
- virtual void reportAmbiguity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- atn::ATNConfigSet* configs) = 0;
-
- /**
- * This method is called when an SLL conflict occurs and the parser is about
- * to use the full context information to make an LL decision.
- *
- * <p>If one or more configurations in {@code configs} contains a semantic
- * predicate, the predicates are evaluated before this method is called. The
- * subset of alternatives which are still viable after predicates are
- * evaluated is reported in {@code conflictingAlts}.</p>
- *
- * <p>This method is not used by lexers.</p>
- *
- * @param recognizer the parser instance
- * @param dfa the DFA for the current decision
- * @param startIndex the input index where the decision started
- * @param stopIndex the input index where the SLL conflict occurred
- * @param conflictingAlts The specific conflicting alternatives. If this is
- * {@code null}, the conflicting alternatives are all alternatives
- * represented in {@code configs}. At the moment, conflictingAlts is non-null
- * (for the reference implementation, but Sam's optimized version can see this
- * as null).
- * @param configs the ATN configuration set where the SLL conflict was
- * detected
- */
- virtual void reportAttemptingFullContext(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet& conflictingAlts,
- atn::ATNConfigSet* configs) = 0;
-
- /**
- * This method is called by the parser when a full-context prediction has a
- * unique result.
- *
- * <p>Each full-context prediction which does not result in a syntax error
- * will call either {@link #reportContextSensitivity} or
- * {@link #reportAmbiguity}.</p>
- *
- * <p>For prediction implementations that only evaluate full-context
- * predictions when an SLL conflict is found (including the default
- * {@link ParserATNSimulator} implementation), this method reports cases
- * where SLL conflicts were resolved to unique full-context predictions,
- * i.e. the decision was context-sensitive. This report does not necessarily
- * indicate a problem, and it may appear even in completely unambiguous
- * grammars.</p>
- *
- * <p>{@code configs} may have more than one represented alternative if the
- * full-context prediction algorithm does not evaluate predicates before
- * beginning the full-context prediction. In all cases, the final prediction
- * is passed as the {@code prediction} argument.</p>
- *
- * <p>Note that the definition of "context sensitivity" in this method
- * differs from the concept in {@link DecisionInfo#contextSensitivities}.
- * This method reports all instances where an SLL conflict occurred but LL
- * parsing produced a unique result, whether or not that unique result
- * matches the minimum alternative in the SLL conflicting set.</p>
- *
- * <p>This method is not used by lexers.</p>
- *
- * @param recognizer the parser instance
- * @param dfa the DFA for the current decision
- * @param startIndex the input index where the decision started
- * @param stopIndex the input index where the context sensitivity was
- * finally determined
- * @param prediction the unambiguous result of the full-context prediction
- * @param configs the ATN configuration set where the unambiguous prediction
- * was determined
- */
- virtual void reportContextSensitivity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex,
- size_t prediction,
- atn::ATNConfigSet* configs) = 0;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp
deleted file mode 100644
index f76444e020..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRErrorStrategy.h"
-
-antlr4::ANTLRErrorStrategy::~ANTLRErrorStrategy() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h
deleted file mode 100644
index 7204a5c85f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRErrorStrategy.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// The interface for defining strategies to deal with syntax errors encountered
-/// during a parse by ANTLR-generated parsers. We distinguish between three
-/// different kinds of errors:
-///
-/// <ul>
-/// <li>The parser could not figure out which path to take in the ATN (none of
-/// the available alternatives could possibly match)</li>
-/// <li>The current input does not match what we were looking for</li>
-/// <li>A predicate evaluated to false</li>
-/// </ul>
-///
-/// Implementations of this interface report syntax errors by calling
-/// <seealso cref="Parser#notifyErrorListeners"/>.
-/// <p/>
-/// TO_DO: what to do about lexers
-/// </summary>
-class ANTLR4CPP_PUBLIC ANTLRErrorStrategy {
- public:
- /// <summary>
- /// Reset the error handler state for the specified {@code recognizer}.
- /// </summary> <param name="recognizer"> the parser instance </param>
- virtual ~ANTLRErrorStrategy();
-
- virtual void reset(Parser* recognizer) = 0;
-
- /**
- * This method is called when an unexpected symbol is encountered during an
- * inline match operation, such as {@link Parser#match}. If the error
- * strategy successfully recovers from the match failure, this method
- * returns the {@link Token} instance which should be treated as the
- * successful result of the match.
- *
- * <p>This method handles the consumption of any tokens - the caller should
- * <b>not</b> call {@link Parser#consume} after a successful recovery.</p>
- *
- * <p>Note that the calling code will not report an error if this method
- * returns successfully. The error strategy implementation is responsible
- * for calling {@link Parser#notifyErrorListeners} as appropriate.</p>
- *
- * @param recognizer the parser instance
- * @throws RecognitionException if the error strategy was not able to
- * recover from the unexpected input symbol
- */
- virtual Token* recoverInline(Parser* recognizer) = 0;
-
- /// <summary>
- /// This method is called to recover from exception {@code e}. This method is
- /// called after <seealso cref="#reportError"/> by the default exception
- /// handler generated for a rule method.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception to recover from </param>
- /// <exception cref="RecognitionException"> if the error strategy could not
- /// recover from the recognition exception </exception>
- virtual void recover(Parser* recognizer, std::exception_ptr e) = 0;
-
- /// <summary>
- /// This method provides the error handler with an opportunity to handle
- /// syntactic or semantic errors in the input stream before they result in a
- /// <seealso cref="RecognitionException"/>.
- /// <p/>
- /// The generated code currently contains calls to <seealso cref="#sync"/>
- /// after entering the decision state of a closure block ({@code (...)*} or
- /// {@code (...)+}).
- /// <p/>
- /// For an implementation based on Jim Idle's "magic sync" mechanism, see
- /// <seealso cref="DefaultErrorStrategy#sync"/>.
- /// </summary>
- /// <seealso cref= DefaultErrorStrategy#sync
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <exception cref="RecognitionException"> if an error is detected by the
- /// error strategy but cannot be automatically recovered at the current state
- /// in the parsing process </exception>
- virtual void sync(Parser* recognizer) = 0;
-
- /// <summary>
- /// Tests whether or not {@code recognizer} is in the process of recovering
- /// from an error. In error recovery mode, <seealso cref="Parser#consume"/>
- /// adds symbols to the parse tree by calling
- /// {@link Parser#createErrorNode(ParserRuleContext, Token)} then
- /// {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of
- /// {@link Parser#createTerminalNode(ParserRuleContext, Token)}.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <returns> {@code true} if the parser is currently recovering from a parse
- /// error, otherwise {@code false} </returns>
- virtual bool inErrorRecoveryMode(Parser* recognizer) = 0;
-
- /// <summary>
- /// This method is called when the parser successfully matches an input
- /// symbol.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- virtual void reportMatch(Parser* recognizer) = 0;
-
- /// <summary>
- /// Report any kind of <seealso cref="RecognitionException"/>. This method is
- /// called by the default exception handler generated for a rule method.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception to report </param>
- virtual void reportError(Parser* recognizer,
- const RecognitionException& e) = 0;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.cpp
deleted file mode 100644
index a0376470b3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/StringUtils.h"
-
-#include "ANTLRFileStream.h"
-
-using namespace antlr4;
-
-ANTLRFileStream::ANTLRFileStream(const std::string& fileName) {
- _fileName = fileName;
- loadFromFile(fileName);
-}
-
-void ANTLRFileStream::loadFromFile(const std::string& fileName) {
- _fileName = fileName;
- if (_fileName.empty()) {
- return;
- }
-
-#ifdef _MSC_VER
- std::ifstream stream(antlrcpp::s2ws(fileName), std::ios::binary);
-#else
- std::ifstream stream(fileName, std::ios::binary);
-#endif
-
- ANTLRInputStream::load(stream);
-}
-
-std::string ANTLRFileStream::getSourceName() const { return _fileName; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.h
deleted file mode 100644
index 3c92247f03..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRFileStream.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRInputStream.h"
-
-namespace antlr4 {
-
-/// This is an ANTLRInputStream that is loaded from a file all at once
-/// when you construct the object (or call load()).
-// TODO: this class needs testing.
-class ANTLR4CPP_PUBLIC ANTLRFileStream : public ANTLRInputStream {
- protected:
- std::string _fileName; // UTF-8 encoded file name.
-
- public:
- // Assumes a file name encoded in UTF-8 and file content in the same encoding
- // (with or w/o BOM).
- ANTLRFileStream(const std::string& fileName);
-
- virtual void loadFromFile(const std::string& fileName);
- virtual std::string getSourceName() const override;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.cpp
deleted file mode 100644
index 8eecd823cd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "IntStream.h"
-#include "misc/Interval.h"
-
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-
-#include "ANTLRInputStream.h"
-
-using namespace antlr4;
-using namespace antlrcpp;
-
-using misc::Interval;
-
-ANTLRInputStream::ANTLRInputStream(const std::string& input) {
- InitializeInstanceFields();
- load(input);
-}
-
-ANTLRInputStream::ANTLRInputStream(const char data_[],
- size_t numberOfActualCharsInArray)
- : ANTLRInputStream(std::string(data_, numberOfActualCharsInArray)) {}
-
-ANTLRInputStream::ANTLRInputStream(std::istream& stream) {
- InitializeInstanceFields();
- load(stream);
-}
-
-void ANTLRInputStream::load(const std::string& input) {
- // Remove the UTF-8 BOM if present.
- const char bom[4] = "\xef\xbb\xbf";
- if (input.compare(0, 3, bom, 3) == 0)
- _data =
- antlrcpp::utf8_to_utf32(input.data() + 3, input.data() + input.size());
- else
- _data = antlrcpp::utf8_to_utf32(input.data(), input.data() + input.size());
- p = 0;
-}
-
-void ANTLRInputStream::load(std::istream& stream) {
- if (!stream.good() || stream.eof()) // No fail, bad or EOF.
- return;
-
- _data.clear();
-
- std::string s((std::istreambuf_iterator<char>(stream)),
- std::istreambuf_iterator<char>());
- load(s);
-}
-
-void ANTLRInputStream::reset() { p = 0; }
-
-void ANTLRInputStream::consume() {
- if (p >= _data.size()) {
- assert(LA(1) == IntStream::EOF);
- throw IllegalStateException("cannot consume EOF");
- }
-
- if (p < _data.size()) {
- p++;
- }
-}
-
-size_t ANTLRInputStream::LA(ssize_t i) {
- if (i == 0) {
- return 0; // undefined
- }
-
- ssize_t position = static_cast<ssize_t>(p);
- if (i < 0) {
- i++; // e.g., translate LA(-1) to use offset i=0; then _data[p+0-1]
- if ((position + i - 1) < 0) {
- return IntStream::EOF; // invalid; no char before first char
- }
- }
-
- if ((position + i - 1) >= static_cast<ssize_t>(_data.size())) {
- return IntStream::EOF;
- }
-
- return _data[static_cast<size_t>((position + i - 1))];
-}
-
-size_t ANTLRInputStream::LT(ssize_t i) { return LA(i); }
-
-size_t ANTLRInputStream::index() { return p; }
-
-size_t ANTLRInputStream::size() { return _data.size(); }
-
-// Mark/release do nothing. We have entire buffer.
-ssize_t ANTLRInputStream::mark() { return -1; }
-
-void ANTLRInputStream::release(ssize_t /* marker */) {}
-
-void ANTLRInputStream::seek(size_t index) {
- if (index <= p) {
- p = index; // just jump; don't update stream state (line, ...)
- return;
- }
- // seek forward, consume until p hits index or n (whichever comes first)
- index = std::min(index, _data.size());
- while (p < index) {
- consume();
- }
-}
-
-std::string ANTLRInputStream::getText(const Interval& interval) {
- if (interval.a < 0 || interval.b < 0) {
- return "";
- }
-
- size_t start = static_cast<size_t>(interval.a);
- size_t stop = static_cast<size_t>(interval.b);
-
- if (stop >= _data.size()) {
- stop = _data.size() - 1;
- }
-
- size_t count = stop - start + 1;
- if (start >= _data.size()) {
- return "";
- }
-
- return antlrcpp::utf32_to_utf8(_data.substr(start, count));
-}
-
-std::string ANTLRInputStream::getSourceName() const {
- if (name.empty()) {
- return IntStream::UNKNOWN_SOURCE_NAME;
- }
- return name;
-}
-
-std::string ANTLRInputStream::toString() const {
- return antlrcpp::utf32_to_utf8(_data);
-}
-
-void ANTLRInputStream::InitializeInstanceFields() { p = 0; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.h
deleted file mode 100644
index 4a7af5152a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ANTLRInputStream.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CharStream.h"
-
-namespace antlr4 {
-
-// Vacuum all input from a stream and then treat it
-// like a string. Can also pass in a string or char[] to use.
-// Input is expected to be encoded in UTF-8 and converted to UTF-32 internally.
-class ANTLR4CPP_PUBLIC ANTLRInputStream : public CharStream {
- protected:
- /// The data being scanned.
- // UTF-32
- UTF32String _data;
-
- /// 0..n-1 index into string of next char.
- size_t p;
-
- public:
- /// What is name or source of this char stream?
- std::string name;
-
- ANTLRInputStream(const std::string& input = "");
- ANTLRInputStream(const char data_[], size_t numberOfActualCharsInArray);
- ANTLRInputStream(std::istream& stream);
-
- virtual void load(const std::string& input);
- virtual void load(std::istream& stream);
-
- /// Reset the stream so that it's in the same state it was
- /// when the object was created *except* the data array is not
- /// touched.
- virtual void reset();
- virtual void consume() override;
- virtual size_t LA(ssize_t i) override;
- virtual size_t LT(ssize_t i);
-
- /// <summary>
- /// Return the current input symbol index 0..n where n indicates the
- /// last symbol has been read. The index is the index of char to
- /// be returned from LA(1).
- /// </summary>
- virtual size_t index() override;
- virtual size_t size() override;
-
- /// <summary>
- /// mark/release do nothing; we have entire buffer </summary>
- virtual ssize_t mark() override;
- virtual void release(ssize_t marker) override;
-
- /// <summary>
- /// consume() ahead until p==index; can't just set p=index as we must
- /// update line and charPositionInLine. If we seek backwards, just set p
- /// </summary>
- virtual void seek(size_t index) override;
- virtual std::string getText(const misc::Interval& interval) override;
- virtual std::string getSourceName() const override;
- virtual std::string toString() const override;
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
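The stream API deleted above is small enough to illustrate directly: construct an ANTLRInputStream from a UTF-8 string, walk it with LA(1)/consume(), and slice it with getText(). The following is a minimal, self-contained sketch based only on the ANTLRInputStream and Interval declarations visible in this diff; the sample string is purely illustrative.

#include <iostream>
#include <string>

#include "ANTLRInputStream.h"
#include "misc/Interval.h"

int main() {
  // The constructor converts the UTF-8 input to UTF-32 internally.
  antlr4::ANTLRInputStream input("let x = 42;");

  // LA(1) returns the current code point; consume() advances past it.
  while (input.index() < input.size()) {
    std::cout << static_cast<char>(input.LA(1));  // fine for ASCII-only input
    input.consume();
  }
  std::cout << std::endl;

  // getText() takes an inclusive interval of code point indexes; this slice
  // covers the whole stream, mirroring the call used in BufferedTokenStream.
  std::cout << input.getText(antlr4::misc::Interval(0U, input.size() - 1))
            << std::endl;
  return 0;
}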
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.cpp
deleted file mode 100644
index bc341d7ba1..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "InputMismatchException.h"
-#include "Parser.h"
-#include "ParserRuleContext.h"
-
-#include "BailErrorStrategy.h"
-
-using namespace antlr4;
-
-void BailErrorStrategy::recover(Parser* recognizer, std::exception_ptr e) {
- ParserRuleContext* context = recognizer->getContext();
- do {
- context->exception = e;
- if (context->parent == nullptr) break;
- context = static_cast<ParserRuleContext*>(context->parent);
- } while (true);
-
- try {
- std::rethrow_exception(
- e); // Throw the exception to be able to catch and rethrow nested.
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- } catch (RecognitionException& inner) {
- throw ParseCancellationException(inner.what());
-#else
- } catch (RecognitionException& /*inner*/) {
- std::throw_with_nested(ParseCancellationException());
-#endif
- }
-}
-
-Token* BailErrorStrategy::recoverInline(Parser* recognizer) {
- InputMismatchException e(recognizer);
- std::exception_ptr exception = std::make_exception_ptr(e);
-
- ParserRuleContext* context = recognizer->getContext();
- do {
- context->exception = exception;
- if (context->parent == nullptr) break;
- context = static_cast<ParserRuleContext*>(context->parent);
- } while (true);
-
- try {
- throw e;
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- } catch (InputMismatchException& inner) {
- throw ParseCancellationException(inner.what());
-#else
- } catch (InputMismatchException& /*inner*/) {
- std::throw_with_nested(ParseCancellationException());
-#endif
- }
-}
-
-void BailErrorStrategy::sync(Parser* /*recognizer*/) {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.h
deleted file mode 100644
index a21189cd60..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BailErrorStrategy.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "DefaultErrorStrategy.h"
-
-namespace antlr4 {
-
-/**
- * This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
- * by immediately canceling the parse operation with a
- * {@link ParseCancellationException}. The implementation ensures that the
- * {@link ParserRuleContext#exception} field is set for all parse tree nodes
- * that were not completed prior to encountering the error.
- *
- * <p>
- * This error strategy is useful in the following scenarios.</p>
- *
- * <ul>
- * <li><strong>Two-stage parsing:</strong> This error strategy allows the first
- * stage of two-stage parsing to immediately terminate if an error is
- * encountered, and immediately fall back to the second stage. In addition to
- * avoiding wasted work by attempting to recover from errors here, the empty
- * implementation of {@link BailErrorStrategy#sync} improves the performance of
- * the first stage.</li>
- * <li><strong>Silent validation:</strong> When syntax errors are not being
- * reported or logged, and the parse result is simply ignored if errors occur,
- * the {@link BailErrorStrategy} avoids wasting work on recovering from errors
- * when the result will be ignored either way.</li>
- * </ul>
- *
- * <p>
- * {@code myparser.setErrorHandler(new BailErrorStrategy());}</p>
- *
- * @see Parser#setErrorHandler(ANTLRErrorStrategy)
- */
-class ANTLR4CPP_PUBLIC BailErrorStrategy : public DefaultErrorStrategy {
- /// <summary>
- /// Instead of recovering from exception {@code e}, re-throw it wrapped
- /// in a <seealso cref="ParseCancellationException"/> so it is not caught by
- /// the rule function catches. Use <seealso cref="Exception#getCause()"/> to
- /// get the original <seealso cref="RecognitionException"/>.
- /// </summary>
- public:
- virtual void recover(Parser* recognizer, std::exception_ptr e) override;
-
- /// Make sure we don't attempt to recover inline; if the parser
- /// successfully recovers, it won't throw an exception.
- virtual Token* recoverInline(Parser* recognizer) override;
-
- /// <summary>
- /// Make sure we don't attempt to recover from problems in subrules.
- /// </summary>
- virtual void sync(Parser* recognizer) override;
-};
-
-} // namespace antlr4
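The header above documents the fail-fast / two-stage parsing pattern via the Java snippet `myparser.setErrorHandler(new BailErrorStrategy());`. A rough C++ equivalent might look like the sketch below; `MyLexer`, `MyParser`, and `startRule` stand in for ANTLR-generated classes and are hypothetical, and the exact `setErrorHandler` signature should be checked against the runtime version in use.

#include <memory>
#include <string>

#include "antlr4-runtime.h"
#include "MyLexer.h"   // hypothetical generated lexer
#include "MyParser.h"  // hypothetical generated parser

// Returns true if the input parses without any syntax error.
bool parsesCleanly(const std::string& source) {
  antlr4::ANTLRInputStream input(source);
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);
  MyParser parser(&tokens);

  // Bail out on the first error instead of attempting recovery.
  parser.setErrorHandler(std::make_shared<antlr4::BailErrorStrategy>());
  try {
    parser.startRule();  // hypothetical entry rule
    return true;
  } catch (antlr4::ParseCancellationException&) {
    return false;
  }
}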
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.cpp
deleted file mode 100644
index 66bd1a1444..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "BaseErrorListener.h"
-#include "RecognitionException.h"
-
-using namespace antlr4;
-
-void BaseErrorListener::syntaxError(Recognizer* /*recognizer*/,
- Token* /*offendingSymbol*/, size_t /*line*/,
- size_t /*charPositionInLine*/,
- const std::string& /*msg*/,
- std::exception_ptr /*e*/) {}
-
-void BaseErrorListener::reportAmbiguity(Parser* /*recognizer*/,
- const dfa::DFA& /*dfa*/,
- size_t /*startIndex*/,
- size_t /*stopIndex*/, bool /*exact*/,
- const antlrcpp::BitSet& /*ambigAlts*/,
- atn::ATNConfigSet* /*configs*/) {}
-
-void BaseErrorListener::reportAttemptingFullContext(
- Parser* /*recognizer*/, const dfa::DFA& /*dfa*/, size_t /*startIndex*/,
- size_t /*stopIndex*/, const antlrcpp::BitSet& /*conflictingAlts*/,
- atn::ATNConfigSet* /*configs*/) {}
-
-void BaseErrorListener::reportContextSensitivity(
- Parser* /*recognizer*/, const dfa::DFA& /*dfa*/, size_t /*startIndex*/,
- size_t /*stopIndex*/, size_t /*prediction*/,
- atn::ATNConfigSet* /*configs*/) {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.h
deleted file mode 100644
index 60934524f4..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BaseErrorListener.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRErrorListener.h"
-
-namespace antlrcpp {
-class BitSet;
-}
-
-namespace antlr4 {
-
-/**
- * Provides an empty default implementation of {@link ANTLRErrorListener}. The
- * default implementation of each method does nothing, but can be overridden as
- * necessary.
- */
-class ANTLR4CPP_PUBLIC BaseErrorListener : public ANTLRErrorListener {
- virtual void syntaxError(Recognizer* recognizer, Token* offendingSymbol,
- size_t line, size_t charPositionInLine,
- const std::string& msg,
- std::exception_ptr e) override;
-
- virtual void reportAmbiguity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- atn::ATNConfigSet* configs) override;
-
- virtual void reportAttemptingFullContext(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet& conflictingAlts,
- atn::ATNConfigSet* configs) override;
-
- virtual void reportContextSensitivity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex,
- size_t prediction,
- atn::ATNConfigSet* configs) override;
-};
-
-} // namespace antlr4
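BaseErrorListener exists so applications can override only the callbacks they care about. As a sketch (the class name and output format here are made up), a listener that reports syntax errors to stderr needs to override only syntaxError(), using the exact signature shown in the header above; it would typically be attached with the recognizer's addErrorListener().

#include <exception>
#include <iostream>
#include <string>

#include "BaseErrorListener.h"
#include "Recognizer.h"
#include "Token.h"

// Reports each syntax error on stderr; all other callbacks keep the empty
// default implementations inherited from BaseErrorListener.
class StderrErrorListener : public antlr4::BaseErrorListener {
 public:
  void syntaxError(antlr4::Recognizer* /*recognizer*/,
                   antlr4::Token* /*offendingSymbol*/, size_t line,
                   size_t charPositionInLine, const std::string& msg,
                   std::exception_ptr /*e*/) override {
    std::cerr << "line " << line << ":" << charPositionInLine << " " << msg
              << std::endl;
  }
};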
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.cpp
deleted file mode 100644
index 479da94404..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.cpp
+++ /dev/null
@@ -1,407 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "Lexer.h"
-#include "RuleContext.h"
-#include "WritableToken.h"
-#include "misc/Interval.h"
-#include "support/CPPUtils.h"
-
-#include "BufferedTokenStream.h"
-
-using namespace antlr4;
-using namespace antlrcpp;
-
-BufferedTokenStream::BufferedTokenStream(TokenSource* tokenSource)
- : _tokenSource(tokenSource) {
- InitializeInstanceFields();
-}
-
-TokenSource* BufferedTokenStream::getTokenSource() const {
- return _tokenSource;
-}
-
-size_t BufferedTokenStream::index() { return _p; }
-
-ssize_t BufferedTokenStream::mark() { return 0; }
-
-void BufferedTokenStream::release(ssize_t /*marker*/) {
- // no resources to release
-}
-
-void BufferedTokenStream::reset() { seek(0); }
-
-void BufferedTokenStream::seek(size_t index) {
- lazyInit();
- _p = adjustSeekIndex(index);
-}
-
-size_t BufferedTokenStream::size() { return _tokens.size(); }
-
-void BufferedTokenStream::consume() {
- bool skipEofCheck = false;
- if (!_needSetup) {
- if (_fetchedEOF) {
- // the last token in tokens is EOF. skip check if p indexes any
- // fetched token except the last.
- skipEofCheck = _p < _tokens.size() - 1;
- } else {
- // no EOF token in tokens. skip check if p indexes a fetched token.
- skipEofCheck = _p < _tokens.size();
- }
- } else {
- // not yet initialized
- skipEofCheck = false;
- }
-
- if (!skipEofCheck && LA(1) == Token::EOF) {
- throw IllegalStateException("cannot consume EOF");
- }
-
- if (sync(_p + 1)) {
- _p = adjustSeekIndex(_p + 1);
- }
-}
-
-bool BufferedTokenStream::sync(size_t i) {
- if (i + 1 < _tokens.size()) return true;
- size_t n = i - _tokens.size() + 1; // how many more elements we need?
-
- if (n > 0) {
- size_t fetched = fetch(n);
- return fetched >= n;
- }
-
- return true;
-}
-
-size_t BufferedTokenStream::fetch(size_t n) {
- if (_fetchedEOF) {
- return 0;
- }
-
- size_t i = 0;
- while (i < n) {
- std::unique_ptr<Token> t(_tokenSource->nextToken());
-
- if (is<WritableToken*>(t.get())) {
- (static_cast<WritableToken*>(t.get()))->setTokenIndex(_tokens.size());
- }
-
- _tokens.push_back(std::move(t));
- ++i;
-
- if (_tokens.back()->getType() == Token::EOF) {
- _fetchedEOF = true;
- break;
- }
- }
-
- return i;
-}
-
-Token* BufferedTokenStream::get(size_t i) const {
- if (i >= _tokens.size()) {
- throw IndexOutOfBoundsException(
- std::string("token index ") + std::to_string(i) +
- std::string(" out of range 0..") + std::to_string(_tokens.size() - 1));
- }
- return _tokens[i].get();
-}
-
-std::vector<Token*> BufferedTokenStream::get(size_t start, size_t stop) {
- std::vector<Token*> subset;
-
- lazyInit();
-
- if (_tokens.empty()) {
- return subset;
- }
-
- if (stop >= _tokens.size()) {
- stop = _tokens.size() - 1;
- }
- for (size_t i = start; i <= stop; i++) {
- Token* t = _tokens[i].get();
- if (t->getType() == Token::EOF) {
- break;
- }
- subset.push_back(t);
- }
- return subset;
-}
-
-size_t BufferedTokenStream::LA(ssize_t i) { return LT(i)->getType(); }
-
-Token* BufferedTokenStream::LB(size_t k) {
- if (k > _p) {
- return nullptr;
- }
- return _tokens[_p - k].get();
-}
-
-Token* BufferedTokenStream::LT(ssize_t k) {
- lazyInit();
- if (k == 0) {
- return nullptr;
- }
- if (k < 0) {
- return LB(-k);
- }
-
- size_t i = _p + k - 1;
- sync(i);
- if (i >= _tokens.size()) { // return EOF token
- // EOF must be last token
- return _tokens.back().get();
- }
-
- return _tokens[i].get();
-}
-
-ssize_t BufferedTokenStream::adjustSeekIndex(size_t i) { return i; }
-
-void BufferedTokenStream::lazyInit() {
- if (_needSetup) {
- setup();
- }
-}
-
-void BufferedTokenStream::setup() {
- _needSetup = false;
- sync(0);
- _p = adjustSeekIndex(0);
-}
-
-void BufferedTokenStream::setTokenSource(TokenSource* tokenSource) {
- _tokenSource = tokenSource;
- _tokens.clear();
- _fetchedEOF = false;
- _needSetup = true;
-}
-
-std::vector<Token*> BufferedTokenStream::getTokens() {
- std::vector<Token*> result;
- for (auto& t : _tokens) result.push_back(t.get());
- return result;
-}
-
-std::vector<Token*> BufferedTokenStream::getTokens(size_t start, size_t stop) {
- return getTokens(start, stop, std::vector<size_t>());
-}
-
-std::vector<Token*> BufferedTokenStream::getTokens(
- size_t start, size_t stop, const std::vector<size_t>& types) {
- lazyInit();
- if (stop >= _tokens.size() || start >= _tokens.size()) {
- throw IndexOutOfBoundsException(
- std::string("start ") + std::to_string(start) +
- std::string(" or stop ") + std::to_string(stop) +
- std::string(" not in 0..") + std::to_string(_tokens.size() - 1));
- }
-
- std::vector<Token*> filteredTokens;
-
- if (start > stop) {
- return filteredTokens;
- }
-
- for (size_t i = start; i <= stop; i++) {
- Token* tok = _tokens[i].get();
-
- if (types.empty() ||
- std::find(types.begin(), types.end(), tok->getType()) != types.end()) {
- filteredTokens.push_back(tok);
- }
- }
- return filteredTokens;
-}
-
-std::vector<Token*> BufferedTokenStream::getTokens(size_t start, size_t stop,
- size_t ttype) {
- std::vector<size_t> s;
- s.push_back(ttype);
- return getTokens(start, stop, s);
-}
-
-ssize_t BufferedTokenStream::nextTokenOnChannel(size_t i, size_t channel) {
- sync(i);
- if (i >= size()) {
- return size() - 1;
- }
-
- Token* token = _tokens[i].get();
- while (token->getChannel() != channel) {
- if (token->getType() == Token::EOF) {
- return i;
- }
- i++;
- sync(i);
- token = _tokens[i].get();
- }
- return i;
-}
-
-ssize_t BufferedTokenStream::previousTokenOnChannel(size_t i, size_t channel) {
- sync(i);
- if (i >= size()) {
- // the EOF token is on every channel
- return size() - 1;
- }
-
- while (true) {
- Token* token = _tokens[i].get();
- if (token->getType() == Token::EOF || token->getChannel() == channel) {
- return i;
- }
-
- if (i == 0) break;
- i--;
- }
- return i;
-}
-
-std::vector<Token*> BufferedTokenStream::getHiddenTokensToRight(
- size_t tokenIndex, ssize_t channel) {
- lazyInit();
- if (tokenIndex >= _tokens.size()) {
- throw IndexOutOfBoundsException(std::to_string(tokenIndex) + " not in 0.." +
- std::to_string(_tokens.size() - 1));
- }
-
- ssize_t nextOnChannel =
- nextTokenOnChannel(tokenIndex + 1, Lexer::DEFAULT_TOKEN_CHANNEL);
- size_t to;
- size_t from = tokenIndex + 1;
- // if none onchannel to right, nextOnChannel=-1 so set to = last token
- if (nextOnChannel == -1) {
- to = static_cast<ssize_t>(size() - 1);
- } else {
- to = nextOnChannel;
- }
-
- return filterForChannel(from, to, channel);
-}
-
-std::vector<Token*> BufferedTokenStream::getHiddenTokensToRight(
- size_t tokenIndex) {
- return getHiddenTokensToRight(tokenIndex, -1);
-}
-
-std::vector<Token*> BufferedTokenStream::getHiddenTokensToLeft(
- size_t tokenIndex, ssize_t channel) {
- lazyInit();
- if (tokenIndex >= _tokens.size()) {
- throw IndexOutOfBoundsException(std::to_string(tokenIndex) + " not in 0.." +
- std::to_string(_tokens.size() - 1));
- }
-
- if (tokenIndex == 0) {
- // Obviously no tokens can appear before the first token.
- return {};
- }
-
- ssize_t prevOnChannel =
- previousTokenOnChannel(tokenIndex - 1, Lexer::DEFAULT_TOKEN_CHANNEL);
- if (prevOnChannel == static_cast<ssize_t>(tokenIndex - 1)) {
- return {};
- }
- // if none onchannel to left, prevOnChannel=-1 then from=0
- size_t from = static_cast<size_t>(prevOnChannel + 1);
- size_t to = tokenIndex - 1;
-
- return filterForChannel(from, to, channel);
-}
-
-std::vector<Token*> BufferedTokenStream::getHiddenTokensToLeft(
- size_t tokenIndex) {
- return getHiddenTokensToLeft(tokenIndex, -1);
-}
-
-std::vector<Token*> BufferedTokenStream::filterForChannel(size_t from,
- size_t to,
- ssize_t channel) {
- std::vector<Token*> hidden;
- for (size_t i = from; i <= to; i++) {
- Token* t = _tokens[i].get();
- if (channel == -1) {
- if (t->getChannel() != Lexer::DEFAULT_TOKEN_CHANNEL) {
- hidden.push_back(t);
- }
- } else {
- if (t->getChannel() == static_cast<size_t>(channel)) {
- hidden.push_back(t);
- }
- }
- }
-
- return hidden;
-}
-
-bool BufferedTokenStream::isInitialized() const { return !_needSetup; }
-
-/**
- * Get the text of all tokens in this buffer.
- */
-std::string BufferedTokenStream::getSourceName() const {
- return _tokenSource->getSourceName();
-}
-
-std::string BufferedTokenStream::getText() {
- return getText(misc::Interval(0U, size() - 1));
-}
-
-std::string BufferedTokenStream::getText(const misc::Interval& interval) {
- lazyInit();
- fill();
- size_t start = interval.a;
- size_t stop = interval.b;
- if (start == INVALID_INDEX || stop == INVALID_INDEX) {
- return "";
- }
- if (stop >= _tokens.size()) {
- stop = _tokens.size() - 1;
- }
-
- std::stringstream ss;
- for (size_t i = start; i <= stop; i++) {
- Token* t = _tokens[i].get();
- if (t->getType() == Token::EOF) {
- break;
- }
- ss << t->getText();
- }
- return ss.str();
-}
-
-std::string BufferedTokenStream::getText(RuleContext* ctx) {
- return getText(ctx->getSourceInterval());
-}
-
-std::string BufferedTokenStream::getText(Token* start, Token* stop) {
- if (start != nullptr && stop != nullptr) {
- return getText(
- misc::Interval(start->getTokenIndex(), stop->getTokenIndex()));
- }
-
- return "";
-}
-
-void BufferedTokenStream::fill() {
- lazyInit();
- const size_t blockSize = 1000;
- while (true) {
- size_t fetched = fetch(blockSize);
- if (fetched < blockSize) {
- return;
- }
- }
-}
-
-void BufferedTokenStream::InitializeInstanceFields() {
- _needSetup = true;
- _fetchedEOF = false;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.h
deleted file mode 100644
index 320d1248cd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/BufferedTokenStream.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenStream.h"
-
-namespace antlr4 {
-
-/**
- * This implementation of {@link TokenStream} loads tokens from a
- * {@link TokenSource} on-demand, and places the tokens in a buffer to provide
- * access to any previous token by index.
- *
- * <p>
- * This token stream ignores the value of {@link Token#getChannel}. If your
- * parser requires the token stream filter tokens to only those on a particular
- * channel, such as {@link Token#DEFAULT_CHANNEL} or
- * {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such a
- * {@link CommonTokenStream}.</p>
- */
-class ANTLR4CPP_PUBLIC BufferedTokenStream : public TokenStream {
- public:
- BufferedTokenStream(TokenSource* tokenSource);
- BufferedTokenStream(const BufferedTokenStream& other) = delete;
-
- BufferedTokenStream& operator=(const BufferedTokenStream& other) = delete;
-
- virtual TokenSource* getTokenSource() const override;
- virtual size_t index() override;
- virtual ssize_t mark() override;
-
- virtual void release(ssize_t marker) override;
- virtual void reset();
- virtual void seek(size_t index) override;
-
- virtual size_t size() override;
- virtual void consume() override;
-
- virtual Token* get(size_t i) const override;
-
- /// Get all tokens from start..stop inclusively.
- virtual std::vector<Token*> get(size_t start, size_t stop);
-
- virtual size_t LA(ssize_t i) override;
- virtual Token* LT(ssize_t k) override;
-
- /// Reset this token stream by setting its token source.
- virtual void setTokenSource(TokenSource* tokenSource);
- virtual std::vector<Token*> getTokens();
- virtual std::vector<Token*> getTokens(size_t start, size_t stop);
-
- /// <summary>
- /// Given a start and stop index, return a List of all tokens in
- /// the token type BitSet. Return null if no tokens were found. This
- /// method looks at both on and off channel tokens.
- /// </summary>
- virtual std::vector<Token*> getTokens(size_t start, size_t stop,
- const std::vector<size_t>& types);
- virtual std::vector<Token*> getTokens(size_t start, size_t stop,
- size_t ttype);
-
- /// Collect all tokens on specified channel to the right of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or
- /// EOF. If channel is -1, find any non default channel token.
- virtual std::vector<Token*> getHiddenTokensToRight(size_t tokenIndex,
- ssize_t channel);
-
- /// <summary>
- /// Collect all hidden tokens (any off-default channel) to the right of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
- /// or EOF.
- /// </summary>
- virtual std::vector<Token*> getHiddenTokensToRight(size_t tokenIndex);
-
- /// <summary>
- /// Collect all tokens on specified channel to the left of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL.
- /// If channel is -1, find any non default channel token.
- /// </summary>
- virtual std::vector<Token*> getHiddenTokensToLeft(size_t tokenIndex,
- ssize_t channel);
-
- /// <summary>
- /// Collect all hidden tokens (any off-default channel) to the left of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL.
- /// </summary>
- virtual std::vector<Token*> getHiddenTokensToLeft(size_t tokenIndex);
-
- virtual std::string getSourceName() const override;
- virtual std::string getText() override;
- virtual std::string getText(const misc::Interval& interval) override;
- virtual std::string getText(RuleContext* ctx) override;
- virtual std::string getText(Token* start, Token* stop) override;
-
- /// Get all tokens from lexer until EOF.
- virtual void fill();
-
- protected:
- /**
- * The {@link TokenSource} from which tokens for this stream are fetched.
- */
- TokenSource* _tokenSource;
-
- /**
- * A collection of all tokens fetched from the token source. The list is
- * considered a complete view of the input once {@link #fetchedEOF} is set
- * to {@code true}.
- */
- std::vector<std::unique_ptr<Token>> _tokens;
-
- /**
- * The index into {@link #tokens} of the current token (next token to
- * {@link #consume}). {@link #tokens}{@code [}{@link #p}{@code ]} should be
- * {@link #LT LT(1)}.
- *
- * <p>This field is set to -1 when the stream is first constructed or when
- * {@link #setTokenSource} is called, indicating that the first token has
- * not yet been fetched from the token source. For additional information,
- * see the documentation of {@link IntStream} for a description of
- * Initializing Methods.</p>
- */
- // ml: since -1 requires to make this member signed for just this single
- // aspect we use a member _needSetup instead.
- // Use bool isInitialized() to find out if this stream has started
- // reading.
- size_t _p;
-
- /**
- * Indicates whether the {@link Token#EOF} token has been fetched from
- * {@link #tokenSource} and added to {@link #tokens}. This field improves
- * performance for the following cases:
- *
- * <ul>
- * <li>{@link #consume}: The lookahead check in {@link #consume} to prevent
- * consuming the EOF symbol is optimized by checking the values of
- * {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.</li>
- * <li>{@link #fetch}: The check to prevent adding multiple EOF symbols into
- * {@link #tokens} is trivial with this field.</li>
- * </ul>
- */
- bool _fetchedEOF;
-
- /// <summary>
- /// Make sure index {@code i} in tokens has a token.
- /// </summary>
- /// <returns> {@code true} if a token is located at index {@code i}, otherwise
- /// {@code false}. </returns>
- /// <seealso cref= #get(int i) </seealso>
- virtual bool sync(size_t i);
-
- /// <summary>
- /// Add {@code n} elements to buffer.
- /// </summary>
- /// <returns> The actual number of elements added to the buffer. </returns>
- virtual size_t fetch(size_t n);
-
- virtual Token* LB(size_t k);
-
- /// Allows derived classes to modify the behavior of operations which change
- /// the current stream position by adjusting the target token index of a seek
- /// operation. The default implementation simply returns {@code i}. If an
- /// exception is thrown in this method, the current stream index should not be
- /// changed.
- /// <p/>
- /// For example, <seealso cref="CommonTokenStream"/> overrides this method to
- /// ensure that the seek target is always an on-channel token.
- ///
- /// <param name="i"> The target token index. </param>
- /// <returns> The adjusted target token index. </returns>
- virtual ssize_t adjustSeekIndex(size_t i);
- void lazyInit();
- virtual void setup();
-
- /**
- * Given a starting index, return the index of the next token on channel.
- * Return {@code i} if {@code tokens[i]} is on channel. Return the index of
- * the EOF token if there are no tokens on channel between {@code i} and
- * EOF.
- */
- virtual ssize_t nextTokenOnChannel(size_t i, size_t channel);
-
- /**
- * Given a starting index, return the index of the previous token on
- * channel. Return {@code i} if {@code tokens[i]} is on channel. Return -1
- * if there are no tokens on channel between {@code i} and 0.
- *
- * <p>
- * If {@code i} specifies an index at or after the EOF token, the EOF token
- * index is returned. This is due to the fact that the EOF token is treated
- * as though it were on every channel.</p>
- */
- virtual ssize_t previousTokenOnChannel(size_t i, size_t channel);
-
- virtual std::vector<Token*> filterForChannel(size_t from, size_t to,
- ssize_t channel);
-
- bool isInitialized() const;
-
- private:
- bool _needSetup;
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.cpp
deleted file mode 100644
index f1dc415a56..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CharStream.h"
-
-using namespace antlr4;
-
-CharStream::~CharStream() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.h
deleted file mode 100644
index 08baff935d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CharStream.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "IntStream.h"
-#include "misc/Interval.h"
-
-namespace antlr4 {
-
-/// A source of characters for an ANTLR lexer.
-class ANTLR4CPP_PUBLIC CharStream : public IntStream {
- public:
- virtual ~CharStream();
-
- /// This method returns the text for a range of characters within this input
- /// stream. This method is guaranteed to not throw an exception if the
- /// specified interval lies entirely within a marked range. For more
- /// information about marked ranges, see IntStream::mark.
- ///
- /// <param name="interval"> an interval within the stream </param>
- /// <returns> the text of the specified interval
- /// </returns>
- /// <exception cref="NullPointerException"> if {@code interval} is {@code
- /// null} </exception> <exception cref="IllegalArgumentException"> if {@code
- /// interval.a < 0}, or if
- /// {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or
- /// past the end of the stream </exception>
- /// <exception cref="UnsupportedOperationException"> if the stream does not
- /// support getting the text of the specified interval </exception>
- virtual std::string getText(const misc::Interval& interval) = 0;
-
- virtual std::string toString() const = 0;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.cpp
deleted file mode 100644
index 0005eaffb4..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CharStream.h"
-#include "Recognizer.h"
-#include "TokenSource.h"
-#include "Vocabulary.h"
-
-#include "misc/Interval.h"
-
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-
-#include "CommonToken.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-
-using namespace antlrcpp;
-
-const std::pair<TokenSource*, CharStream*> CommonToken::EMPTY_SOURCE(nullptr,
- nullptr);
-
-CommonToken::CommonToken(size_t type) {
- InitializeInstanceFields();
- _type = type;
-}
-
-CommonToken::CommonToken(std::pair<TokenSource*, CharStream*> source,
- size_t type, size_t channel, size_t start,
- size_t stop) {
- InitializeInstanceFields();
- _source = source;
- _type = type;
- _channel = channel;
- _start = start;
- _stop = stop;
- if (_source.first != nullptr) {
- _line = static_cast<int>(source.first->getLine());
- _charPositionInLine = source.first->getCharPositionInLine();
- }
-}
-
-CommonToken::CommonToken(size_t type, const std::string& text) {
- InitializeInstanceFields();
- _type = type;
- _channel = DEFAULT_CHANNEL;
- _text = text;
- _source = EMPTY_SOURCE;
-}
-
-CommonToken::CommonToken(Token* oldToken) {
- InitializeInstanceFields();
- _type = oldToken->getType();
- _line = oldToken->getLine();
- _index = oldToken->getTokenIndex();
- _charPositionInLine = oldToken->getCharPositionInLine();
- _channel = oldToken->getChannel();
- _start = oldToken->getStartIndex();
- _stop = oldToken->getStopIndex();
-
- if (is<CommonToken*>(oldToken)) {
- _text = (static_cast<CommonToken*>(oldToken))->_text;
- _source = (static_cast<CommonToken*>(oldToken))->_source;
- } else {
- _text = oldToken->getText();
- _source = {oldToken->getTokenSource(), oldToken->getInputStream()};
- }
-}
-
-size_t CommonToken::getType() const { return _type; }
-
-void CommonToken::setLine(size_t line) { _line = line; }
-
-std::string CommonToken::getText() const {
- if (!_text.empty()) {
- return _text;
- }
-
- CharStream* input = getInputStream();
- if (input == nullptr) {
- return "";
- }
- size_t n = input->size();
- if (_start < n && _stop < n) {
- return input->getText(misc::Interval(_start, _stop));
- } else {
- return "<EOF>";
- }
-}
-
-void CommonToken::setText(const std::string& text) { _text = text; }
-
-size_t CommonToken::getLine() const { return _line; }
-
-size_t CommonToken::getCharPositionInLine() const {
- return _charPositionInLine;
-}
-
-void CommonToken::setCharPositionInLine(size_t charPositionInLine) {
- _charPositionInLine = charPositionInLine;
-}
-
-size_t CommonToken::getChannel() const { return _channel; }
-
-void CommonToken::setChannel(size_t channel) { _channel = channel; }
-
-void CommonToken::setType(size_t type) { _type = type; }
-
-size_t CommonToken::getStartIndex() const { return _start; }
-
-void CommonToken::setStartIndex(size_t start) { _start = start; }
-
-size_t CommonToken::getStopIndex() const { return _stop; }
-
-void CommonToken::setStopIndex(size_t stop) { _stop = stop; }
-
-size_t CommonToken::getTokenIndex() const { return _index; }
-
-void CommonToken::setTokenIndex(size_t index) { _index = index; }
-
-antlr4::TokenSource* CommonToken::getTokenSource() const {
- return _source.first;
-}
-
-antlr4::CharStream* CommonToken::getInputStream() const {
- return _source.second;
-}
-
-std::string CommonToken::toString() const { return toString(nullptr); }
-
-std::string CommonToken::toString(Recognizer* r) const {
- std::stringstream ss;
-
- std::string channelStr;
- if (_channel > 0) {
- channelStr = ",channel=" + std::to_string(_channel);
- }
- std::string txt = getText();
- if (!txt.empty()) {
- antlrcpp::replaceAll(txt, "\n", "\\n");
- antlrcpp::replaceAll(txt, "\r", "\\r");
- antlrcpp::replaceAll(txt, "\t", "\\t");
- } else {
- txt = "<no text>";
- }
-
- std::string typeString = std::to_string(symbolToNumeric(_type));
- if (r != nullptr) typeString = r->getVocabulary().getDisplayName(_type);
-
- ss << "[@" << symbolToNumeric(getTokenIndex()) << ","
- << symbolToNumeric(_start) << ":" << symbolToNumeric(_stop) << "='" << txt
- << "',<" << typeString << ">" << channelStr << "," << _line << ":"
- << getCharPositionInLine() << "]";
-
- return ss.str();
-}
-
-void CommonToken::InitializeInstanceFields() {
- _type = 0;
- _line = 0;
- _charPositionInLine = INVALID_INDEX;
- _channel = DEFAULT_CHANNEL;
- _index = INVALID_INDEX;
- _start = 0;
- _stop = 0;
- _source = EMPTY_SOURCE;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.h
deleted file mode 100644
index d05222d67f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonToken.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "WritableToken.h"
-
-namespace antlr4 {
-
-class ANTLR4CPP_PUBLIC CommonToken : public WritableToken {
- protected:
- /**
- * An empty {@link Pair} which is used as the default value of
- * {@link #source} for tokens that do not have a source.
- */
- static const std::pair<TokenSource*, CharStream*> EMPTY_SOURCE;
-
- /**
- * This is the backing field for {@link #getType} and {@link #setType}.
- */
- size_t _type;
-
- /**
- * This is the backing field for {@link #getLine} and {@link #setLine}.
- */
- size_t _line;
-
- /**
- * This is the backing field for {@link #getCharPositionInLine} and
- * {@link #setCharPositionInLine}.
- */
- size_t _charPositionInLine; // set to invalid position
-
- /**
- * This is the backing field for {@link #getChannel} and
- * {@link #setChannel}.
- */
- size_t _channel;
-
- /**
- * This is the backing field for {@link #getTokenSource} and
- * {@link #getInputStream}.
- *
- * <p>
- * These properties share a field to reduce the memory footprint of
- * {@link CommonToken}. Tokens created by a {@link CommonTokenFactory} from
- * the same source and input stream share a reference to the same
- * {@link Pair} containing these values.</p>
- */
-
- std::pair<TokenSource*, CharStream*> _source; // ml: pure references, usually
- // from statically allocated
- // classes.
-
- /**
- * This is the backing field for {@link #getText} when the token text is
- * explicitly set in the constructor or via {@link #setText}.
- *
- * @see #getText()
- */
- std::string _text;
-
- /**
- * This is the backing field for {@link #getTokenIndex} and
- * {@link #setTokenIndex}.
- */
- size_t _index;
-
- /**
- * This is the backing field for {@link #getStartIndex} and
- * {@link #setStartIndex}.
- */
- size_t _start;
-
- /**
- * This is the backing field for {@link #getStopIndex} and
- * {@link #setStopIndex}.
- */
- size_t _stop;
-
- public:
- /**
- * Constructs a new {@link CommonToken} with the specified token type.
- *
- * @param type The token type.
- */
- CommonToken(size_t type);
- CommonToken(std::pair<TokenSource*, CharStream*> source, size_t type,
- size_t channel, size_t start, size_t stop);
-
- /**
- * Constructs a new {@link CommonToken} with the specified token type and
- * text.
- *
- * @param type The token type.
- * @param text The text of the token.
- */
- CommonToken(size_t type, const std::string& text);
-
- /**
- * Constructs a new {@link CommonToken} as a copy of another {@link Token}.
- *
- * <p>
- * If {@code oldToken} is also a {@link CommonToken} instance, the newly
- * constructed token will share a reference to the {@link #text} field and
- * the {@link Pair} stored in {@link #source}. Otherwise, {@link #text} will
- * be assigned the result of calling {@link #getText}, and {@link #source}
- * will be constructed from the result of {@link Token#getTokenSource} and
- * {@link Token#getInputStream}.</p>
- *
- * @param oldToken The token to copy.
- */
- CommonToken(Token* oldToken);
-
- virtual size_t getType() const override;
-
- /**
- * Explicitly set the text for this token. If {@code text} is not
- * {@code null}, then {@link #getText} will return this value rather than
- * extracting the text from the input.
- *
- * @param text The explicit text of the token, or {@code null} if the text
- * should be obtained from the input along with the start and stop indexes
- * of the token.
- */
- virtual void setText(const std::string& text) override;
- virtual std::string getText() const override;
-
- virtual void setLine(size_t line) override;
- virtual size_t getLine() const override;
-
- virtual size_t getCharPositionInLine() const override;
- virtual void setCharPositionInLine(size_t charPositionInLine) override;
-
- virtual size_t getChannel() const override;
- virtual void setChannel(size_t channel) override;
-
- virtual void setType(size_t type) override;
-
- virtual size_t getStartIndex() const override;
- virtual void setStartIndex(size_t start);
-
- virtual size_t getStopIndex() const override;
- virtual void setStopIndex(size_t stop);
-
- virtual size_t getTokenIndex() const override;
- virtual void setTokenIndex(size_t index) override;
-
- virtual TokenSource* getTokenSource() const override;
- virtual CharStream* getInputStream() const override;
-
- virtual std::string toString() const override;
-
- virtual std::string toString(Recognizer* r) const;
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.cpp
deleted file mode 100644
index b1d5159974..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CharStream.h"
-#include "CommonToken.h"
-#include "misc/Interval.h"
-
-#include "CommonTokenFactory.h"
-
-using namespace antlr4;
-
-const Ref<TokenFactory<CommonToken>> CommonTokenFactory::DEFAULT =
- std::make_shared<CommonTokenFactory>();
-
-CommonTokenFactory::CommonTokenFactory(bool copyText_) : copyText(copyText_) {}
-
-CommonTokenFactory::CommonTokenFactory() : CommonTokenFactory(false) {}
-
-std::unique_ptr<CommonToken> CommonTokenFactory::create(
- std::pair<TokenSource*, CharStream*> source, size_t type,
- const std::string& text, size_t channel, size_t start, size_t stop,
- size_t line, size_t charPositionInLine) {
- std::unique_ptr<CommonToken> t(
- new CommonToken(source, type, channel, start, stop));
- t->setLine(line);
- t->setCharPositionInLine(charPositionInLine);
- if (text != "") {
- t->setText(text);
- } else if (copyText && source.second != nullptr) {
- t->setText(source.second->getText(misc::Interval(start, stop)));
- }
-
- return t;
-}
-
-std::unique_ptr<CommonToken> CommonTokenFactory::create(
- size_t type, const std::string& text) {
- return std::unique_ptr<CommonToken>(new CommonToken(type, text));
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.h
deleted file mode 100644
index ebb725d118..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenFactory.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenFactory.h"
-
-namespace antlr4 {
-
-/**
- * This default implementation of {@link TokenFactory} creates
- * {@link CommonToken} objects.
- */
-class ANTLR4CPP_PUBLIC CommonTokenFactory : public TokenFactory<CommonToken> {
- public:
- /**
- * The default {@link CommonTokenFactory} instance.
- *
- * <p>
- * This token factory does not explicitly copy token text when constructing
- * tokens.</p>
- */
- static const Ref<TokenFactory<CommonToken>> DEFAULT;
-
- protected:
- /**
- * Indicates whether {@link CommonToken#setText} should be called after
- * constructing tokens to explicitly set the text. This is useful for cases
- * where the input stream might not be able to provide arbitrary substrings
- * of text from the input after the lexer creates a token (e.g. the
- * implementation of {@link CharStream#getText} in
- * {@link UnbufferedCharStream} throws an
- * {@link UnsupportedOperationException}). Explicitly setting the token text
- * allows {@link Token#getText} to be called at any time regardless of the
- * input stream implementation.
- *
- * <p>
- * The default value is {@code false} to avoid the performance and memory
- * overhead of copying text for every token unless explicitly requested.</p>
- */
- const bool copyText;
-
- public:
- /**
- * Constructs a {@link CommonTokenFactory} with the specified value for
- * {@link #copyText}.
- *
- * <p>
- * When {@code copyText} is {@code false}, the {@link #DEFAULT} instance
- * should be used instead of constructing a new instance.</p>
- *
- * @param copyText The value for {@link #copyText}.
- */
- CommonTokenFactory(bool copyText);
-
- /**
- * Constructs a {@link CommonTokenFactory} with {@link #copyText} set to
- * {@code false}.
- *
- * <p>
- * The {@link #DEFAULT} instance should be used instead of calling this
- * directly.</p>
- */
- CommonTokenFactory();
-
- virtual std::unique_ptr<CommonToken> create(
- std::pair<TokenSource*, CharStream*> source, size_t type,
- const std::string& text, size_t channel, size_t start, size_t stop,
- size_t line, size_t charPositionInLine) override;
-
- virtual std::unique_ptr<CommonToken> create(size_t type,
- const std::string& text) override;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.cpp
deleted file mode 100644
index 54a1734afb..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-
-#include "CommonTokenStream.h"
-
-using namespace antlr4;
-
-CommonTokenStream::CommonTokenStream(TokenSource* tokenSource)
- : CommonTokenStream(tokenSource, Token::DEFAULT_CHANNEL) {}
-
-CommonTokenStream::CommonTokenStream(TokenSource* tokenSource, size_t channel_)
- : BufferedTokenStream(tokenSource), channel(channel_) {}
-
-ssize_t CommonTokenStream::adjustSeekIndex(size_t i) {
- return nextTokenOnChannel(i, channel);
-}
-
-Token* CommonTokenStream::LB(size_t k) {
- if (k == 0 || k > _p) {
- return nullptr;
- }
-
- ssize_t i = static_cast<ssize_t>(_p);
- size_t n = 1;
- // find k good tokens looking backwards
- while (n <= k) {
- // skip off-channel tokens
- i = previousTokenOnChannel(i - 1, channel);
- n++;
- }
- if (i < 0) {
- return nullptr;
- }
-
- return _tokens[i].get();
-}
-
-Token* CommonTokenStream::LT(ssize_t k) {
- lazyInit();
- if (k == 0) {
- return nullptr;
- }
- if (k < 0) {
- return LB(static_cast<size_t>(-k));
- }
- size_t i = _p;
- ssize_t n = 1; // we know tokens[p] is a good one
- // find k good tokens
- while (n < k) {
- // skip off-channel tokens, but make sure to not look past EOF
- if (sync(i + 1)) {
- i = nextTokenOnChannel(i + 1, channel);
- }
- n++;
- }
-
- return _tokens[i].get();
-}
-
-int CommonTokenStream::getNumberOfOnChannelTokens() {
- int n = 0;
- fill();
- for (size_t i = 0; i < _tokens.size(); i++) {
- Token* t = _tokens[i].get();
- if (t->getChannel() == channel) {
- n++;
- }
- if (t->getType() == Token::EOF) {
- break;
- }
- }
- return n;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.h
deleted file mode 100644
index 0de2defa07..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/CommonTokenStream.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BufferedTokenStream.h"
-
-namespace antlr4 {
-
-/**
- * This class extends {@link BufferedTokenStream} with functionality to filter
- * token streams to tokens on a particular channel (tokens where
- * {@link Token#getChannel} returns a particular value).
- *
- * <p>
- * This token stream provides access to all tokens by index or when calling
- * methods like {@link #getText}. The channel filtering is only used for code
- * accessing tokens via the lookahead methods {@link #LA}, {@link #LT}, and
- * {@link #LB}.</p>
- *
- * <p>
- * By default, tokens are placed on the default channel
- * ({@link Token#DEFAULT_CHANNEL}), but may be reassigned by using the
- * {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
- * call {@link Lexer#setChannel}.
- * </p>
- *
- * <p>
- * Note: lexer rules which use the {@code ->skip} lexer command or call
- * {@link Lexer#skip} do not produce tokens at all, so input text matched by
- * such a rule will not be available as part of the token stream, regardless of
- * channel.</p>
- */
-class ANTLR4CPP_PUBLIC CommonTokenStream : public BufferedTokenStream {
- public:
- /**
- * Constructs a new {@link CommonTokenStream} using the specified token
- * source and the default token channel ({@link Token#DEFAULT_CHANNEL}).
- *
- * @param tokenSource The token source.
- */
- CommonTokenStream(TokenSource* tokenSource);
-
- /**
- * Constructs a new {@link CommonTokenStream} using the specified token
- * source and filtering tokens to the specified channel. Only tokens whose
- * {@link Token#getChannel} matches {@code channel} or have the
- * {@link Token#getType} equal to {@link Token#EOF} will be returned by the
- * token stream lookahead methods.
- *
- * @param tokenSource The token source.
- * @param channel The channel to use for filtering tokens.
- */
- CommonTokenStream(TokenSource* tokenSource, size_t channel);
-
- virtual Token* LT(ssize_t k) override;
-
- /// Count EOF just once.
- virtual int getNumberOfOnChannelTokens();
-
- protected:
- /**
- * Specifies the channel to use for filtering tokens.
- *
- * <p>
- * The default value is {@link Token#DEFAULT_CHANNEL}, which matches the
- * default channel assigned to tokens created by the lexer.</p>
- */
- size_t channel;
-
- virtual ssize_t adjustSeekIndex(size_t i) override;
-
- virtual Token* LB(size_t k) override;
-};
-
-} // namespace antlr4
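For context on the CommonTokenStream being removed above: lookahead (LT/LB) is filtered to a single channel, while get() and size() still expose every buffered token. A minimal usage sketch, assuming an ANTLR-generated lexer class named MyLexer (hypothetical; any generated lexer behaves the same way):

    #include <iostream>
    #include "antlr4-runtime.h"  // umbrella header for the ANTLR4 C++ runtime
    #include "MyLexer.h"         // hypothetical ANTLR-generated lexer

    int main() {
      antlr4::ANTLRInputStream input("a = b + 3;");
      MyLexer lexer(&input);
      // Lookahead only sees DEFAULT_CHANNEL tokens; tokens sent to other
      // channels (e.g. hidden whitespace) remain accessible via get()/size().
      antlr4::CommonTokenStream tokens(&lexer, antlr4::Token::DEFAULT_CHANNEL);
      tokens.fill();
      for (size_t i = 0; i < tokens.size(); ++i) {
        std::cout << tokens.get(i)->toString() << std::endl;
      }
      return 0;
    }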
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp
deleted file mode 100644
index dc87366701..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ConsoleErrorListener.h"
-
-using namespace antlr4;
-
-ConsoleErrorListener ConsoleErrorListener::INSTANCE;
-
-void ConsoleErrorListener::syntaxError(Recognizer* /*recognizer*/,
- Token* /*offendingSymbol*/, size_t line,
- size_t charPositionInLine,
- const std::string& msg,
- std::exception_ptr /*e*/) {
- std::cerr << "line " << line << ":" << charPositionInLine << " " << msg
- << std::endl;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.h
deleted file mode 100644
index 7f904d5af3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ConsoleErrorListener.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BaseErrorListener.h"
-
-namespace antlr4 {
-
-class ANTLR4CPP_PUBLIC ConsoleErrorListener : public BaseErrorListener {
- public:
- /**
- * Provides a default instance of {@link ConsoleErrorListener}.
- */
- static ConsoleErrorListener INSTANCE;
-
- /**
- * {@inheritDoc}
- *
- * <p>
- * This implementation prints messages to {@link System#err} containing the
- * values of {@code line}, {@code charPositionInLine}, and {@code msg} using
- * the following format.</p>
- *
- * <pre>
- * line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
- * </pre>
- */
- virtual void syntaxError(Recognizer* recognizer, Token* offendingSymbol,
- size_t line, size_t charPositionInLine,
- const std::string& msg,
- std::exception_ptr e) override;
-};
-
-} // namespace antlr4
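The ConsoleErrorListener::INSTANCE removed above is the listener every recognizer installs by default; code that wants structured error handling typically removes it and registers its own BaseErrorListener. A sketch under that assumption (the parser mentioned in the usage comment stands for a hypothetical generated antlr4::Parser subclass):

    #include <cstddef>
    #include <exception>
    #include <string>
    #include <vector>
    #include "antlr4-runtime.h"

    class CollectingErrorListener : public antlr4::BaseErrorListener {
     public:
      void syntaxError(antlr4::Recognizer* /*recognizer*/,
                       antlr4::Token* /*offendingSymbol*/, size_t line,
                       size_t charPositionInLine, const std::string& msg,
                       std::exception_ptr /*e*/) override {
        // Collect errors instead of printing them to stderr.
        errors.push_back(std::to_string(line) + ":" +
                         std::to_string(charPositionInLine) + " " + msg);
      }
      std::vector<std::string> errors;
    };

    // Usage with a hypothetical generated parser:
    //   CollectingErrorListener listener;
    //   parser.removeErrorListeners();  // drops ConsoleErrorListener::INSTANCE
    //   parser.addErrorListener(&listener);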
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp
deleted file mode 100644
index 42d929713c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CommonToken.h"
-#include "FailedPredicateException.h"
-#include "InputMismatchException.h"
-#include "NoViableAltException.h"
-#include "Parser.h"
-#include "ParserRuleContext.h"
-#include "Vocabulary.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "atn/ParserATNSimulator.h"
-#include "atn/RuleTransition.h"
-#include "misc/IntervalSet.h"
-#include "support/StringUtils.h"
-
-#include "DefaultErrorStrategy.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-using namespace antlrcpp;
-
-DefaultErrorStrategy::DefaultErrorStrategy() { InitializeInstanceFields(); }
-
-DefaultErrorStrategy::~DefaultErrorStrategy() {}
-
-void DefaultErrorStrategy::reset(Parser* recognizer) {
- _errorSymbols.clear();
- endErrorCondition(recognizer);
-}
-
-void DefaultErrorStrategy::beginErrorCondition(Parser* /*recognizer*/) {
- errorRecoveryMode = true;
-}
-
-bool DefaultErrorStrategy::inErrorRecoveryMode(Parser* /*recognizer*/) {
- return errorRecoveryMode;
-}
-
-void DefaultErrorStrategy::endErrorCondition(Parser* /*recognizer*/) {
- errorRecoveryMode = false;
- lastErrorIndex = -1;
-}
-
-void DefaultErrorStrategy::reportMatch(Parser* recognizer) {
- endErrorCondition(recognizer);
-}
-
-void DefaultErrorStrategy::reportError(Parser* recognizer,
- const RecognitionException& e) {
- // If we've already reported an error and have not matched a token
- // yet successfully, don't report any errors.
- if (inErrorRecoveryMode(recognizer)) {
- return; // don't report spurious errors
- }
-
- beginErrorCondition(recognizer);
- if (is<const NoViableAltException*>(&e)) {
- reportNoViableAlternative(recognizer,
- static_cast<const NoViableAltException&>(e));
- } else if (is<const InputMismatchException*>(&e)) {
- reportInputMismatch(recognizer,
- static_cast<const InputMismatchException&>(e));
- } else if (is<const FailedPredicateException*>(&e)) {
- reportFailedPredicate(recognizer,
- static_cast<const FailedPredicateException&>(e));
- } else if (is<const RecognitionException*>(&e)) {
- recognizer->notifyErrorListeners(e.getOffendingToken(), e.what(),
- std::current_exception());
- }
-}
-
-void DefaultErrorStrategy::recover(Parser* recognizer,
- std::exception_ptr /*e*/) {
- if (lastErrorIndex ==
- static_cast<int>(recognizer->getInputStream()->index()) &&
- lastErrorStates.contains(recognizer->getState())) {
- // uh oh, another error at same token index and previously-visited
- // state in ATN; must be a case where LT(1) is in the recovery
- // token set so nothing got consumed. Consume a single token
- // at least to prevent an infinite loop; this is a failsafe.
- recognizer->consume();
- }
- lastErrorIndex = static_cast<int>(recognizer->getInputStream()->index());
- lastErrorStates.add(recognizer->getState());
- misc::IntervalSet followSet = getErrorRecoverySet(recognizer);
- consumeUntil(recognizer, followSet);
-}
-
-void DefaultErrorStrategy::sync(Parser* recognizer) {
- atn::ATNState* s = recognizer->getInterpreter<atn::ATNSimulator>()
- ->atn.states[recognizer->getState()];
-
- // If already recovering, don't try to sync
- if (inErrorRecoveryMode(recognizer)) {
- return;
- }
-
- TokenStream* tokens = recognizer->getTokenStream();
- size_t la = tokens->LA(1);
-
- // try cheaper subset first; might get lucky. seems to shave a wee bit off
- auto nextTokens = recognizer->getATN().nextTokens(s);
- if (nextTokens.contains(Token::EPSILON) || nextTokens.contains(la)) {
- return;
- }
-
- switch (s->getStateType()) {
- case atn::ATNState::BLOCK_START:
- case atn::ATNState::STAR_BLOCK_START:
- case atn::ATNState::PLUS_BLOCK_START:
- case atn::ATNState::STAR_LOOP_ENTRY:
- // report error and recover if possible
- if (singleTokenDeletion(recognizer) != nullptr) {
- return;
- }
-
- throw InputMismatchException(recognizer);
-
- case atn::ATNState::PLUS_LOOP_BACK:
- case atn::ATNState::STAR_LOOP_BACK: {
- reportUnwantedToken(recognizer);
- misc::IntervalSet expecting = recognizer->getExpectedTokens();
- misc::IntervalSet whatFollowsLoopIterationOrRule =
- expecting.Or(getErrorRecoverySet(recognizer));
- consumeUntil(recognizer, whatFollowsLoopIterationOrRule);
- } break;
-
- default:
- // do nothing if we can't identify the exact kind of ATN state
- break;
- }
-}
-
-void DefaultErrorStrategy::reportNoViableAlternative(
- Parser* recognizer, const NoViableAltException& e) {
- TokenStream* tokens = recognizer->getTokenStream();
- std::string input;
- if (tokens != nullptr) {
- if (e.getStartToken()->getType() == Token::EOF) {
- input = "<EOF>";
- } else {
- input = tokens->getText(e.getStartToken(), e.getOffendingToken());
- }
- } else {
- input = "<unknown input>";
- }
- std::string msg = "no viable alternative at input " + escapeWSAndQuote(input);
- recognizer->notifyErrorListeners(e.getOffendingToken(), msg,
- std::make_exception_ptr(e));
-}
-
-void DefaultErrorStrategy::reportInputMismatch(
- Parser* recognizer, const InputMismatchException& e) {
- std::string msg = "mismatched input " +
- getTokenErrorDisplay(e.getOffendingToken()) +
- " expecting " +
- e.getExpectedTokens().toString(recognizer->getVocabulary());
- recognizer->notifyErrorListeners(e.getOffendingToken(), msg,
- std::make_exception_ptr(e));
-}
-
-void DefaultErrorStrategy::reportFailedPredicate(
- Parser* recognizer, const FailedPredicateException& e) {
- const std::string& ruleName =
- recognizer->getRuleNames()[recognizer->getContext()->getRuleIndex()];
- std::string msg = "rule " + ruleName + " " + e.what();
- recognizer->notifyErrorListeners(e.getOffendingToken(), msg,
- std::make_exception_ptr(e));
-}
-
-void DefaultErrorStrategy::reportUnwantedToken(Parser* recognizer) {
- if (inErrorRecoveryMode(recognizer)) {
- return;
- }
-
- beginErrorCondition(recognizer);
-
- Token* t = recognizer->getCurrentToken();
- std::string tokenName = getTokenErrorDisplay(t);
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
-
- std::string msg = "extraneous input " + tokenName + " expecting " +
- expecting.toString(recognizer->getVocabulary());
- recognizer->notifyErrorListeners(t, msg, nullptr);
-}
-
-void DefaultErrorStrategy::reportMissingToken(Parser* recognizer) {
- if (inErrorRecoveryMode(recognizer)) {
- return;
- }
-
- beginErrorCondition(recognizer);
-
- Token* t = recognizer->getCurrentToken();
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
- std::string expectedText = expecting.toString(recognizer->getVocabulary());
- std::string msg =
- "missing " + expectedText + " at " + getTokenErrorDisplay(t);
-
- recognizer->notifyErrorListeners(t, msg, nullptr);
-}
-
-Token* DefaultErrorStrategy::recoverInline(Parser* recognizer) {
- // Single token deletion.
- Token* matchedSymbol = singleTokenDeletion(recognizer);
- if (matchedSymbol) {
- // We have deleted the extra token.
- // Now, move past ttype token as if all were ok.
- recognizer->consume();
- return matchedSymbol;
- }
-
- // Single token insertion.
- if (singleTokenInsertion(recognizer)) {
- return getMissingSymbol(recognizer);
- }
-
- // Even that didn't work; must throw the exception.
- throw InputMismatchException(recognizer);
-}
-
-bool DefaultErrorStrategy::singleTokenInsertion(Parser* recognizer) {
- ssize_t currentSymbolType = recognizer->getInputStream()->LA(1);
-
- // if current token is consistent with what could come after current
- // ATN state, then we know we're missing a token; error recovery
- // is free to conjure up and insert the missing token
- atn::ATNState* currentState = recognizer->getInterpreter<atn::ATNSimulator>()
- ->atn.states[recognizer->getState()];
- atn::ATNState* next = currentState->transitions[0]->target;
- const atn::ATN& atn = recognizer->getInterpreter<atn::ATNSimulator>()->atn;
- misc::IntervalSet expectingAtLL2 =
- atn.nextTokens(next, recognizer->getContext());
- if (expectingAtLL2.contains(currentSymbolType)) {
- reportMissingToken(recognizer);
- return true;
- }
- return false;
-}
-
-Token* DefaultErrorStrategy::singleTokenDeletion(Parser* recognizer) {
- size_t nextTokenType = recognizer->getInputStream()->LA(2);
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
- if (expecting.contains(nextTokenType)) {
- reportUnwantedToken(recognizer);
- recognizer
- ->consume(); // simply delete extra token
- // we want to return the token we're actually matching
- Token* matchedSymbol = recognizer->getCurrentToken();
- reportMatch(recognizer); // we know current token is correct
- return matchedSymbol;
- }
- return nullptr;
-}
-
-Token* DefaultErrorStrategy::getMissingSymbol(Parser* recognizer) {
- Token* currentSymbol = recognizer->getCurrentToken();
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
- size_t expectedTokenType = expecting.getMinElement(); // get any element
- std::string tokenText;
- if (expectedTokenType == Token::EOF) {
- tokenText = "<missing EOF>";
- } else {
- tokenText = "<missing " +
- recognizer->getVocabulary().getDisplayName(expectedTokenType) +
- ">";
- }
- Token* current = currentSymbol;
- Token* lookback = recognizer->getTokenStream()->LT(-1);
- if (current->getType() == Token::EOF && lookback != nullptr) {
- current = lookback;
- }
-
- _errorSymbols.push_back(recognizer->getTokenFactory()->create(
- {current->getTokenSource(), current->getTokenSource()->getInputStream()},
- expectedTokenType, tokenText, Token::DEFAULT_CHANNEL, INVALID_INDEX,
- INVALID_INDEX, current->getLine(), current->getCharPositionInLine()));
-
- return _errorSymbols.back().get();
-}
-
-misc::IntervalSet DefaultErrorStrategy::getExpectedTokens(Parser* recognizer) {
- return recognizer->getExpectedTokens();
-}
-
-std::string DefaultErrorStrategy::getTokenErrorDisplay(Token* t) {
- if (t == nullptr) {
- return "<no Token>";
- }
- std::string s = getSymbolText(t);
- if (s == "") {
- if (getSymbolType(t) == Token::EOF) {
- s = "<EOF>";
- } else {
- s = "<" + std::to_string(getSymbolType(t)) + ">";
- }
- }
- return escapeWSAndQuote(s);
-}
-
-std::string DefaultErrorStrategy::getSymbolText(Token* symbol) {
- return symbol->getText();
-}
-
-size_t DefaultErrorStrategy::getSymbolType(Token* symbol) {
- return symbol->getType();
-}
-
-std::string DefaultErrorStrategy::escapeWSAndQuote(const std::string& s) const {
- std::string result = s;
- antlrcpp::replaceAll(result, "\n", "\\n");
- antlrcpp::replaceAll(result, "\r", "\\r");
- antlrcpp::replaceAll(result, "\t", "\\t");
- return "'" + result + "'";
-}
-
-misc::IntervalSet DefaultErrorStrategy::getErrorRecoverySet(
- Parser* recognizer) {
- const atn::ATN& atn = recognizer->getInterpreter<atn::ATNSimulator>()->atn;
- RuleContext* ctx = recognizer->getContext();
- misc::IntervalSet recoverSet;
- while (ctx->invokingState != ATNState::INVALID_STATE_NUMBER) {
- // compute what follows who invoked us
- atn::ATNState* invokingState = atn.states[ctx->invokingState];
- atn::RuleTransition* rt =
- dynamic_cast<atn::RuleTransition*>(invokingState->transitions[0]);
- misc::IntervalSet follow = atn.nextTokens(rt->followState);
- recoverSet.addAll(follow);
-
- if (ctx->parent == nullptr) break;
- ctx = static_cast<RuleContext*>(ctx->parent);
- }
- recoverSet.remove(Token::EPSILON);
-
- return recoverSet;
-}
-
-void DefaultErrorStrategy::consumeUntil(Parser* recognizer,
- const misc::IntervalSet& set) {
- size_t ttype = recognizer->getInputStream()->LA(1);
- while (ttype != Token::EOF && !set.contains(ttype)) {
- recognizer->consume();
- ttype = recognizer->getInputStream()->LA(1);
- }
-}
-
-void DefaultErrorStrategy::InitializeInstanceFields() {
- errorRecoveryMode = false;
- lastErrorIndex = -1;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.h
deleted file mode 100644
index a3f52981da..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DefaultErrorStrategy.h
+++ /dev/null
@@ -1,478 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRErrorStrategy.h"
-#include "misc/IntervalSet.h"
-
-namespace antlr4 {
-
-/**
- * This is the default implementation of {@link ANTLRErrorStrategy} used for
- * error reporting and recovery in ANTLR parsers.
- */
-class ANTLR4CPP_PUBLIC DefaultErrorStrategy : public ANTLRErrorStrategy {
- public:
- DefaultErrorStrategy();
- DefaultErrorStrategy(DefaultErrorStrategy const& other) = delete;
- virtual ~DefaultErrorStrategy();
-
- DefaultErrorStrategy& operator=(DefaultErrorStrategy const& other) = delete;
-
- protected:
- /**
- * Indicates whether the error strategy is currently "recovering from an
- * error". This is used to suppress reporting multiple error messages while
- * attempting to recover from a detected syntax error.
- *
- * @see #inErrorRecoveryMode
- */
- bool errorRecoveryMode;
-
- /** The index into the input stream where the last error occurred.
- * This is used to prevent infinite loops where an error is found
- * but no token is consumed during recovery...another error is found,
- * ad nauseam. This is a failsafe mechanism to guarantee that at least
- * one token/tree node is consumed for two errors.
- */
- int lastErrorIndex;
-
- misc::IntervalSet lastErrorStates;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation simply calls <seealso
- /// cref="#endErrorCondition"/> to ensure that the handler is not in error
- /// recovery mode.
- /// </summary>
- public:
- virtual void reset(Parser* recognizer) override;
-
- /// <summary>
- /// This method is called to enter error recovery mode when a recognition
- /// exception is reported.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- protected:
- virtual void beginErrorCondition(Parser* recognizer);
-
- /// <summary>
- /// {@inheritDoc}
- /// </summary>
- public:
- virtual bool inErrorRecoveryMode(Parser* recognizer) override;
-
- /// <summary>
- /// This method is called to leave error recovery mode after recovering from
- /// a recognition exception.
- /// </summary>
- /// <param name="recognizer"> </param>
- protected:
- virtual void endErrorCondition(Parser* recognizer);
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation simply calls <seealso
- /// cref="#endErrorCondition"/>.
- /// </summary>
- public:
- virtual void reportMatch(Parser* recognizer) override;
-
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation returns immediately if the handler is already
- /// in error recovery mode. Otherwise, it calls <seealso
- /// cref="#beginErrorCondition"/> and dispatches the reporting task based on
- /// the runtime type of {@code e} according to the following table.
- ///
- /// <ul>
- /// <li><seealso cref="NoViableAltException"/>: Dispatches the call to
- /// <seealso cref="#reportNoViableAlternative"/></li>
- /// <li><seealso cref="InputMismatchException"/>: Dispatches the call to
- /// <seealso cref="#reportInputMismatch"/></li>
- /// <li><seealso cref="FailedPredicateException"/>: Dispatches the call to
- /// <seealso cref="#reportFailedPredicate"/></li>
- /// <li>All other types: calls <seealso cref="Parser#notifyErrorListeners"/>
- /// to report the exception</li>
- /// </ul>
- virtual void reportError(Parser* recognizer,
- const RecognitionException& e) override;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation resynchronizes the parser by consuming tokens
- /// until we find one in the resynchronization set--loosely the set of tokens
- /// that can follow the current rule.
- /// </summary>
- virtual void recover(Parser* recognizer, std::exception_ptr e) override;
-
- /**
- * The default implementation of {@link ANTLRErrorStrategy#sync} makes sure
- * that the current lookahead symbol is consistent with what we're expecting
- * at this point in the ATN. You can call this anytime but ANTLR only
- * generates code to check before subrules/loops and each iteration.
- *
- * <p>Implements Jim Idle's magic sync mechanism in closures and optional
- * subrules. E.g.,</p>
- *
- * <pre>
- * a : sync ( stuff sync )* ;
- * sync : {consume to what can follow sync} ;
- * </pre>
- *
- * At the start of a sub rule upon error, {@link #sync} performs single
- * token deletion, if possible. If it can't do that, it bails on the current
- * rule and uses the default error recovery, which consumes until the
- * resynchronization set of the current rule.
- *
- * <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
- * with an empty alternative), then the expected set includes what follows
- * the subrule.</p>
- *
- * <p>During loop iteration, it consumes until it sees a token that can start
- * a sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
- * stay in the loop as long as possible.</p>
- *
- * <p><strong>ORIGINS</strong></p>
- *
- * <p>Previous versions of ANTLR did a poor job of their recovery within
- * loops. A single mismatch token or missing token would force the parser to
- * bail out of the entire rules surrounding the loop. So, for rule</p>
- *
- * <pre>
- * classDef : 'class' ID '{' member* '}'
- * </pre>
- *
- * input with an extra token between members would force the parser to
- * consume until it found the next class definition rather than the next
- * member definition of the current class.
- *
- * <p>This functionality costs a little bit of effort because the parser has to
- * compare the token set at the start of the loop and at each iteration. If for
- * some reason speed is suffering for you, you can turn off this
- * functionality by simply overriding this method as a blank { }.</p>
- */
- virtual void sync(Parser* recognizer) override;
-
- /// <summary>
- /// This is called by <seealso cref="#reportError"/> when the exception is a
- /// <seealso cref="NoViableAltException"/>.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception </param>
- protected:
- virtual void reportNoViableAlternative(Parser* recognizer,
- const NoViableAltException& e);
-
- /// <summary>
- /// This is called by <seealso cref="#reportError"/> when the exception is an
- /// <seealso cref="InputMismatchException"/>.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception </param>
- virtual void reportInputMismatch(Parser* recognizer,
- const InputMismatchException& e);
-
- /// <summary>
- /// This is called by <seealso cref="#reportError"/> when the exception is a
- /// <seealso cref="FailedPredicateException"/>.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception </param>
- virtual void reportFailedPredicate(Parser* recognizer,
- const FailedPredicateException& e);
-
- /**
- * This method is called to report a syntax error which requires the removal
- * of a token from the input stream. At the time this method is called, the
- * erroneous symbol is the current {@code LT(1)} symbol and has not yet been
- * removed from the input stream. When this method returns,
- * {@code recognizer} is in error recovery mode.
- *
- * <p>This method is called when {@link #singleTokenDeletion} identifies
- * single-token deletion as a viable recovery strategy for a mismatched
- * input error.</p>
- *
- * <p>The default implementation simply returns if the handler is already in
- * error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
- * enter error recovery mode, followed by calling
- * {@link Parser#notifyErrorListeners}.</p>
- *
- * @param recognizer the parser instance
- */
- virtual void reportUnwantedToken(Parser* recognizer);
-
- /**
- * This method is called to report a syntax error which requires the
- * insertion of a missing token into the input stream. At the time this
- * method is called, the missing token has not yet been inserted. When this
- * method returns, {@code recognizer} is in error recovery mode.
- *
- * <p>This method is called when {@link #singleTokenInsertion} identifies
- * single-token insertion as a viable recovery strategy for a mismatched
- * input error.</p>
- *
- * <p>The default implementation simply returns if the handler is already in
- * error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
- * enter error recovery mode, followed by calling
- * {@link Parser#notifyErrorListeners}.</p>
- *
- * @param recognizer the parser instance
- */
- virtual void reportMissingToken(Parser* recognizer);
-
- public:
- /**
- * {@inheritDoc}
- *
- * <p>The default implementation attempts to recover from the mismatched input
- * by using single token insertion and deletion as described below. If the
- * recovery attempt fails, this method throws an
- * {@link InputMismatchException}.</p>
- *
- * <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
- *
- * <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
- * right token, however, then assume {@code LA(1)} is some extra spurious
- * token and delete it. Then consume and return the next token (which was
- * the {@code LA(2)} token) as the successful result of the match
- * operation.</p>
- *
- * <p>This recovery strategy is implemented by {@link
- * #singleTokenDeletion}.</p>
- *
- * <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
- *
- * <p>If current token (at {@code LA(1)}) is consistent with what could come
- * after the expected {@code LA(1)} token, then assume the token is missing
- * and use the parser's {@link TokenFactory} to create it on the fly. The
- * "insertion" is performed by returning the created token as the successful
- * result of the match operation.</p>
- *
- * <p>This recovery strategy is implemented by {@link
- * #singleTokenInsertion}.</p>
- *
- * <p><strong>EXAMPLE</strong></p>
- *
- * <p>For example, Input {@code i=(3;} is clearly missing the {@code ')'}.
- * When the parser returns from the nested call to {@code expr}, it will have
- * call chain:</p>
- *
- * <pre>
- * stat &rarr; expr &rarr; atom
- * </pre>
- *
- * and it will be trying to match the {@code ')'} at this point in the
- * derivation:
- *
- * <pre>
- * =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
- * ^
- * </pre>
- *
- * The attempt to match {@code ')'} will fail when it sees {@code ';'} and
- * call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
- * is in the set of tokens that can follow the {@code ')'} token reference
- * in rule {@code atom}. It can assume that you forgot the {@code ')'}.
- */
- virtual Token* recoverInline(Parser* recognizer) override;
-
- /// <summary>
- /// This method implements the single-token insertion inline error recovery
- /// strategy. It is called by <seealso cref="#recoverInline"/> if the
- /// single-token deletion strategy fails to recover from the mismatched input.
- /// If this method returns {@code true}, {@code recognizer} will be in error
- /// recovery mode. <p/> This method determines whether or not single-token
- /// insertion is viable by checking if the {@code LA(1)} input symbol could be
- /// successfully matched if it were instead the {@code LA(2)} symbol. If this
- /// method returns
- /// {@code true}, the caller is responsible for creating and inserting a
- /// token with the correct type to produce this behavior.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <returns> {@code true} if single-token insertion is a viable recovery
- /// strategy for the current mismatched input, otherwise {@code false}
- /// </returns>
- protected:
- virtual bool singleTokenInsertion(Parser* recognizer);
-
- /// <summary>
- /// This method implements the single-token deletion inline error recovery
- /// strategy. It is called by <seealso cref="#recoverInline"/> to attempt to
- /// recover from mismatched input. If this method returns null, the parser and
- /// error handler state will not have changed. If this method returns
- /// non-null,
- /// {@code recognizer} will <em>not</em> be in error recovery mode since the
- /// returned token was a successful match.
- /// <p/>
- /// If the single-token deletion is successful, this method calls
- /// <seealso cref="#reportUnwantedToken"/> to report the error, followed by
- /// <seealso cref="Parser#consume"/> to actually "delete" the extraneous
- /// token. Then, before returning <seealso cref="#reportMatch"/> is called to
- /// signal a successful match.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <returns> the successfully matched <seealso cref="Token"/> instance if
- /// single-token deletion successfully recovers from the mismatched input,
- /// otherwise
- /// {@code null} </returns>
- virtual Token* singleTokenDeletion(Parser* recognizer);
-
- /// <summary>
- /// Conjure up a missing token during error recovery.
- ///
- /// The recognizer attempts to recover from single missing
- /// symbols. But, actions might refer to that missing symbol.
- /// For example, x=ID {f($x);}. The action clearly assumes
- /// that there has been an identifier matched previously and that
- /// $x points at that token. If that token is missing, but
- /// the next token in the stream is what we want we assume that
- /// this token is missing and we keep going. Because we
- /// have to return some token to replace the missing token,
- /// we have to conjure one up. This method gives the user control
- /// over the tokens returned for missing tokens. Mostly,
- /// you will want to create something special for identifier
- /// tokens. For literals such as '{' and ',', the default
- /// action in the parser or tree parser works. It simply creates
- /// a CommonToken of the appropriate type. The text will be the token.
- /// If you change what tokens must be created by the lexer,
- /// override this method to create the appropriate tokens.
- /// </summary>
- virtual Token* getMissingSymbol(Parser* recognizer);
-
- virtual misc::IntervalSet getExpectedTokens(Parser* recognizer);
-
- /// <summary>
- /// How should a token be displayed in an error message? The default
- /// is to display just the text, but during development you might
- /// want to have a lot of information spit out. Override in that case
- /// to use t.toString() (which, for CommonToken, dumps everything about
- /// the token). This is better than forcing you to override a method in
- /// your token objects because you don't have to go modify your lexer
- /// so that it creates a new class.
- /// </summary>
- virtual std::string getTokenErrorDisplay(Token* t);
-
- virtual std::string getSymbolText(Token* symbol);
-
- virtual size_t getSymbolType(Token* symbol);
-
- virtual std::string escapeWSAndQuote(const std::string& s) const;
-
- /* Compute the error recovery set for the current rule. During
- * rule invocation, the parser pushes the set of tokens that can
- * follow that rule reference on the stack; this amounts to
- * computing FIRST of what follows the rule reference in the
- * enclosing rule. See LinearApproximator.FIRST().
- * This local follow set only includes tokens
- * from within the rule; i.e., the FIRST computation done by
- * ANTLR stops at the end of a rule.
- *
- * EXAMPLE
- *
- * When you find a "no viable alt exception", the input is not
- * consistent with any of the alternatives for rule r. The best
- * thing to do is to consume tokens until you see something that
- * can legally follow a call to r *or* any rule that called r.
- * You don't want the exact set of viable next tokens because the
- * input might just be missing a token--you might consume the
- * rest of the input looking for one of the missing tokens.
- *
- * Consider grammar:
- *
- * a : '[' b ']'
- * | '(' b ')'
- * ;
- * b : c '^' INT ;
- * c : ID
- * | INT
- * ;
- *
- * At each rule invocation, the set of tokens that could follow
- * that rule is pushed on a stack. Here are the various
- * context-sensitive follow sets:
- *
- * FOLLOW(b1_in_a) = FIRST(']') = ']'
- * FOLLOW(b2_in_a) = FIRST(')') = ')'
- * FOLLOW(c_in_b) = FIRST('^') = '^'
- *
- * Upon erroneous input "[]", the call chain is
- *
- * a -> b -> c
- *
- * and, hence, the follow context stack is:
- *
- * depth follow set start of rule execution
- * 0 <EOF> a (from main())
- * 1 ']' b
- * 2 '^' c
- *
- * Notice that ')' is not included, because b would have to have
- * been called from a different context in rule a for ')' to be
- * included.
- *
- * For error recovery, we cannot consider FOLLOW(c)
- * (context-sensitive or otherwise). We need the combined set of
- * all context-sensitive FOLLOW sets--the set of all tokens that
- * could follow any reference in the call chain. We need to
- * resync to one of those tokens. Note that FOLLOW(c)='^' and if
- * we resync'd to that token, we'd consume until EOF. We need to
- * sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
- * In this case, for input "[]", LA(1) is ']' and in the set, so we would
- * not consume anything. After printing an error, rule c would
- * return normally. Rule b would not find the required '^' though.
- * At this point, it gets a mismatched token error and throws an
- * exception (since LA(1) is not in the viable following token
- * set). The rule exception handler tries to recover, but finds
- * the same recovery set and doesn't consume anything. Rule b
- * exits normally returning to rule a. Now it finds the ']' (and
- * with the successful match exits errorRecovery mode).
- *
- * So, you can see that the parser walks up the call chain looking
- * for the token that was a member of the recovery set.
- *
- * Errors are not generated in errorRecovery mode.
- *
- * ANTLR's error recovery mechanism is based upon original ideas:
- *
- * "Algorithms + Data Structures = Programs" by Niklaus Wirth
- *
- * and
- *
- * "A note on error recovery in recursive descent parsers":
- * http://portal.acm.org/citation.cfm?id=947902.947905
- *
- * Later, Josef Grosch had some good ideas:
- *
- * "Efficient and Comfortable Error Recovery in Recursive Descent
- * Parsers":
- * ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
- *
- * Like Grosch I implement context-sensitive FOLLOW sets that are combined
- * at run-time upon error to avoid overhead during parsing.
- */
- virtual misc::IntervalSet getErrorRecoverySet(Parser* recognizer);
-
- /// <summary>
- /// Consume tokens until one matches the given token set. </summary>
- virtual void consumeUntil(Parser* recognizer, const misc::IntervalSet& set);
-
- private:
- std::vector<std::unique_ptr<Token>>
- _errorSymbols; // Temporarily created token.
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
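DefaultErrorStrategy, removed above, is what an ANTLR parser uses unless an alternative is installed; callers that want fail-fast behaviour commonly swap it for the runtime's BailErrorStrategy, which abandons recovery and throws ParseCancellationException on the first syntax error. A sketch, assuming hypothetical generated MyLexer/MyParser classes and a start rule named program:

    #include <iostream>
    #include <memory>
    #include "antlr4-runtime.h"
    #include "MyLexer.h"   // hypothetical generated lexer
    #include "MyParser.h"  // hypothetical generated parser

    int main() {
      antlr4::ANTLRInputStream input("broken ( input");
      MyLexer lexer(&input);
      antlr4::CommonTokenStream tokens(&lexer);
      MyParser parser(&tokens);
      // Replace DefaultErrorStrategy: no single-token insertion/deletion,
      // just abandon the parse on the first mismatch.
      parser.setErrorHandler(std::make_shared<antlr4::BailErrorStrategy>());
      try {
        parser.program();  // hypothetical start rule
      } catch (const antlr4::ParseCancellationException& e) {
        std::cerr << "parse aborted: " << e.what() << std::endl;
      }
      return 0;
    }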
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp
deleted file mode 100644
index 34306b66fe..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/PredictionContext.h"
-#include "dfa/DFA.h"
-#include "misc/Interval.h"
-
-#include "DiagnosticErrorListener.h"
-
-using namespace antlr4;
-
-DiagnosticErrorListener::DiagnosticErrorListener()
- : DiagnosticErrorListener(true) {}
-
-DiagnosticErrorListener::DiagnosticErrorListener(bool exactOnly_)
- : exactOnly(exactOnly_) {}
-
-void DiagnosticErrorListener::reportAmbiguity(Parser* recognizer,
- const dfa::DFA& dfa,
- size_t startIndex,
- size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- atn::ATNConfigSet* configs) {
- if (exactOnly && !exact) {
- return;
- }
-
- std::string decision = getDecisionDescription(recognizer, dfa);
- antlrcpp::BitSet conflictingAlts = getConflictingAlts(ambigAlts, configs);
- std::string text = recognizer->getTokenStream()->getText(
- misc::Interval(startIndex, stopIndex));
- std::string message = "reportAmbiguity d=" + decision +
- ": ambigAlts=" + conflictingAlts.toString() +
- ", input='" + text + "'";
-
- recognizer->notifyErrorListeners(message);
-}
-
-void DiagnosticErrorListener::reportAttemptingFullContext(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet& /*conflictingAlts*/,
- atn::ATNConfigSet* /*configs*/) {
- std::string decision = getDecisionDescription(recognizer, dfa);
- std::string text = recognizer->getTokenStream()->getText(
- misc::Interval(startIndex, stopIndex));
- std::string message =
- "reportAttemptingFullContext d=" + decision + ", input='" + text + "'";
- recognizer->notifyErrorListeners(message);
-}
-
-void DiagnosticErrorListener::reportContextSensitivity(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, size_t /*prediction*/, atn::ATNConfigSet* /*configs*/) {
- std::string decision = getDecisionDescription(recognizer, dfa);
- std::string text = recognizer->getTokenStream()->getText(
- misc::Interval(startIndex, stopIndex));
- std::string message =
- "reportContextSensitivity d=" + decision + ", input='" + text + "'";
- recognizer->notifyErrorListeners(message);
-}
-
-std::string DiagnosticErrorListener::getDecisionDescription(
- Parser* recognizer, const dfa::DFA& dfa) {
- size_t decision = dfa.decision;
- size_t ruleIndex =
- (reinterpret_cast<atn::ATNState*>(dfa.atnStartState))->ruleIndex;
-
- const std::vector<std::string>& ruleNames = recognizer->getRuleNames();
- if (ruleIndex == INVALID_INDEX || ruleIndex >= ruleNames.size()) {
- return std::to_string(decision);
- }
-
- std::string ruleName = ruleNames[ruleIndex];
- if (ruleName == "" || ruleName.empty()) {
- return std::to_string(decision);
- }
-
- return std::to_string(decision) + " (" + ruleName + ")";
-}
-
-antlrcpp::BitSet DiagnosticErrorListener::getConflictingAlts(
- const antlrcpp::BitSet& reportedAlts, atn::ATNConfigSet* configs) {
- if (reportedAlts.count() > 0) { // Not exactly like the original Java code,
- // but this listener is only used in the
- // TestRig (where it never provides a good
- // alt set), so it's probably OK.
- return reportedAlts;
- }
-
- antlrcpp::BitSet result;
- for (auto& config : configs->configs) {
- result.set(config->alt);
- }
-
- return result;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.h
deleted file mode 100644
index e6214ca775..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/DiagnosticErrorListener.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BaseErrorListener.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// This implementation of <seealso cref="ANTLRErrorListener"/> can be used to
-/// identify certain potential correctness and performance problems in grammars.
-/// "Reports" are made by calling <seealso cref="Parser#notifyErrorListeners"/>
-/// with the appropriate message.
-///
-/// <ul>
-/// <li><b>Ambiguities</b>: These are cases where more than one path through the
-/// grammar can match the input.</li>
-/// <li><b>Weak context sensitivity</b>: These are cases where full-context
-/// prediction resolved an SLL conflict to a unique alternative which equaled
-/// the minimum alternative of the SLL conflict.</li> <li><b>Strong (forced)
-/// context sensitivity</b>: These are cases where the full-context prediction
-/// resolved an SLL conflict to a unique alternative, <em>and</em> the minimum
-/// alternative of the SLL conflict was found to not be a truly viable
-/// alternative. Two-stage parsing cannot be used for inputs where this
-/// situation occurs.</li>
-/// </ul>
-///
-/// @author Sam Harwell
-/// </summary>
-class ANTLR4CPP_PUBLIC DiagnosticErrorListener : public BaseErrorListener {
- /// <summary>
- /// When {@code true}, only exactly known ambiguities are reported.
- /// </summary>
- protected:
- const bool exactOnly;
-
- /// <summary>
- /// Initializes a new instance of <seealso cref="DiagnosticErrorListener"/>
- /// which only reports exact ambiguities.
- /// </summary>
- public:
- DiagnosticErrorListener();
-
- /// <summary>
- /// Initializes a new instance of <seealso cref="DiagnosticErrorListener"/>,
- /// specifying whether all ambiguities or only exact ambiguities are reported.
- /// </summary>
- /// <param name="exactOnly"> {@code true} to report only exact ambiguities,
- /// otherwise
- /// {@code false} to report all ambiguities. </param>
- DiagnosticErrorListener(bool exactOnly);
-
- virtual void reportAmbiguity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- atn::ATNConfigSet* configs) override;
-
- virtual void reportAttemptingFullContext(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet& conflictingAlts,
- atn::ATNConfigSet* configs) override;
-
- virtual void reportContextSensitivity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex,
- size_t prediction,
- atn::ATNConfigSet* configs) override;
-
- protected:
- virtual std::string getDecisionDescription(Parser* recognizer,
- const dfa::DFA& dfa);
-
- /// <summary>
- /// Computes the set of conflicting or ambiguous alternatives from a
- /// configuration set, if that information was not already provided by the
- /// parser.
- /// </summary>
- /// <param name="reportedAlts"> The set of conflicting or ambiguous
- /// alternatives, as reported by the parser. </param> <param name="configs">
- /// The conflicting or ambiguous configuration set. </param> <returns> Returns
- /// {@code reportedAlts} if it is not {@code null}, otherwise returns the set
- /// of alternatives represented in {@code configs}. </returns>
- virtual antlrcpp::BitSet getConflictingAlts(
- const antlrcpp::BitSet& reportedAlts, atn::ATNConfigSet* configs);
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.cpp
deleted file mode 100644
index 07effb0461..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-using namespace antlr4;
-
-RuntimeException::RuntimeException(const std::string& msg)
- : std::exception(), _message(msg) {}
-
-const char* RuntimeException::what() const NOEXCEPT { return _message.c_str(); }
-
-//------------------ IOException
-//---------------------------------------------------------------------------------------
-
-IOException::IOException(const std::string& msg)
- : std::exception(), _message(msg) {}
-
-const char* IOException::what() const NOEXCEPT { return _message.c_str(); }
-
-//------------------ IllegalStateException
-//-----------------------------------------------------------------------------
-
-IllegalStateException::~IllegalStateException() {}
-
-//------------------ IllegalArgumentException
-//--------------------------------------------------------------------------
-
-IllegalArgumentException::~IllegalArgumentException() {}
-
-//------------------ NullPointerException
-//------------------------------------------------------------------------------
-
-NullPointerException::~NullPointerException() {}
-
-//------------------ IndexOutOfBoundsException
-//-------------------------------------------------------------------------
-
-IndexOutOfBoundsException::~IndexOutOfBoundsException() {}
-
-//------------------ UnsupportedOperationException
-//---------------------------------------------------------------------
-
-UnsupportedOperationException::~UnsupportedOperationException() {}
-
-//------------------ EmptyStackException
-//-------------------------------------------------------------------------------
-
-EmptyStackException::~EmptyStackException() {}
-
-//------------------ CancellationException
-//-----------------------------------------------------------------------------
-
-CancellationException::~CancellationException() {}
-
-//------------------ ParseCancellationException
-//------------------------------------------------------------------------
-
-ParseCancellationException::~ParseCancellationException() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.h
deleted file mode 100644
index 9e3cbce4f8..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Exceptions.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
-// An exception hierarchy modelled loosely after java.lang.* exceptions.
-class ANTLR4CPP_PUBLIC RuntimeException : public std::exception {
- private:
- std::string _message;
-
- public:
- RuntimeException(const std::string& msg = "");
-
- virtual const char* what() const NOEXCEPT override;
-};
-
-class ANTLR4CPP_PUBLIC IllegalStateException : public RuntimeException {
- public:
- IllegalStateException(const std::string& msg = "") : RuntimeException(msg) {}
- IllegalStateException(IllegalStateException const&) = default;
- ~IllegalStateException();
- IllegalStateException& operator=(IllegalStateException const&) = default;
-};
-
-class ANTLR4CPP_PUBLIC IllegalArgumentException : public RuntimeException {
- public:
- IllegalArgumentException(IllegalArgumentException const&) = default;
- IllegalArgumentException(const std::string& msg = "")
- : RuntimeException(msg) {}
- ~IllegalArgumentException();
- IllegalArgumentException& operator=(IllegalArgumentException const&) =
- default;
-};
-
-class ANTLR4CPP_PUBLIC NullPointerException : public RuntimeException {
- public:
- NullPointerException(const std::string& msg = "") : RuntimeException(msg) {}
- NullPointerException(NullPointerException const&) = default;
- ~NullPointerException();
- NullPointerException& operator=(NullPointerException const&) = default;
-};
-
-class ANTLR4CPP_PUBLIC IndexOutOfBoundsException : public RuntimeException {
- public:
- IndexOutOfBoundsException(const std::string& msg = "")
- : RuntimeException(msg) {}
- IndexOutOfBoundsException(IndexOutOfBoundsException const&) = default;
- ~IndexOutOfBoundsException();
- IndexOutOfBoundsException& operator=(IndexOutOfBoundsException const&) =
- default;
-};
-
-class ANTLR4CPP_PUBLIC UnsupportedOperationException : public RuntimeException {
- public:
- UnsupportedOperationException(const std::string& msg = "")
- : RuntimeException(msg) {}
- UnsupportedOperationException(UnsupportedOperationException const&) = default;
- ~UnsupportedOperationException();
- UnsupportedOperationException& operator=(
- UnsupportedOperationException const&) = default;
-};
-
-class ANTLR4CPP_PUBLIC EmptyStackException : public RuntimeException {
- public:
- EmptyStackException(const std::string& msg = "") : RuntimeException(msg) {}
- EmptyStackException(EmptyStackException const&) = default;
- ~EmptyStackException();
- EmptyStackException& operator=(EmptyStackException const&) = default;
-};
-
-// IOException is not a runtime exception (in the java hierarchy).
-// Hence we have to duplicate the RuntimeException implementation.
-class ANTLR4CPP_PUBLIC IOException : public std::exception {
- private:
- std::string _message;
-
- public:
- IOException(const std::string& msg = "");
-
- virtual const char* what() const NOEXCEPT override;
-};
-
-class ANTLR4CPP_PUBLIC CancellationException : public IllegalStateException {
- public:
- CancellationException(const std::string& msg = "")
- : IllegalStateException(msg) {}
- CancellationException(CancellationException const&) = default;
- ~CancellationException();
- CancellationException& operator=(CancellationException const&) = default;
-};
-
-class ANTLR4CPP_PUBLIC ParseCancellationException
- : public CancellationException {
- public:
- ParseCancellationException(const std::string& msg = "")
- : CancellationException(msg) {}
- ParseCancellationException(ParseCancellationException const&) = default;
- ~ParseCancellationException();
- ParseCancellationException& operator=(ParseCancellationException const&) =
- default;
-};
-
-} // namespace antlr4
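A minimal usage sketch (not part of the deleted sources) of the hierarchy declared above: catching RuntimeException by reference covers every subclass except IOException, which deliberately mirrors Java by sitting outside the runtime-exception branch. All names come from Exceptions.h as shown in this diff.

    #include "Exceptions.h"
    #include <iostream>

    void demo() {
      try {
        throw antlr4::ParseCancellationException("parse aborted");
      } catch (const antlr4::RuntimeException& e) {
        // ParseCancellationException derives from CancellationException ->
        // IllegalStateException -> RuntimeException, so this handler sees it.
        std::cerr << e.what() << std::endl;  // prints "parse aborted"
      }
    }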
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.cpp
deleted file mode 100644
index 8699aa3b96..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "atn/ParserATNSimulator.h"
-#include "atn/PredicateTransition.h"
-#include "support/CPPUtils.h"
-
-#include "FailedPredicateException.h"
-
-using namespace antlr4;
-using namespace antlrcpp;
-
-FailedPredicateException::FailedPredicateException(Parser* recognizer)
- : FailedPredicateException(recognizer, "", "") {}
-
-FailedPredicateException::FailedPredicateException(Parser* recognizer,
- const std::string& predicate)
- : FailedPredicateException(recognizer, predicate, "") {}
-
-FailedPredicateException::FailedPredicateException(Parser* recognizer,
- const std::string& predicate,
- const std::string& message)
- : RecognitionException(
- !message.empty() ? message : "failed predicate: " + predicate + "?",
- recognizer, recognizer->getInputStream(), recognizer->getContext(),
- recognizer->getCurrentToken()) {
- atn::ATNState* s = recognizer->getInterpreter<atn::ATNSimulator>()
- ->atn.states[recognizer->getState()];
- atn::Transition* transition = s->transitions[0];
- if (is<atn::PredicateTransition*>(transition)) {
- _ruleIndex = static_cast<atn::PredicateTransition*>(transition)->ruleIndex;
- _predicateIndex =
- static_cast<atn::PredicateTransition*>(transition)->predIndex;
- } else {
- _ruleIndex = 0;
- _predicateIndex = 0;
- }
-
- _predicate = predicate;
-}
-
-size_t FailedPredicateException::getRuleIndex() { return _ruleIndex; }
-
-size_t FailedPredicateException::getPredIndex() { return _predicateIndex; }
-
-std::string FailedPredicateException::getPredicate() { return _predicate; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.h
deleted file mode 100644
index 0d76ee603f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/FailedPredicateException.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-
-namespace antlr4 {
-
-/// A semantic predicate failed during validation. Validation of predicates
-/// occurs when normally parsing the alternative just like matching a token.
-/// Disambiguating predicate evaluation occurs when we test a predicate during
-/// prediction.
-class ANTLR4CPP_PUBLIC FailedPredicateException : public RecognitionException {
- public:
- FailedPredicateException(Parser* recognizer);
- FailedPredicateException(Parser* recognizer, const std::string& predicate);
- FailedPredicateException(Parser* recognizer, const std::string& predicate,
- const std::string& message);
-
- virtual size_t getRuleIndex();
- virtual size_t getPredIndex();
- virtual std::string getPredicate();
-
- private:
- size_t _ruleIndex;
- size_t _predicateIndex;
- std::string _predicate;
-};
-
-} // namespace antlr4
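A hedged sketch (not from this change) of how ANTLR-generated C++ rule code typically raises the exception above when a validating semantic predicate fails; parser and inputIsValid are placeholders for the enclosing Parser (mid-parse) and the predicate expression.

    #include "FailedPredicateException.h"
    #include "Parser.h"

    void validate(antlr4::Parser* parser, bool inputIsValid) {
      if (!inputIsValid) {
        // Generated code passes the predicate text and an optional message.
        throw antlr4::FailedPredicateException(parser, "inputIsValid",
                                               "validation failed");
      }
    }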
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.cpp
deleted file mode 100644
index 9d1c35fba0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-
-#include "InputMismatchException.h"
-
-using namespace antlr4;
-
-InputMismatchException::InputMismatchException(Parser* recognizer)
- : RecognitionException(recognizer, recognizer->getInputStream(),
- recognizer->getContext(),
- recognizer->getCurrentToken()) {}
-
-InputMismatchException::~InputMismatchException() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.h
deleted file mode 100644
index 802a897617..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InputMismatchException.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-
-namespace antlr4 {
-
-/// <summary>
- /// This signifies any kind of mismatched input exception, such as
-/// when the current input does not match the expected token.
-/// </summary>
-class ANTLR4CPP_PUBLIC InputMismatchException : public RecognitionException {
- public:
- InputMismatchException(Parser* recognizer);
- InputMismatchException(InputMismatchException const&) = default;
- ~InputMismatchException();
- InputMismatchException& operator=(InputMismatchException const&) = default;
-};
-
-} // namespace antlr4
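Another hedged sketch (not part of the deleted files): the default error strategy constructs this exception when the lookahead token cannot be matched; parser stands for any Parser* that is in the middle of a parse.

    #include "InputMismatchException.h"
    #include "Parser.h"

    void signalMismatch(antlr4::Parser* parser) {
      // The constructor captures the current token, context and input stream.
      throw antlr4::InputMismatchException(parser);
    }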
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.cpp
deleted file mode 100644
index 5408ae50f6..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "IntStream.h"
-
-using namespace antlr4;
-
-const std::string IntStream::UNKNOWN_SOURCE_NAME = "<unknown>";
-
-IntStream::~IntStream() = default;
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.h
deleted file mode 100644
index 6a8e9e25b1..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/IntStream.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// A simple stream of symbols whose values are represented as integers. This
-/// interface provides <em>marked ranges</em> with support for a minimum level
-/// of buffering necessary to implement arbitrary lookahead during prediction.
-/// For more information on marked ranges, see <seealso cref="#mark"/>.
-/// <p/>
-/// <strong>Initializing Methods:</strong> Some methods in this interface have
-/// unspecified behavior if no call to an initializing method has occurred after
-/// the stream was constructed. The following is a list of initializing methods:
-///
-/// <ul>
-/// <li><seealso cref="#LA"/></li>
-/// <li><seealso cref="#consume"/></li>
-/// <li><seealso cref="#size"/></li>
-/// </ul>
-/// </summary>
-class ANTLR4CPP_PUBLIC IntStream {
- public:
- static const size_t EOF = static_cast<size_t>(
- -1); // std::numeric_limits<size_t>::max(); doesn't work in VS 2013
-
- /// The value returned by <seealso cref="#LA LA()"/> when the end of the
- /// stream is reached. No explicit EOF definition. We got EOF on all
- /// platforms.
- // static const size_t _EOF = std::ios::eofbit;
-
- /// <summary>
- /// The value returned by <seealso cref="#getSourceName"/> when the actual
- /// name of the underlying source is not known.
- /// </summary>
- static const std::string UNKNOWN_SOURCE_NAME;
-
- virtual ~IntStream();
-
- /// <summary>
- /// Consumes the current symbol in the stream. This method has the following
- /// effects:
- ///
- /// <ul>
- /// <li><strong>Forward movement:</strong> The value of <seealso
- /// cref="#index index()"/>
- /// before calling this method is less than the value of {@code
- /// index()} after calling this method.</li>
- /// <li><strong>Ordered lookahead:</strong> The value of {@code LA(1)}
- /// before
- /// calling this method becomes the value of {@code LA(-1)} after
- /// calling this method.</li>
- /// </ul>
- ///
- /// Note that calling this method does not guarantee that {@code index()} is
- /// incremented by exactly 1, as that would preclude the ability to implement
- /// filtering streams (e.g. <seealso cref="CommonTokenStream"/> which
- /// distinguishes between "on-channel" and "off-channel" tokens).
- /// </summary>
- /// <exception cref="IllegalStateException"> if an attempt is made to consume
- /// the end of the stream (i.e. if {@code LA(1)==}<seealso cref="#EOF
- /// EOF"/> before calling
- /// {@code consume}). </exception>
- virtual void consume() = 0;
-
- /// <summary>
- /// Gets the value of the symbol at offset {@code i} from the current
- /// position. When {@code i==1}, this method returns the value of the current
- /// symbol in the stream (which is the next symbol to be consumed). When
- /// {@code i==-1}, this method returns the value of the previously read
- /// symbol in the stream. It is not valid to call this method with
- /// {@code i==0}, but the specific behavior is unspecified because this
- /// method is frequently called from performance-critical code.
- /// <p/>
- /// This method is guaranteed to succeed if any of the following are true:
- ///
- /// <ul>
- /// <li>{@code i>0}</li>
- /// <li>{@code i==-1} and <seealso cref="#index index()"/> returns a value
- /// greater
- /// than the value of {@code index()} after the stream was constructed
- /// and {@code LA(1)} was called in that order. Specifying the current
- /// {@code index()} relative to the index after the stream was created
- /// allows for filtering implementations that do not return every symbol
- /// from the underlying source. Specifying the call to {@code LA(1)}
- /// allows for lazily initialized streams.</li>
- /// <li>{@code LA(i)} refers to a symbol consumed within a marked region
- /// that has not yet been released.</li>
- /// </ul>
- ///
- /// If {@code i} represents a position at or beyond the end of the stream,
- /// this method returns <seealso cref="#EOF"/>.
- /// <p/>
- /// The return value is unspecified if {@code i<0} and fewer than {@code -i}
- /// calls to <seealso cref="#consume consume()"/> have occurred from the
- /// beginning of the stream before calling this method.
- /// </summary>
- /// <exception cref="UnsupportedOperationException"> if the stream does not
- /// support retrieving the value of the specified symbol </exception>
- virtual size_t LA(ssize_t i) = 0;
-
- /// <summary>
- /// A mark provides a guarantee that <seealso cref="#seek seek()"/> operations
- /// will be valid over a "marked range" extending from the index where {@code
- /// mark()} was called to the current <seealso cref="#index index()"/>. This
- /// allows the use of streaming input sources by specifying the minimum
- /// buffering requirements to support arbitrary lookahead during prediction.
- /// <p/>
- /// The returned mark is an opaque handle (type {@code int}) which is passed
- /// to <seealso cref="#release release()"/> when the guarantees provided by
- /// the marked range are no longer necessary. When calls to
- /// {@code mark()}/{@code release()} are nested, the marks must be released
- /// in reverse order of which they were obtained. Since marked regions are
- /// used during performance-critical sections of prediction, the specific
- /// behavior of invalid usage is unspecified (i.e. a mark is not released, or
- /// a mark is released twice, or marks are not released in reverse order from
- /// which they were created).
- /// <p/>
- /// The behavior of this method is unspecified if no call to an
- /// <seealso cref="IntStream initializing method"/> has occurred after this
- /// stream was constructed. <p/> This method does not change the current
- /// position in the input stream. <p/> The following example shows the use of
- /// <seealso cref="#mark mark()"/>, <seealso cref="#release release(mark)"/>,
- /// <seealso cref="#index index()"/>, and <seealso cref="#seek seek(index)"/>
- /// as part of an operation to safely work within a marked region, then
- /// restore the stream position to its original value and release the mark.
- /// <pre>
- /// IntStream stream = ...;
- /// int index = -1;
- /// int mark = stream.mark();
- /// try {
- /// index = stream.index();
- /// // perform work here...
- /// } finally {
- /// if (index != -1) {
- /// stream.seek(index);
- /// }
- /// stream.release(mark);
- /// }
- /// </pre>
- /// </summary>
- /// <returns> An opaque marker which should be passed to
- /// <seealso cref="#release release()"/> when the marked range is no longer
- /// required. </returns>
- virtual ssize_t mark() = 0;
-
- /// <summary>
- /// This method releases a marked range created by a call to
- /// <seealso cref="#mark mark()"/>. Calls to {@code release()} must appear in
- /// the reverse order of the corresponding calls to {@code mark()}. If a mark
- /// is released twice, or if marks are not released in reverse order of the
- /// corresponding calls to {@code mark()}, the behavior is unspecified.
- /// <p/>
- /// For more information and an example, see <seealso cref="#mark"/>.
- /// </summary>
- /// <param name="marker"> A marker returned by a call to {@code mark()}.
- /// </param> <seealso cref= #mark </seealso>
- virtual void release(ssize_t marker) = 0;
-
- /// <summary>
- /// Return the index into the stream of the input symbol referred to by
- /// {@code LA(1)}.
- /// <p/>
- /// The behavior of this method is unspecified if no call to an
- /// <seealso cref="IntStream initializing method"/> has occurred after this
- /// stream was constructed.
- /// </summary>
- virtual size_t index() = 0;
-
- /// <summary>
- /// Set the input cursor to the position indicated by {@code index}. If the
- /// specified index lies past the end of the stream, the operation behaves as
- /// though {@code index} was the index of the EOF symbol. After this method
- /// returns without throwing an exception, at least one of the following
- /// will be true.
- ///
- /// <ul>
- /// <li><seealso cref="#index index()"/> will return the index of the first
- /// symbol
- /// appearing at or after the specified {@code index}. Specifically,
- /// implementations which filter their sources should automatically
- /// adjust {@code index} forward the minimum amount required for the
- /// operation to target a non-ignored symbol.</li>
- /// <li>{@code LA(1)} returns <seealso cref="#EOF"/></li>
- /// </ul>
- ///
- /// This operation is guaranteed to not throw an exception if {@code index}
- /// lies within a marked region. For more information on marked regions, see
- /// <seealso cref="#mark"/>. The behavior of this method is unspecified if no
- /// call to an <seealso cref="IntStream initializing method"/> has occurred
- /// after this stream was constructed.
- /// </summary>
- /// <param name="index"> The absolute index to seek to.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code index} is less than
- /// 0 </exception> <exception cref="UnsupportedOperationException"> if the
- /// stream does not support seeking to the specified index </exception>
- virtual void seek(size_t index) = 0;
-
- /// <summary>
- /// Returns the total number of symbols in the stream, including a single EOF
- /// symbol.
- /// </summary>
- /// <exception cref="UnsupportedOperationException"> if the size of the stream
- /// is unknown. </exception>
- virtual size_t size() = 0;
-
- /// <summary>
- /// Gets the name of the underlying symbol source. This method returns a
- /// non-null, non-empty string. If such a name is not known, this method
- /// returns <seealso cref="#UNKNOWN_SOURCE_NAME"/>.
- /// </summary>
- virtual std::string getSourceName() const = 0;
-};
-
-} // namespace antlr4
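The mark()/release() example embedded in the comment above is Java-flavoured pseudocode; the following sketch (not part of the deleted header) restates it against the C++ signatures declared here, for any concrete IntStream implementation.

    #include "IntStream.h"

    void speculate(antlr4::IntStream& stream) {
      ssize_t marker = stream.mark();   // open a marked range
      size_t index = stream.index();
      // ... perform speculative lookahead work here (LA, consume, ...) ...
      stream.seek(index);               // restore the original position
      stream.release(marker);           // release marks in reverse order
    }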
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp
deleted file mode 100644
index b1369aca18..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "InterpreterRuleContext.h"
-
-using namespace antlr4;
-
-InterpreterRuleContext::InterpreterRuleContext() : ParserRuleContext() {}
-
-InterpreterRuleContext::InterpreterRuleContext(ParserRuleContext* parent,
- size_t invokingStateNumber,
- size_t ruleIndex)
- : ParserRuleContext(parent, invokingStateNumber), _ruleIndex(ruleIndex) {}
-
-size_t InterpreterRuleContext::getRuleIndex() const { return _ruleIndex; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.h
deleted file mode 100644
index 5aac966f3c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/InterpreterRuleContext.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ParserRuleContext.h"
-
-namespace antlr4 {
-
-/**
- * This class extends {@link ParserRuleContext} by allowing the value of
- * {@link #getRuleIndex} to be explicitly set for the context.
- *
- * <p>
- * {@link ParserRuleContext} does not include field storage for the rule index
- * since the context classes created by the code generator override the
- * {@link #getRuleIndex} method to return the correct value for that context.
- * Since the parser interpreter does not use the context classes generated for a
- * parser, this class (with slightly more memory overhead per node) is used to
- * provide equivalent functionality.</p>
- */
-class ANTLR4CPP_PUBLIC InterpreterRuleContext : public ParserRuleContext {
- public:
- InterpreterRuleContext();
-
- /**
- * Constructs a new {@link InterpreterRuleContext} with the specified
- * parent, invoking state, and rule index.
- *
- * @param parent The parent context.
- * @param invokingStateNumber The invoking state number.
- * @param ruleIndex The rule index for the current context.
- */
- InterpreterRuleContext(ParserRuleContext* parent, size_t invokingStateNumber,
- size_t ruleIndex);
-
- virtual size_t getRuleIndex() const override;
-
- protected:
- /** This is the backing field for {@link #getRuleIndex}. */
- const size_t _ruleIndex = INVALID_INDEX;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.cpp
deleted file mode 100644
index df4e31f965..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.cpp
+++ /dev/null
@@ -1,274 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRErrorListener.h"
-#include "CommonToken.h"
-#include "CommonTokenFactory.h"
-#include "Exceptions.h"
-#include "LexerNoViableAltException.h"
-#include "atn/LexerATNSimulator.h"
-#include "misc/Interval.h"
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-
-#include "Lexer.h"
-
-#define DEBUG_LEXER 0
-
-using namespace antlrcpp;
-using namespace antlr4;
-
-Lexer::Lexer() : Recognizer() {
- InitializeInstanceFields();
- _input = nullptr;
-}
-
-Lexer::Lexer(CharStream* input) : Recognizer(), _input(input) {
- InitializeInstanceFields();
-}
-
-void Lexer::reset() {
- // wack Lexer state variables
- _input->seek(0); // rewind the input
-
- _syntaxErrors = 0;
- token.reset();
- type = Token::INVALID_TYPE;
- channel = Token::DEFAULT_CHANNEL;
- tokenStartCharIndex = INVALID_INDEX;
- tokenStartCharPositionInLine = 0;
- tokenStartLine = 0;
- type = 0;
- _text = "";
-
- hitEOF = false;
- mode = Lexer::DEFAULT_MODE;
- modeStack.clear();
-
- getInterpreter<atn::LexerATNSimulator>()->reset();
-}
-
-std::unique_ptr<Token> Lexer::nextToken() {
- // Mark start location in char stream so unbuffered streams are
- // guaranteed to at least have the text of the current token
- ssize_t tokenStartMarker = _input->mark();
-
- auto onExit = finally([this, tokenStartMarker] {
- // make sure we release marker after match or
- // unbuffered char stream will keep buffering
- _input->release(tokenStartMarker);
- });
-
- while (true) {
- outerContinue:
- if (hitEOF) {
- emitEOF();
- return std::move(token);
- }
-
- token.reset();
- channel = Token::DEFAULT_CHANNEL;
- tokenStartCharIndex = _input->index();
- tokenStartCharPositionInLine =
- getInterpreter<atn::LexerATNSimulator>()->getCharPositionInLine();
- tokenStartLine = getInterpreter<atn::LexerATNSimulator>()->getLine();
- _text = "";
- do {
- type = Token::INVALID_TYPE;
- size_t ttype;
- try {
- ttype = getInterpreter<atn::LexerATNSimulator>()->match(_input, mode);
- } catch (LexerNoViableAltException& e) {
- notifyListeners(e); // report error
- recover(e);
- ttype = SKIP;
- }
- if (_input->LA(1) == EOF) {
- hitEOF = true;
- }
- if (type == Token::INVALID_TYPE) {
- type = ttype;
- }
- if (type == SKIP) {
- goto outerContinue;
- }
- } while (type == MORE);
- if (token == nullptr) {
- emit();
- }
- return std::move(token);
- }
-}
-
-void Lexer::skip() { type = SKIP; }
-
-void Lexer::more() { type = MORE; }
-
-void Lexer::setMode(size_t m) { mode = m; }
-
-void Lexer::pushMode(size_t m) {
-#if DEBUG_LEXER == 1
- std::cout << "pushMode " << m << std::endl;
-#endif
-
- modeStack.push_back(mode);
- setMode(m);
-}
-
-size_t Lexer::popMode() {
- if (modeStack.empty()) {
- throw EmptyStackException();
- }
-#if DEBUG_LEXER == 1
- std::cout << std::string("popMode back to ") << modeStack.back() << std::endl;
-#endif
-
- setMode(modeStack.back());
- modeStack.pop_back();
- return mode;
-}
-
-Ref<TokenFactory<CommonToken>> Lexer::getTokenFactory() { return _factory; }
-
-void Lexer::setInputStream(IntStream* input) {
- reset();
- _input = dynamic_cast<CharStream*>(input);
-}
-
-std::string Lexer::getSourceName() { return _input->getSourceName(); }
-
-CharStream* Lexer::getInputStream() { return _input; }
-
-void Lexer::emit(std::unique_ptr<Token> newToken) {
- token = std::move(newToken);
-}
-
-Token* Lexer::emit() {
- emit(_factory->create({this, _input}, type, _text, channel,
- tokenStartCharIndex, getCharIndex() - 1, tokenStartLine,
- tokenStartCharPositionInLine));
- return token.get();
-}
-
-Token* Lexer::emitEOF() {
- size_t cpos = getCharPositionInLine();
- size_t line = getLine();
- emit(_factory->create({this, _input}, EOF, "", Token::DEFAULT_CHANNEL,
- _input->index(), _input->index() - 1, line, cpos));
- return token.get();
-}
-
-size_t Lexer::getLine() const {
- return getInterpreter<atn::LexerATNSimulator>()->getLine();
-}
-
-size_t Lexer::getCharPositionInLine() {
- return getInterpreter<atn::LexerATNSimulator>()->getCharPositionInLine();
-}
-
-void Lexer::setLine(size_t line) {
- getInterpreter<atn::LexerATNSimulator>()->setLine(line);
-}
-
-void Lexer::setCharPositionInLine(size_t charPositionInLine) {
- getInterpreter<atn::LexerATNSimulator>()->setCharPositionInLine(
- charPositionInLine);
-}
-
-size_t Lexer::getCharIndex() { return _input->index(); }
-
-std::string Lexer::getText() {
- if (!_text.empty()) {
- return _text;
- }
- return getInterpreter<atn::LexerATNSimulator>()->getText(_input);
-}
-
-void Lexer::setText(const std::string& text) { _text = text; }
-
-std::unique_ptr<Token> Lexer::getToken() { return std::move(token); }
-
-void Lexer::setToken(std::unique_ptr<Token> newToken) {
- token = std::move(newToken);
-}
-
-void Lexer::setType(size_t ttype) { type = ttype; }
-
-size_t Lexer::getType() { return type; }
-
-void Lexer::setChannel(size_t newChannel) { channel = newChannel; }
-
-size_t Lexer::getChannel() { return channel; }
-
-std::vector<std::unique_ptr<Token>> Lexer::getAllTokens() {
- std::vector<std::unique_ptr<Token>> tokens;
- std::unique_ptr<Token> t = nextToken();
- while (t->getType() != EOF) {
- tokens.push_back(std::move(t));
- t = nextToken();
- }
- return tokens;
-}
-
-void Lexer::recover(const LexerNoViableAltException& /*e*/) {
- if (_input->LA(1) != EOF) {
- // skip a char and try again
- getInterpreter<atn::LexerATNSimulator>()->consume(_input);
- }
-}
-
-void Lexer::notifyListeners(const LexerNoViableAltException& /*e*/) {
- ++_syntaxErrors;
- std::string text =
- _input->getText(misc::Interval(tokenStartCharIndex, _input->index()));
- std::string msg = std::string("token recognition error at: '") +
- getErrorDisplay(text) + std::string("'");
-
- ProxyErrorListener& listener = getErrorListenerDispatch();
- listener.syntaxError(this, nullptr, tokenStartLine,
- tokenStartCharPositionInLine, msg,
- std::current_exception());
-}
-
-std::string Lexer::getErrorDisplay(const std::string& s) {
- std::stringstream ss;
- for (auto c : s) {
- switch (c) {
- case '\n':
- ss << "\\n";
- break;
- case '\t':
- ss << "\\t";
- break;
- case '\r':
- ss << "\\r";
- break;
- default:
- ss << c;
- break;
- }
- }
- return ss.str();
-}
-
-void Lexer::recover(RecognitionException* /*re*/) {
- // TO_DO: Do we lose character or line position information?
- _input->consume();
-}
-
-size_t Lexer::getNumberOfSyntaxErrors() { return _syntaxErrors; }
-
-void Lexer::InitializeInstanceFields() {
- _syntaxErrors = 0;
- token = nullptr;
- _factory = CommonTokenFactory::DEFAULT;
- tokenStartCharIndex = INVALID_INDEX;
- tokenStartLine = 0;
- tokenStartCharPositionInLine = 0;
- hitEOF = false;
- channel = 0;
- type = 0;
- mode = Lexer::DEFAULT_MODE;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.h
deleted file mode 100644
index 1c4743e5d5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Lexer.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CharStream.h"
-#include "Recognizer.h"
-#include "Token.h"
-#include "TokenSource.h"
-
-namespace antlr4 {
-
-/// A lexer is a recognizer that draws input symbols from a character stream.
-/// Lexer grammars result in a subclass of this object. A Lexer object
-/// uses simplified match() and error recovery mechanisms in the interest
-/// of speed.
-class ANTLR4CPP_PUBLIC Lexer : public Recognizer, public TokenSource {
- public:
- static const size_t DEFAULT_MODE = 0;
- static const size_t MORE = static_cast<size_t>(-2);
- static const size_t SKIP = static_cast<size_t>(-3);
-
- static const size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL;
- static const size_t HIDDEN = Token::HIDDEN_CHANNEL;
- static const size_t MIN_CHAR_VALUE = 0;
- static const size_t MAX_CHAR_VALUE = 0x10FFFF;
-
- CharStream*
- _input; // Pure reference, usually from statically allocated instance.
-
- protected:
- /// How to create token objects.
- Ref<TokenFactory<CommonToken>> _factory;
-
- public:
- /// The goal of all lexer rules/methods is to create a token object.
- /// This is an instance variable as multiple rules may collaborate to
- /// create a single token. nextToken will return this object after
- /// matching lexer rule(s). If you subclass to allow multiple token
- /// emissions, then set this to the last token to be matched or
- /// something nonnull so that the auto token emit mechanism will not
- /// emit another token.
-
- // Life cycle of a token is this:
- // Created by emit() (via the token factory) or by action code, holding
- // ownership of it. Ownership is handed over to the token stream when calling
- // nextToken().
- std::unique_ptr<Token> token;
-
- /// <summary>
- /// What character index in the stream did the current token start at?
- /// Needed, for example, to get the text for current token. Set at
- /// the start of nextToken.
- /// </summary>
- size_t tokenStartCharIndex;
-
- /// <summary>
- /// The line on which the first character of the token resides </summary>
- size_t tokenStartLine;
-
- /// The character position of first character within the line.
- size_t tokenStartCharPositionInLine;
-
- /// Once we see EOF on char stream, next token will be EOF.
- /// If you have DONE : EOF ; then you see DONE EOF.
- bool hitEOF;
-
- /// The channel number for the current token.
- size_t channel;
-
- /// The token type for the current token.
- size_t type;
-
- // Use the vector as a stack.
- std::vector<size_t> modeStack;
- size_t mode;
-
- Lexer();
- Lexer(CharStream* input);
- virtual ~Lexer() {}
-
- virtual void reset();
-
- /// Return a token from this source; i.e., match a token on the char stream.
- virtual std::unique_ptr<Token> nextToken() override;
-
- /// Instruct the lexer to skip creating a token for current lexer rule
- /// and look for another token. nextToken() knows to keep looking when
- /// a lexer rule finishes with token set to SKIP_TOKEN. Recall that
- /// if token == null at end of any token rule, it creates one for you
- /// and emits it.
- virtual void skip();
- virtual void more();
- virtual void setMode(size_t m);
- virtual void pushMode(size_t m);
- virtual size_t popMode();
-
- template <typename T1>
- void setTokenFactory(TokenFactory<T1>* factory) {
- this->_factory = factory;
- }
-
- virtual Ref<TokenFactory<CommonToken>> getTokenFactory() override;
-
- /// Set the char stream and reset the lexer
- virtual void setInputStream(IntStream* input) override;
-
- virtual std::string getSourceName() override;
-
- virtual CharStream* getInputStream() override;
-
- /// By default does not support multiple emits per nextToken invocation
- /// for efficiency reasons. Subclasses can override this method, nextToken,
- /// and getToken (to push tokens into a list and pull from that list
- /// rather than a single variable as this implementation does).
- virtual void emit(std::unique_ptr<Token> newToken);
-
- /// The standard method called to automatically emit a token at the
- /// outermost lexical rule. The token object should point into the
- /// char buffer start..stop. If there is a text override in 'text',
- /// use that to set the token's text. Override this method to emit
- /// custom Token objects or provide a new factory.
- virtual Token* emit();
-
- virtual Token* emitEOF();
-
- virtual size_t getLine() const override;
-
- virtual size_t getCharPositionInLine() override;
-
- virtual void setLine(size_t line);
-
- virtual void setCharPositionInLine(size_t charPositionInLine);
-
- /// What is the index of the current character of lookahead?
- virtual size_t getCharIndex();
-
- /// Return the text matched so far for the current token or any
- /// text override.
- virtual std::string getText();
-
- /// Set the complete text of this token; it wipes any previous
- /// changes to the text.
- virtual void setText(const std::string& text);
-
- /// Override if emitting multiple tokens.
- virtual std::unique_ptr<Token> getToken();
-
- virtual void setToken(std::unique_ptr<Token> newToken);
-
- virtual void setType(size_t ttype);
-
- virtual size_t getType();
-
- virtual void setChannel(size_t newChannel);
-
- virtual size_t getChannel();
-
- virtual const std::vector<std::string>& getChannelNames() const = 0;
-
- virtual const std::vector<std::string>& getModeNames() const = 0;
-
- /// Return a list of all Token objects in input char stream.
- /// Forces load of all tokens. Does not include EOF token.
- virtual std::vector<std::unique_ptr<Token>> getAllTokens();
-
- virtual void recover(const LexerNoViableAltException& e);
-
- virtual void notifyListeners(const LexerNoViableAltException& e);
-
- virtual std::string getErrorDisplay(const std::string& s);
-
- /// Lexers can normally match any char in its vocabulary after matching
- /// a token, so do the easy thing and just kill a character and hope
- /// it all works out. You can instead use the rule invocation stack
- /// to do sophisticated error recovery if you are in a fragment rule.
- virtual void recover(RecognitionException* re);
-
- /// <summary>
- /// Gets the number of syntax errors reported during parsing. This value is
- /// incremented each time <seealso cref="#notifyErrorListeners"/> is called.
- /// </summary>
- /// <seealso cref= #notifyListeners </seealso>
- virtual size_t getNumberOfSyntaxErrors();
-
- protected:
- /// You can set the text for the current token to override what is in
- /// the input char buffer (via setText()).
- std::string _text;
-
- private:
- size_t _syntaxErrors;
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
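A minimal driver sketch (not part of the deleted sources): MyLexer is a placeholder for any lexer class generated by the ANTLR tool; only the Lexer and CharStream APIs shown in this diff, plus the runtime's ANTLRInputStream, are assumed.

    #include <iostream>
    #include "ANTLRInputStream.h"
    #include "MyLexer.h"  // hypothetical generated lexer

    int main() {
      antlr4::ANTLRInputStream input("1 + 2");
      MyLexer lexer(&input);
      // getAllTokens() drains the stream; the EOF token is not included.
      for (auto& token : lexer.getAllTokens()) {
        std::cout << token->getText() << std::endl;
      }
      return 0;
    }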
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.cpp
deleted file mode 100644
index 7555ec9bd7..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "Vocabulary.h"
-#include "atn/ATNType.h"
-#include "atn/EmptyPredictionContext.h"
-#include "atn/LexerATNSimulator.h"
-#include "dfa/DFA.h"
-
-#include "LexerInterpreter.h"
-
-using namespace antlr4;
-
-LexerInterpreter::LexerInterpreter(const std::string& grammarFileName,
- const std::vector<std::string>& tokenNames,
- const std::vector<std::string>& ruleNames,
- const std::vector<std::string>& channelNames,
- const std::vector<std::string>& modeNames,
- const atn::ATN& atn, CharStream* input)
- : LexerInterpreter(grammarFileName,
- dfa::Vocabulary::fromTokenNames(tokenNames), ruleNames,
- channelNames, modeNames, atn, input) {}
-
-LexerInterpreter::LexerInterpreter(const std::string& grammarFileName,
- const dfa::Vocabulary& vocabulary,
- const std::vector<std::string>& ruleNames,
- const std::vector<std::string>& channelNames,
- const std::vector<std::string>& modeNames,
- const atn::ATN& atn, CharStream* input)
- : Lexer(input),
- _grammarFileName(grammarFileName),
- _atn(atn),
- _ruleNames(ruleNames),
- _channelNames(channelNames),
- _modeNames(modeNames),
- _vocabulary(vocabulary) {
- if (_atn.grammarType != atn::ATNType::LEXER) {
- throw IllegalArgumentException("The ATN must be a lexer ATN.");
- }
-
- for (size_t i = 0; i < atn.maxTokenType; i++) {
- _tokenNames.push_back(vocabulary.getDisplayName(i));
- }
-
- for (size_t i = 0; i < atn.getNumberOfDecisions(); ++i) {
- _decisionToDFA.push_back(dfa::DFA(_atn.getDecisionState(i), i));
- }
- _interpreter = new atn::LexerATNSimulator(
- this, _atn, _decisionToDFA,
- _sharedContextCache); /* mem-check: deleted in d-tor */
-}
-
-LexerInterpreter::~LexerInterpreter() { delete _interpreter; }
-
-const atn::ATN& LexerInterpreter::getATN() const { return _atn; }
-
-std::string LexerInterpreter::getGrammarFileName() const {
- return _grammarFileName;
-}
-
-const std::vector<std::string>& LexerInterpreter::getTokenNames() const {
- return _tokenNames;
-}
-
-const std::vector<std::string>& LexerInterpreter::getRuleNames() const {
- return _ruleNames;
-}
-
-const std::vector<std::string>& LexerInterpreter::getChannelNames() const {
- return _channelNames;
-}
-
-const std::vector<std::string>& LexerInterpreter::getModeNames() const {
- return _modeNames;
-}
-
-const dfa::Vocabulary& LexerInterpreter::getVocabulary() const {
- return _vocabulary;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.h
deleted file mode 100644
index 7c545e975f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerInterpreter.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Lexer.h"
-#include "Vocabulary.h"
-#include "atn/PredictionContext.h"
-
-namespace antlr4 {
-
-class ANTLR4CPP_PUBLIC LexerInterpreter : public Lexer {
- public:
- // @deprecated
- LexerInterpreter(const std::string& grammarFileName,
- const std::vector<std::string>& tokenNames,
- const std::vector<std::string>& ruleNames,
- const std::vector<std::string>& channelNames,
- const std::vector<std::string>& modeNames,
- const atn::ATN& atn, CharStream* input);
- LexerInterpreter(const std::string& grammarFileName,
- const dfa::Vocabulary& vocabulary,
- const std::vector<std::string>& ruleNames,
- const std::vector<std::string>& channelNames,
- const std::vector<std::string>& modeNames,
- const atn::ATN& atn, CharStream* input);
-
- ~LexerInterpreter();
-
- virtual const atn::ATN& getATN() const override;
- virtual std::string getGrammarFileName() const override;
- virtual const std::vector<std::string>& getTokenNames() const override;
- virtual const std::vector<std::string>& getRuleNames() const override;
- virtual const std::vector<std::string>& getChannelNames() const override;
- virtual const std::vector<std::string>& getModeNames() const override;
-
- virtual const dfa::Vocabulary& getVocabulary() const override;
-
- protected:
- const std::string _grammarFileName;
- const atn::ATN& _atn;
-
- // @deprecated
- std::vector<std::string> _tokenNames;
- const std::vector<std::string>& _ruleNames;
- const std::vector<std::string>& _channelNames;
- const std::vector<std::string>& _modeNames;
- std::vector<dfa::DFA> _decisionToDFA;
-
- atn::PredictionContextCache _sharedContextCache;
-
- private:
- dfa::Vocabulary _vocabulary;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp
deleted file mode 100644
index e3a144394d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CharStream.h"
-#include "Lexer.h"
-#include "misc/Interval.h"
-#include "support/CPPUtils.h"
-
-#include "LexerNoViableAltException.h"
-
-using namespace antlr4;
-
-LexerNoViableAltException::LexerNoViableAltException(
- Lexer* lexer, CharStream* input, size_t startIndex,
- atn::ATNConfigSet* deadEndConfigs)
- : RecognitionException(lexer, input, nullptr, nullptr),
- _startIndex(startIndex),
- _deadEndConfigs(deadEndConfigs) {}
-
-size_t LexerNoViableAltException::getStartIndex() { return _startIndex; }
-
-atn::ATNConfigSet* LexerNoViableAltException::getDeadEndConfigs() {
- return _deadEndConfigs;
-}
-
-std::string LexerNoViableAltException::toString() {
- std::string symbol;
- if (_startIndex < getInputStream()->size()) {
- symbol = static_cast<CharStream*>(getInputStream())
- ->getText(misc::Interval(_startIndex, _startIndex));
- symbol = antlrcpp::escapeWhitespace(symbol, false);
- }
- std::string format = "LexerNoViableAltException('" + symbol + "')";
- return format;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.h
deleted file mode 100644
index 58cfe32bfa..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/LexerNoViableAltException.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-#include "atn/ATNConfigSet.h"
-
-namespace antlr4 {
-
-class ANTLR4CPP_PUBLIC LexerNoViableAltException : public RecognitionException {
- public:
- LexerNoViableAltException(Lexer* lexer, CharStream* input, size_t startIndex,
- atn::ATNConfigSet* deadEndConfigs);
-
- virtual size_t getStartIndex();
- virtual atn::ATNConfigSet* getDeadEndConfigs();
- virtual std::string toString();
-
- private:
- /// Matching attempted at what input index?
- const size_t _startIndex;
-
- /// Which configurations did we try at input.index() that couldn't match
- /// input.LA(1)?
- atn::ATNConfigSet* _deadEndConfigs;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.cpp
deleted file mode 100644
index a4add053f1..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CharStream.h"
-#include "CommonToken.h"
-#include "Token.h"
-
-#include "ListTokenSource.h"
-
-using namespace antlr4;
-
-ListTokenSource::ListTokenSource(std::vector<std::unique_ptr<Token>> tokens_)
- : ListTokenSource(std::move(tokens_), "") {}
-
-ListTokenSource::ListTokenSource(std::vector<std::unique_ptr<Token>> tokens_,
- const std::string& sourceName_)
- : tokens(std::move(tokens_)), sourceName(sourceName_) {
- InitializeInstanceFields();
- if (tokens.empty()) {
- throw "tokens cannot be null";
- }
-
- // Check if there is an eof token and create one if not.
- if (tokens.back()->getType() != Token::EOF) {
- Token* lastToken = tokens.back().get();
- size_t start = INVALID_INDEX;
- size_t previousStop = lastToken->getStopIndex();
- if (previousStop != INVALID_INDEX) {
- start = previousStop + 1;
- }
-
- size_t stop = std::max(INVALID_INDEX, start - 1);
- tokens.emplace_back((_factory->create(
- {this, getInputStream()}, Token::EOF, "EOF", Token::DEFAULT_CHANNEL,
- start, stop, static_cast<int>(lastToken->getLine()),
- lastToken->getCharPositionInLine())));
- }
-}
-
-size_t ListTokenSource::getCharPositionInLine() {
- if (i < tokens.size()) {
- return tokens[i]->getCharPositionInLine();
- }
- return 0;
-}
-
-std::unique_ptr<Token> ListTokenSource::nextToken() {
- if (i < tokens.size()) {
- return std::move(tokens[i++]);
- }
- return nullptr;
-}
-
-size_t ListTokenSource::getLine() const {
- if (i < tokens.size()) {
- return tokens[i]->getLine();
- }
-
- return 1;
-}
-
-CharStream* ListTokenSource::getInputStream() {
- if (i < tokens.size()) {
- return tokens[i]->getInputStream();
- } else if (!tokens.empty()) {
- return tokens.back()->getInputStream();
- }
-
- // no input stream information is available
- return nullptr;
-}
-
-std::string ListTokenSource::getSourceName() {
- if (sourceName != "") {
- return sourceName;
- }
-
- CharStream* inputStream = getInputStream();
- if (inputStream != nullptr) {
- return inputStream->getSourceName();
- }
-
- return "List";
-}
-
-Ref<TokenFactory<CommonToken>> ListTokenSource::getTokenFactory() {
- return _factory;
-}
-
-void ListTokenSource::InitializeInstanceFields() {
- i = 0;
- _factory = CommonTokenFactory::DEFAULT;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.h
deleted file mode 100644
index 0013651446..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ListTokenSource.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CommonTokenFactory.h"
-#include "TokenSource.h"
-
-namespace antlr4 {
-
-/// Provides an implementation of <seealso cref="TokenSource"/> as a wrapper
-/// around a list of <seealso cref="Token"/> objects.
-///
-/// If the final token in the list is an <seealso cref="Token#EOF"/> token, it
-/// will be used as the EOF token for every call to <seealso cref="#nextToken"/>
-/// after the end of the list is reached. Otherwise, an EOF token will be
-/// created.
-class ANTLR4CPP_PUBLIC ListTokenSource : public TokenSource {
- protected:
- // This list will be emptied token by token as we call nextToken().
- // Token streams can be used to buffer tokens for a while.
- std::vector<std::unique_ptr<Token>> tokens;
-
- private:
- /// <summary>
- /// The name of the input source. If this value is {@code null}, a call to
- /// <seealso cref="#getSourceName"/> should return the source name used to
- /// create the the next token in <seealso cref="#tokens"/> (or the previous
- /// token if the end of the input has been reached).
- /// </summary>
- const std::string sourceName;
-
- protected:
- /// The index into <seealso cref="#tokens"/> of token to return by the next
- /// call to <seealso cref="#nextToken"/>. The end of the input is indicated by
- /// this value being greater than or equal to the number of items in <seealso
- /// cref="#tokens"/>.
- size_t i;
-
- private:
- /// This is the backing field for <seealso cref="#getTokenFactory"/> and
- /// <seealso cref="setTokenFactory"/>.
- Ref<TokenFactory<CommonToken>> _factory = CommonTokenFactory::DEFAULT;
-
- public:
- /// Constructs a new <seealso cref="ListTokenSource"/> instance from the
- /// specified collection of <seealso cref="Token"/> objects.
- ///
- /// <param name="tokens"> The collection of <seealso cref="Token"/> objects to
- /// provide as a <seealso cref="TokenSource"/>. </param> <exception
- /// cref="NullPointerException"> if {@code tokens} is {@code null}
- /// </exception>
- ListTokenSource(std::vector<std::unique_ptr<Token>> tokens);
- ListTokenSource(const ListTokenSource& other) = delete;
-
- ListTokenSource& operator=(const ListTokenSource& other) = delete;
-
- /// <summary>
- /// Constructs a new <seealso cref="ListTokenSource"/> instance from the
- /// specified collection of <seealso cref="Token"/> objects and source name.
- /// </summary>
- /// <param name="tokens"> The collection of <seealso cref="Token"/> objects to
- /// provide as a <seealso cref="TokenSource"/>. </param> <param
- /// name="sourceName"> The name of the <seealso cref="TokenSource"/>. If this
- /// value is
- /// {@code null}, <seealso cref="#getSourceName"/> will attempt to infer the
- /// name from the next <seealso cref="Token"/> (or the previous token if the
- /// end of the input has been reached).
- /// </param>
- /// <exception cref="NullPointerException"> if {@code tokens} is {@code null}
- /// </exception>
- ListTokenSource(std::vector<std::unique_ptr<Token>> tokens_,
- const std::string& sourceName_);
-
- virtual size_t getCharPositionInLine() override;
- virtual std::unique_ptr<Token> nextToken() override;
- virtual size_t getLine() const override;
- virtual CharStream* getInputStream() override;
- virtual std::string getSourceName() override;
-
- template <typename T1>
- void setTokenFactory(TokenFactory<T1>* factory) {
- this->_factory = factory;
- }
-
- virtual Ref<TokenFactory<CommonToken>> getTokenFactory() override;
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
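An illustrative sketch (not in the original sources) of replaying an already-buffered token vector through ListTokenSource; tokens is assumed to be non-empty, since the constructor shown above rejects an empty list and appends a synthesized EOF token when one is missing.

    #include <memory>
    #include <vector>
    #include "CommonTokenStream.h"
    #include "ListTokenSource.h"

    void replay(std::vector<std::unique_ptr<antlr4::Token>> tokens) {
      antlr4::ListTokenSource source(std::move(tokens));
      antlr4::CommonTokenStream stream(&source);  // wraps the token source
      stream.fill();  // buffers every token, including the synthesized EOF
    }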
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.cpp
deleted file mode 100644
index f13f9f69bf..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-
-#include "NoViableAltException.h"
-
-using namespace antlr4;
-
-NoViableAltException::NoViableAltException(Parser* recognizer)
- : NoViableAltException(recognizer, recognizer->getTokenStream(),
- recognizer->getCurrentToken(),
- recognizer->getCurrentToken(), nullptr,
- recognizer->getContext()) {}
-
-NoViableAltException::NoViableAltException(Parser* recognizer,
- TokenStream* input,
- Token* startToken,
- Token* offendingToken,
- atn::ATNConfigSet* deadEndConfigs,
- ParserRuleContext* ctx)
- : RecognitionException("No viable alternative", recognizer, input, ctx,
- offendingToken),
- _deadEndConfigs(deadEndConfigs),
- _startToken(startToken) {}
-
-Token* NoViableAltException::getStartToken() const { return _startToken; }
-
-atn::ATNConfigSet* NoViableAltException::getDeadEndConfigs() const {
- return _deadEndConfigs;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.h
deleted file mode 100644
index b0fb71ef73..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/NoViableAltException.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-#include "Token.h"
-#include "atn/ATNConfigSet.h"
-
-namespace antlr4 {
-
-/// Indicates that the parser could not decide which of two or more paths
-/// to take based upon the remaining input. It tracks the starting token
-/// of the offending input and also knows where the parser was
- /// in the various paths when the error occurred. Reported by reportNoViableAlternative().
-class ANTLR4CPP_PUBLIC NoViableAltException : public RecognitionException {
- public:
- NoViableAltException(Parser* recognizer); // LL(1) error
- NoViableAltException(Parser* recognizer, TokenStream* input,
- Token* startToken, Token* offendingToken,
- atn::ATNConfigSet* deadEndConfigs,
- ParserRuleContext* ctx);
-
- virtual Token* getStartToken() const;
- virtual atn::ATNConfigSet* getDeadEndConfigs() const;
-
- private:
- /// Which configurations did we try at input.index() that couldn't match
- /// input.LT(1)?
- atn::ATNConfigSet* _deadEndConfigs;
-
- /// The token object at the start index; the input stream might
- /// not be buffering tokens so get a reference to it. (At the
- /// time the error occurred, of course the stream needs to keep a
- /// buffer of all of the tokens, but later we might not have access to those.)
- Token* _startToken;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.cpp
deleted file mode 100644
index 223f9c2df9..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.cpp
+++ /dev/null
@@ -1,637 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRErrorListener.h"
-#include "DefaultErrorStrategy.h"
-#include "Exceptions.h"
-#include "Lexer.h"
-#include "ParserRuleContext.h"
-#include "atn/ATN.h"
-#include "atn/ATNDeserializationOptions.h"
-#include "atn/ATNDeserializer.h"
-#include "atn/ParserATNSimulator.h"
-#include "atn/RuleStartState.h"
-#include "atn/RuleTransition.h"
-#include "dfa/DFA.h"
-#include "misc/IntervalSet.h"
-#include "tree/ErrorNodeImpl.h"
-#include "tree/TerminalNode.h"
-#include "tree/pattern/ParseTreePattern.h"
-#include "tree/pattern/ParseTreePatternMatcher.h"
-
-#include "atn/ParseInfo.h"
-#include "atn/ProfilingATNSimulator.h"
-
-#include "Parser.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-using namespace antlrcpp;
-
-std::map<std::vector<uint16_t>, atn::ATN> Parser::bypassAltsAtnCache;
-
-Parser::TraceListener::TraceListener(Parser* outerInstance_)
- : outerInstance(outerInstance_) {}
-
-Parser::TraceListener::~TraceListener() {}
-
-void Parser::TraceListener::enterEveryRule(ParserRuleContext* ctx) {
- std::cout << "enter " << outerInstance->getRuleNames()[ctx->getRuleIndex()]
- << ", LT(1)=" << outerInstance->_input->LT(1)->getText()
- << std::endl;
-}
-
-void Parser::TraceListener::visitTerminal(tree::TerminalNode* node) {
- std::cout << "consume " << node->getSymbol() << " rule "
- << outerInstance
- ->getRuleNames()[outerInstance->getContext()->getRuleIndex()]
- << std::endl;
-}
-
-void Parser::TraceListener::visitErrorNode(tree::ErrorNode* /*node*/) {}
-
-void Parser::TraceListener::exitEveryRule(ParserRuleContext* ctx) {
- std::cout << "exit " << outerInstance->getRuleNames()[ctx->getRuleIndex()]
- << ", LT(1)=" << outerInstance->_input->LT(1)->getText()
- << std::endl;
-}
-
-Parser::TrimToSizeListener Parser::TrimToSizeListener::INSTANCE;
-
-Parser::TrimToSizeListener::~TrimToSizeListener() {}
-
-void Parser::TrimToSizeListener::enterEveryRule(ParserRuleContext* /*ctx*/) {}
-
-void Parser::TrimToSizeListener::visitTerminal(tree::TerminalNode* /*node*/) {}
-
-void Parser::TrimToSizeListener::visitErrorNode(tree::ErrorNode* /*node*/) {}
-
-void Parser::TrimToSizeListener::exitEveryRule(ParserRuleContext* ctx) {
- ctx->children.shrink_to_fit();
-}
-
-Parser::Parser(TokenStream* input) {
- InitializeInstanceFields();
- setInputStream(input);
-}
-
-Parser::~Parser() {
- _tracker.reset();
- delete _tracer;
-}
-
-void Parser::reset() {
- if (getInputStream() != nullptr) {
- getInputStream()->seek(0);
- }
- _errHandler->reset(this); // Watch out, this is not shared_ptr.reset().
-
- _matchedEOF = false;
- _syntaxErrors = 0;
- setTrace(false);
- _precedenceStack.clear();
- _precedenceStack.push_back(0);
- _ctx = nullptr;
- _tracker.reset();
-
- atn::ATNSimulator* interpreter = getInterpreter<atn::ParserATNSimulator>();
- if (interpreter != nullptr) {
- interpreter->reset();
- }
-}
-
-Token* Parser::match(size_t ttype) {
- Token* t = getCurrentToken();
- if (t->getType() == ttype) {
- if (ttype == EOF) {
- _matchedEOF = true;
- }
- _errHandler->reportMatch(this);
- consume();
- } else {
- t = _errHandler->recoverInline(this);
- if (_buildParseTrees && t->getTokenIndex() == INVALID_INDEX) {
- // we must have conjured up a new token during single token insertion
- // if it's not the current symbol
- _ctx->addChild(createErrorNode(t));
- }
- }
- return t;
-}
-
-Token* Parser::matchWildcard() {
- Token* t = getCurrentToken();
- if (t->getType() > 0) {
- _errHandler->reportMatch(this);
- consume();
- } else {
- t = _errHandler->recoverInline(this);
- if (_buildParseTrees && t->getTokenIndex() == INVALID_INDEX) {
- // we must have conjured up a new token during single token insertion
- // if it's not the current symbol
- _ctx->addChild(createErrorNode(t));
- }
- }
-
- return t;
-}
-
-void Parser::setBuildParseTree(bool buildParseTrees) {
- this->_buildParseTrees = buildParseTrees;
-}
-
-bool Parser::getBuildParseTree() { return _buildParseTrees; }
-
-void Parser::setTrimParseTree(bool trimParseTrees) {
- if (trimParseTrees) {
- if (getTrimParseTree()) {
- return;
- }
- addParseListener(&TrimToSizeListener::INSTANCE);
- } else {
- removeParseListener(&TrimToSizeListener::INSTANCE);
- }
-}
-
-bool Parser::getTrimParseTree() {
- return std::find(getParseListeners().begin(), getParseListeners().end(),
- &TrimToSizeListener::INSTANCE) != getParseListeners().end();
-}
-
-std::vector<tree::ParseTreeListener*> Parser::getParseListeners() {
- return _parseListeners;
-}
-
-void Parser::addParseListener(tree::ParseTreeListener* listener) {
- if (!listener) {
- throw NullPointerException("listener");
- }
-
- this->_parseListeners.push_back(listener);
-}
-
-void Parser::removeParseListener(tree::ParseTreeListener* listener) {
- if (!_parseListeners.empty()) {
- auto it =
- std::find(_parseListeners.begin(), _parseListeners.end(), listener);
- if (it != _parseListeners.end()) {
- _parseListeners.erase(it);
- }
- }
-}
-
-void Parser::removeParseListeners() { _parseListeners.clear(); }
-
-void Parser::triggerEnterRuleEvent() {
- for (auto listener : _parseListeners) {
- listener->enterEveryRule(_ctx);
- _ctx->enterRule(listener);
- }
-}
-
-void Parser::triggerExitRuleEvent() {
- // reverse order walk of listeners
- for (auto it = _parseListeners.rbegin(); it != _parseListeners.rend(); ++it) {
- _ctx->exitRule(*it);
- (*it)->exitEveryRule(_ctx);
- }
-}
-
-size_t Parser::getNumberOfSyntaxErrors() { return _syntaxErrors; }
-
-Ref<TokenFactory<CommonToken>> Parser::getTokenFactory() {
- return _input->getTokenSource()->getTokenFactory();
-}
-
-const atn::ATN& Parser::getATNWithBypassAlts() {
- std::vector<uint16_t> serializedAtn = getSerializedATN();
- if (serializedAtn.empty()) {
- throw UnsupportedOperationException(
- "The current parser does not support an ATN with bypass alternatives.");
- }
-
- std::lock_guard<std::mutex> lck(_mutex);
-
- // XXX: using the entire serialized ATN as key into the map is a big resource
- // waste.
- // How large can that thing become?
- if (bypassAltsAtnCache.find(serializedAtn) == bypassAltsAtnCache.end()) {
- atn::ATNDeserializationOptions deserializationOptions;
- deserializationOptions.setGenerateRuleBypassTransitions(true);
-
- atn::ATNDeserializer deserializer(deserializationOptions);
- bypassAltsAtnCache[serializedAtn] = deserializer.deserialize(serializedAtn);
- }
-
- return bypassAltsAtnCache[serializedAtn];
-}
-
-tree::pattern::ParseTreePattern Parser::compileParseTreePattern(
- const std::string& pattern, int patternRuleIndex) {
- if (getTokenStream() != nullptr) {
- TokenSource* tokenSource = getTokenStream()->getTokenSource();
- if (is<Lexer*>(tokenSource)) {
- Lexer* lexer = dynamic_cast<Lexer*>(tokenSource);
- return compileParseTreePattern(pattern, patternRuleIndex, lexer);
- }
- }
- throw UnsupportedOperationException("Parser can't discover a lexer to use");
-}
-
-tree::pattern::ParseTreePattern Parser::compileParseTreePattern(
- const std::string& pattern, int patternRuleIndex, Lexer* lexer) {
- tree::pattern::ParseTreePatternMatcher m(lexer, this);
- return m.compile(pattern, patternRuleIndex);
-}
-
-Ref<ANTLRErrorStrategy> Parser::getErrorHandler() { return _errHandler; }
-
-void Parser::setErrorHandler(Ref<ANTLRErrorStrategy> const& handler) {
- _errHandler = handler;
-}
-
-IntStream* Parser::getInputStream() { return getTokenStream(); }
-
-void Parser::setInputStream(IntStream* input) {
- setTokenStream(static_cast<TokenStream*>(input));
-}
-
-TokenStream* Parser::getTokenStream() { return _input; }
-
-void Parser::setTokenStream(TokenStream* input) {
- _input = nullptr; // Just a reference we don't own.
- reset();
- _input = input;
-}
-
-Token* Parser::getCurrentToken() { return _input->LT(1); }
-
-void Parser::notifyErrorListeners(const std::string& msg) {
- notifyErrorListeners(getCurrentToken(), msg, nullptr);
-}
-
-void Parser::notifyErrorListeners(Token* offendingToken, const std::string& msg,
- std::exception_ptr e) {
- _syntaxErrors++;
- size_t line = offendingToken->getLine();
- size_t charPositionInLine = offendingToken->getCharPositionInLine();
-
- ProxyErrorListener& listener = getErrorListenerDispatch();
- listener.syntaxError(this, offendingToken, line, charPositionInLine, msg, e);
-}
-
-Token* Parser::consume() {
- Token* o = getCurrentToken();
- if (o->getType() != EOF) {
- getInputStream()->consume();
- }
-
- bool hasListener = _parseListeners.size() > 0 && !_parseListeners.empty();
- if (_buildParseTrees || hasListener) {
- if (_errHandler->inErrorRecoveryMode(this)) {
- tree::ErrorNode* node = createErrorNode(o);
- _ctx->addChild(node);
- if (_parseListeners.size() > 0) {
- for (auto listener : _parseListeners) {
- listener->visitErrorNode(node);
- }
- }
- } else {
- tree::TerminalNode* node = _ctx->addChild(createTerminalNode(o));
- if (_parseListeners.size() > 0) {
- for (auto listener : _parseListeners) {
- listener->visitTerminal(node);
- }
- }
- }
- }
- return o;
-}
-
-void Parser::addContextToParseTree() {
- // Add current context to parent if we have a parent.
- if (_ctx->parent == nullptr) return;
-
- ParserRuleContext* parent = dynamic_cast<ParserRuleContext*>(_ctx->parent);
- parent->addChild(_ctx);
-}
-
-void Parser::enterRule(ParserRuleContext* localctx, size_t state,
- size_t /*ruleIndex*/) {
- setState(state);
- _ctx = localctx;
- _ctx->start = _input->LT(1);
- if (_buildParseTrees) {
- addContextToParseTree();
- }
- if (_parseListeners.size() > 0) {
- triggerEnterRuleEvent();
- }
-}
-
-void Parser::exitRule() {
- if (_matchedEOF) {
- // if we have matched EOF, it cannot consume past EOF so we use LT(1) here
- _ctx->stop = _input->LT(1); // LT(1) will be end of file
- } else {
- _ctx->stop = _input->LT(-1); // stop node is what we just matched
- }
-
- // trigger event on ctx, before it reverts to parent
- if (_parseListeners.size() > 0) {
- triggerExitRuleEvent();
- }
- setState(_ctx->invokingState);
- _ctx = dynamic_cast<ParserRuleContext*>(_ctx->parent);
-}
-
-void Parser::enterOuterAlt(ParserRuleContext* localctx, size_t altNum) {
- localctx->setAltNumber(altNum);
-
- // if we have new localctx, make sure we replace existing ctx
- // that is previous child of parse tree
- if (_buildParseTrees && _ctx != localctx) {
- if (_ctx->parent != nullptr) {
- ParserRuleContext* parent =
- dynamic_cast<ParserRuleContext*>(_ctx->parent);
- parent->removeLastChild();
- parent->addChild(localctx);
- }
- }
- _ctx = localctx;
-}
-
-int Parser::getPrecedence() const {
- if (_precedenceStack.empty()) {
- return -1;
- }
-
- return _precedenceStack.back();
-}
-
-void Parser::enterRecursionRule(ParserRuleContext* localctx, size_t ruleIndex) {
- enterRecursionRule(localctx,
- getATN().ruleToStartState[ruleIndex]->stateNumber,
- ruleIndex, 0);
-}
-
-void Parser::enterRecursionRule(ParserRuleContext* localctx, size_t state,
- size_t /*ruleIndex*/, int precedence) {
- setState(state);
- _precedenceStack.push_back(precedence);
- _ctx = localctx;
- _ctx->start = _input->LT(1);
- if (!_parseListeners.empty()) {
- triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules
- }
-}
-
-void Parser::pushNewRecursionContext(ParserRuleContext* localctx, size_t state,
- size_t /*ruleIndex*/) {
- ParserRuleContext* previous = _ctx;
- previous->parent = localctx;
- previous->invokingState = state;
- previous->stop = _input->LT(-1);
-
- _ctx = localctx;
- _ctx->start = previous->start;
- if (_buildParseTrees) {
- _ctx->addChild(previous);
- }
-
- if (_parseListeners.size() > 0) {
- triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules
- }
-}
-
-void Parser::unrollRecursionContexts(ParserRuleContext* parentctx) {
- _precedenceStack.pop_back();
- _ctx->stop = _input->LT(-1);
- ParserRuleContext* retctx = _ctx; // save current ctx (return value)
-
- // unroll so ctx is as it was before call to recursive method
- if (_parseListeners.size() > 0) {
- while (_ctx != parentctx) {
- triggerExitRuleEvent();
- _ctx = dynamic_cast<ParserRuleContext*>(_ctx->parent);
- }
- } else {
- _ctx = parentctx;
- }
-
- // hook into tree
- retctx->parent = parentctx;
-
- if (_buildParseTrees && parentctx != nullptr) {
- // add return ctx into invoking rule's tree
- parentctx->addChild(retctx);
- }
-}
-
-ParserRuleContext* Parser::getInvokingContext(size_t ruleIndex) {
- ParserRuleContext* p = _ctx;
- while (p) {
- if (p->getRuleIndex() == ruleIndex) {
- return p;
- }
- if (p->parent == nullptr) break;
- p = dynamic_cast<ParserRuleContext*>(p->parent);
- }
- return nullptr;
-}
-
-ParserRuleContext* Parser::getContext() { return _ctx; }
-
-void Parser::setContext(ParserRuleContext* ctx) { _ctx = ctx; }
-
-bool Parser::precpred(RuleContext* /*localctx*/, int precedence) {
- return precedence >= _precedenceStack.back();
-}
-
-bool Parser::inContext(const std::string& /*context*/) {
- // TO_DO: useful in parser?
- return false;
-}
-
-bool Parser::isExpectedToken(size_t symbol) {
- const atn::ATN& atn = getInterpreter<atn::ParserATNSimulator>()->atn;
- ParserRuleContext* ctx = _ctx;
- atn::ATNState* s = atn.states[getState()];
- misc::IntervalSet following = atn.nextTokens(s);
-
- if (following.contains(symbol)) {
- return true;
- }
-
- if (!following.contains(Token::EPSILON)) {
- return false;
- }
-
- while (ctx && ctx->invokingState != ATNState::INVALID_STATE_NUMBER &&
- following.contains(Token::EPSILON)) {
- atn::ATNState* invokingState = atn.states[ctx->invokingState];
- atn::RuleTransition* rt =
- static_cast<atn::RuleTransition*>(invokingState->transitions[0]);
- following = atn.nextTokens(rt->followState);
- if (following.contains(symbol)) {
- return true;
- }
-
- ctx = dynamic_cast<ParserRuleContext*>(ctx->parent);
- }
-
- if (following.contains(Token::EPSILON) && symbol == EOF) {
- return true;
- }
-
- return false;
-}
-
-bool Parser::isMatchedEOF() const { return _matchedEOF; }
-
-misc::IntervalSet Parser::getExpectedTokens() {
- return getATN().getExpectedTokens(getState(), getContext());
-}
-
-misc::IntervalSet Parser::getExpectedTokensWithinCurrentRule() {
- const atn::ATN& atn = getInterpreter<atn::ParserATNSimulator>()->atn;
- atn::ATNState* s = atn.states[getState()];
- return atn.nextTokens(s);
-}
-
-size_t Parser::getRuleIndex(const std::string& ruleName) {
- const std::map<std::string, size_t>& m = getRuleIndexMap();
- auto iterator = m.find(ruleName);
- if (iterator == m.end()) {
- return INVALID_INDEX;
- }
- return iterator->second;
-}
-
-ParserRuleContext* Parser::getRuleContext() { return _ctx; }
-
-std::vector<std::string> Parser::getRuleInvocationStack() {
- return getRuleInvocationStack(_ctx);
-}
-
-std::vector<std::string> Parser::getRuleInvocationStack(RuleContext* p) {
- std::vector<std::string> const& ruleNames = getRuleNames();
- std::vector<std::string> stack;
- RuleContext* run = p;
- while (run != nullptr) {
- // compute what follows who invoked us
- size_t ruleIndex = run->getRuleIndex();
- if (ruleIndex == INVALID_INDEX) {
- stack.push_back("n/a");
- } else {
- stack.push_back(ruleNames[ruleIndex]);
- }
- if (p->parent == nullptr) break;
- run = dynamic_cast<RuleContext*>(run->parent);
- }
- return stack;
-}
-
-std::vector<std::string> Parser::getDFAStrings() {
- atn::ParserATNSimulator* simulator =
- getInterpreter<atn::ParserATNSimulator>();
- if (!simulator->decisionToDFA.empty()) {
- std::lock_guard<std::mutex> lck(_mutex);
-
- std::vector<std::string> s;
- for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) {
- dfa::DFA& dfa = simulator->decisionToDFA[d];
- s.push_back(dfa.toString(getVocabulary()));
- }
- return s;
- }
- return std::vector<std::string>();
-}
-
-void Parser::dumpDFA() {
- atn::ParserATNSimulator* simulator =
- getInterpreter<atn::ParserATNSimulator>();
- if (!simulator->decisionToDFA.empty()) {
- std::lock_guard<std::mutex> lck(_mutex);
- bool seenOne = false;
- for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) {
- dfa::DFA& dfa = simulator->decisionToDFA[d];
- if (!dfa.states.empty()) {
- if (seenOne) {
- std::cout << std::endl;
- }
- std::cout << "Decision " << dfa.decision << ":" << std::endl;
- std::cout << dfa.toString(getVocabulary());
- seenOne = true;
- }
- }
- }
-}
-
-std::string Parser::getSourceName() { return _input->getSourceName(); }
-
-atn::ParseInfo Parser::getParseInfo() const {
- atn::ProfilingATNSimulator* interp =
- getInterpreter<atn::ProfilingATNSimulator>();
- return atn::ParseInfo(interp);
-}
-
-void Parser::setProfile(bool profile) {
- atn::ParserATNSimulator* interp =
- getInterpreter<atn::ProfilingATNSimulator>();
- atn::PredictionMode saveMode =
- interp != nullptr ? interp->getPredictionMode() : atn::PredictionMode::LL;
- if (profile) {
- if (!is<atn::ProfilingATNSimulator*>(interp)) {
- setInterpreter(
- new atn::ProfilingATNSimulator(this)); /* mem-check: replacing
- existing interpreter which
- gets deleted. */
- }
- } else if (is<atn::ProfilingATNSimulator*>(interp)) {
- /* mem-check: replacing existing interpreter which gets deleted. */
- atn::ParserATNSimulator* sim = new atn::ParserATNSimulator(
- this, getATN(), interp->decisionToDFA, interp->getSharedContextCache());
- setInterpreter(sim);
- }
- getInterpreter<atn::ParserATNSimulator>()->setPredictionMode(saveMode);
-}
-
-void Parser::setTrace(bool trace) {
- if (!trace) {
- if (_tracer) removeParseListener(_tracer);
- delete _tracer;
- _tracer = nullptr;
- } else {
- if (_tracer)
- removeParseListener(
- _tracer); // Just in case this is triggered multiple times.
- _tracer = new TraceListener(this);
- addParseListener(_tracer);
- }
-}
-
-bool Parser::isTrace() const { return _tracer != nullptr; }
-
-tree::TerminalNode* Parser::createTerminalNode(Token* t) {
- return _tracker.createInstance<tree::TerminalNodeImpl>(t);
-}
-
-tree::ErrorNode* Parser::createErrorNode(Token* t) {
- return _tracker.createInstance<tree::ErrorNodeImpl>(t);
-}
-
-void Parser::InitializeInstanceFields() {
- _errHandler = std::make_shared<DefaultErrorStrategy>();
- _precedenceStack.clear();
- _precedenceStack.push_back(0);
- _buildParseTrees = true;
- _syntaxErrors = 0;
- _matchedEOF = false;
- _input = nullptr;
- _tracer = nullptr;
- _ctx = nullptr;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.h
deleted file mode 100644
index 6457d0992a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Parser.h
+++ /dev/null
@@ -1,489 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Recognizer.h"
-#include "TokenSource.h"
-#include "TokenStream.h"
-#include "misc/Interval.h"
-#include "tree/ParseTree.h"
-#include "tree/ParseTreeListener.h"
-
-namespace antlr4 {
-
-/// This is all the parsing support code essentially; most of it is error
-/// recovery stuff.
-class ANTLR4CPP_PUBLIC Parser : public Recognizer {
- public:
- class TraceListener : public tree::ParseTreeListener {
- public:
- TraceListener(Parser* outerInstance);
- virtual ~TraceListener();
-
- virtual void enterEveryRule(ParserRuleContext* ctx) override;
- virtual void visitTerminal(tree::TerminalNode* node) override;
- virtual void visitErrorNode(tree::ErrorNode* node) override;
- virtual void exitEveryRule(ParserRuleContext* ctx) override;
-
- private:
- Parser* const outerInstance;
- };
-
- class TrimToSizeListener : public tree::ParseTreeListener {
- public:
- static TrimToSizeListener INSTANCE;
-
- virtual ~TrimToSizeListener();
-
- virtual void enterEveryRule(ParserRuleContext* ctx) override;
- virtual void visitTerminal(tree::TerminalNode* node) override;
- virtual void visitErrorNode(tree::ErrorNode* node) override;
- virtual void exitEveryRule(ParserRuleContext* ctx) override;
- };
-
- Parser(TokenStream* input);
- virtual ~Parser();
-
- /// reset the parser's state
- virtual void reset();
-
- /// <summary>
- /// Match current input symbol against {@code ttype}. If the symbol type
- /// matches, <seealso cref="ANTLRErrorStrategy#reportMatch"/> and <seealso
- /// cref="#consume"/> are called to complete the match process.
- ///
- /// If the symbol type does not match,
- /// <seealso cref="ANTLRErrorStrategy#recoverInline"/> is called on the
- /// current error strategy to attempt recovery. If <seealso
- /// cref="#getBuildParseTree"/> is
- /// {@code true} and the token index of the symbol returned by
- /// <seealso cref="ANTLRErrorStrategy#recoverInline"/> is -1, the symbol is
- /// added to the parse tree by calling {@link
- /// #createErrorNode(ParserRuleContext, Token)} then
- /// {@link ParserRuleContext#addErrorNode(ErrorNode)}.
- /// </summary>
- /// <param name="ttype"> the token type to match </param>
- /// <returns> the matched symbol </returns>
- /// <exception cref="RecognitionException"> if the current input symbol did
- /// not match
- /// {@code ttype} and the error strategy could not recover from the
- /// mismatched symbol </exception>
- virtual Token* match(size_t ttype);
-
- /// <summary>
- /// Match current input symbol as a wildcard. If the symbol type matches
- /// (i.e. has a value greater than 0), <seealso
- /// cref="ANTLRErrorStrategy#reportMatch"/> and <seealso cref="#consume"/> are
- /// called to complete the match process. <p/> If the symbol type does not
- /// match, <seealso cref="ANTLRErrorStrategy#recoverInline"/> is called on the
- /// current error strategy to attempt recovery. If <seealso
- /// cref="#getBuildParseTree"/> is
- /// {@code true} and the token index of the symbol returned by
- /// <seealso cref="ANTLRErrorStrategy#recoverInline"/> is -1, the symbol is
- /// added to the parse tree by calling <seealso
- /// cref="ParserRuleContext#addErrorNode"/>.
- /// </summary>
- /// <returns> the matched symbol </returns>
- /// <exception cref="RecognitionException"> if the current input symbol did
- /// not match a wildcard and the error strategy could not recover from the
- /// mismatched symbol </exception>
- virtual Token* matchWildcard();
-
- /// <summary>
- /// Track the <seealso cref="ParserRuleContext"/> objects during the parse and
- /// hook them up using the <seealso cref="ParserRuleContext#children"/> list
- /// so that it forms a parse tree. The <seealso cref="ParserRuleContext"/>
- /// returned from the start rule represents the root of the parse tree. <p/>
- /// Note that if we are not building parse trees, rule contexts only point
- /// upwards. When a rule exits, it returns the context but that gets garbage
- /// collected if nobody holds a reference. It points upwards but nobody
- /// points at it.
- /// <p/>
- /// When we build parse trees, we are adding all of these contexts to
- /// <seealso cref="ParserRuleContext#children"/> list. Contexts are then not
- /// candidates for garbage collection.
- /// </summary>
- virtual void setBuildParseTree(bool buildParseTrees);
-
- /// <summary>
- /// Gets whether or not a complete parse tree will be constructed while
- /// parsing. This property is {@code true} for a newly constructed parser.
- /// </summary>
- /// <returns> {@code true} if a complete parse tree will be constructed while
- /// parsing, otherwise {@code false} </returns>
- virtual bool getBuildParseTree();
-
- /// <summary>
- /// Trim the internal lists of the parse tree during parsing to conserve
- /// memory. This property is set to {@code false} by default for a newly
- /// constructed parser.
- /// </summary>
- /// <param name="trimParseTrees"> {@code true} to trim the capacity of the
- /// <seealso cref="ParserRuleContext#children"/> list to its size after a rule
- /// is parsed. </param>
- virtual void setTrimParseTree(bool trimParseTrees);
-
- /// <returns> {@code true} if the <seealso cref="ParserRuleContext#children"/>
- /// list is trimmed using the default <seealso
- /// cref="Parser.TrimToSizeListener"/> during the parse process. </returns>
- virtual bool getTrimParseTree();
-
- virtual std::vector<tree::ParseTreeListener*> getParseListeners();
-
- /// <summary>
- /// Registers {@code listener} to receive events during the parsing process.
- /// <p/>
- /// To support output-preserving grammar transformations (including but not
- /// limited to left-recursion removal, automated left-factoring, and
- /// optimized code generation), calls to listener methods during the parse
- /// may differ substantially from calls made by
- /// <seealso cref="ParseTreeWalker#DEFAULT"/> used after the parse is
- /// complete. In particular, rule entry and exit events may occur in a
- /// different order during the parse than after the parse. In addition, calls
- /// to certain rule entry methods may be omitted. <p/> With the following
- /// specific exceptions, calls to listener events are <em>deterministic</em>,
- /// i.e. for identical input the calls to listener methods will be the same.
- ///
- /// <ul>
- /// <li>Alterations to the grammar used to generate code may change the
- /// behavior of the listener calls.</li>
- /// <li>Alterations to the command line options passed to ANTLR 4 when
- /// generating the parser may change the behavior of the listener calls.</li>
- /// <li>Changing the version of the ANTLR Tool used to generate the parser
- /// may change the behavior of the listener calls.</li>
- /// </ul>
- /// </summary>
- /// <param name="listener"> the listener to add
- /// </param>
- /// <exception cref="NullPointerException"> if {@code} listener is {@code
- /// null} </exception>
- virtual void addParseListener(tree::ParseTreeListener* listener);
-
- /// <summary>
- /// Remove {@code listener} from the list of parse listeners.
- /// <p/>
- /// If {@code listener} is {@code null} or has not been added as a parse
- /// listener, this method does nothing.
- /// </summary>
- /// <seealso cref= #addParseListener
- /// </seealso>
- /// <param name="listener"> the listener to remove </param>
- virtual void removeParseListener(tree::ParseTreeListener* listener);
-
- /// <summary>
- /// Remove all parse listeners.
- /// </summary>
- /// <seealso cref= #addParseListener </seealso>
- virtual void removeParseListeners();
-
- /// <summary>
- /// Notify any parse listeners of an enter rule event.
- /// </summary>
- /// <seealso cref= #addParseListener </seealso>
- virtual void triggerEnterRuleEvent();
-
- /// <summary>
- /// Notify any parse listeners of an exit rule event.
- /// </summary>
- /// <seealso cref= #addParseListener </seealso>
- virtual void triggerExitRuleEvent();
-
- /// <summary>
- /// Gets the number of syntax errors reported during parsing. This value is
- /// incremented each time <seealso cref="#notifyErrorListeners"/> is called.
- /// </summary>
- /// <seealso cref= #notifyErrorListeners </seealso>
- virtual size_t getNumberOfSyntaxErrors();
-
- virtual Ref<TokenFactory<CommonToken>> getTokenFactory() override;
-
- /// <summary>
- /// Tell our token source and error strategy about a new way to create tokens.
- /// </summary>
- template <typename T1>
- void setTokenFactory(TokenFactory<T1>* factory) {
- _input->getTokenSource()->setTokenFactory(factory);
- }
-
- /// The ATN with bypass alternatives is expensive to create so we create it
- /// lazily. The ATN is owned by us.
- virtual const atn::ATN& getATNWithBypassAlts();
-
- /// <summary>
- /// The preferred method of getting a tree pattern. For example, here's a
- /// sample use:
- ///
- /// <pre>
- /// ParseTree t = parser.expr();
- /// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
- /// MyParser.RULE_expr); ParseTreeMatch m = p.match(t); String id =
- /// m.get("ID");
- /// </pre>
- /// </summary>
- virtual tree::pattern::ParseTreePattern compileParseTreePattern(
- const std::string& pattern, int patternRuleIndex);
-
- /// <summary>
- /// The same as <seealso cref="#compileParseTreePattern(String, int)"/> but
- /// specify a <seealso cref="Lexer"/> rather than trying to deduce it from
- /// this parser.
- /// </summary>
- virtual tree::pattern::ParseTreePattern compileParseTreePattern(
- const std::string& pattern, int patternRuleIndex, Lexer* lexer);
-
- virtual Ref<ANTLRErrorStrategy> getErrorHandler();
- virtual void setErrorHandler(Ref<ANTLRErrorStrategy> const& handler);
-
- virtual IntStream* getInputStream() override;
- void setInputStream(IntStream* input) override;
-
- virtual TokenStream* getTokenStream();
-
- /// Set the token stream and reset the parser.
- virtual void setTokenStream(TokenStream* input);
-
- /// <summary>
- /// Match needs to return the current input symbol, which gets put
- /// into the label for the associated token ref; e.g., x=ID.
- /// </summary>
- virtual Token* getCurrentToken();
-
- void notifyErrorListeners(const std::string& msg);
-
- virtual void notifyErrorListeners(Token* offendingToken,
- const std::string& msg,
- std::exception_ptr e);
-
- /// Consume and return the <seealso cref="#getCurrentToken current symbol"/>.
- /// <p/>
- /// E.g., given the following input with {@code A} being the current
- /// lookahead symbol, this function moves the cursor to {@code B} and returns
- /// {@code A}.
- ///
- /// <pre>
- /// A B
- /// ^
- /// </pre>
- ///
- /// If the parser is not in error recovery mode, the consumed symbol is added
- /// to the parse tree using <seealso
- /// cref="ParserRuleContext#addChild(TerminalNode)"/>, and <seealso
- /// cref="ParseTreeListener#visitTerminal"/> is called on any parse listeners.
- /// If the parser <em>is</em> in error recovery mode, the consumed symbol is
- /// added to the parse tree using {@link #createErrorNode(ParserRuleContext,
- /// Token)} then
- /// {@link ParserRuleContext#addErrorNode(ErrorNode)} and
- /// <seealso cref="ParseTreeListener#visitErrorNode"/> is called on any parse
- /// listeners.
- virtual Token* consume();
-
- /// Always called by generated parsers upon entry to a rule. Access field
- /// <seealso cref="#_ctx"/> get the current context.
- virtual void enterRule(ParserRuleContext* localctx, size_t state,
- size_t ruleIndex);
-
- virtual void exitRule();
-
- virtual void enterOuterAlt(ParserRuleContext* localctx, size_t altNum);
-
- /**
- * Get the precedence level for the top-most precedence rule.
- *
- * @return The precedence level for the top-most precedence rule, or -1 if
- * the parser context is not nested within a precedence rule.
- */
- int getPrecedence() const;
-
- /// @deprecated Use
- /// <seealso cref="#enterRecursionRule(ParserRuleContext, int, int, int)"/>
- /// instead.
- virtual void enterRecursionRule(ParserRuleContext* localctx,
- size_t ruleIndex);
- virtual void enterRecursionRule(ParserRuleContext* localctx, size_t state,
- size_t ruleIndex, int precedence);
-
- /** Like {@link #enterRule} but for recursive rules.
- * Make the current context the child of the incoming localctx.
- */
- virtual void pushNewRecursionContext(ParserRuleContext* localctx,
- size_t state, size_t ruleIndex);
- virtual void unrollRecursionContexts(ParserRuleContext* parentctx);
- virtual ParserRuleContext* getInvokingContext(size_t ruleIndex);
- virtual ParserRuleContext* getContext();
- virtual void setContext(ParserRuleContext* ctx);
- virtual bool precpred(RuleContext* localctx, int precedence) override;
- virtual bool inContext(const std::string& context);
-
- /// <summary>
- /// Checks whether or not {@code symbol} can follow the current state in the
- /// ATN. The behavior of this method is equivalent to the following, but is
- /// implemented such that the complete context-sensitive follow set does not
- /// need to be explicitly constructed.
- ///
- /// <pre>
- /// return getExpectedTokens().contains(symbol);
- /// </pre>
- /// </summary>
- /// <param name="symbol"> the symbol type to check </param>
- /// <returns> {@code true} if {@code symbol} can follow the current state in
- /// the ATN, otherwise {@code false}. </returns>
- virtual bool isExpectedToken(size_t symbol);
-
- bool isMatchedEOF() const;
-
- /// <summary>
- /// Computes the set of input symbols which could follow the current parser
- /// state and context, as given by <seealso cref="#getState"/> and <seealso
- /// cref="#getContext"/>, respectively.
- /// </summary>
- /// <seealso cref= ATN#getExpectedTokens(int, RuleContext) </seealso>
- virtual misc::IntervalSet getExpectedTokens();
-
- virtual misc::IntervalSet getExpectedTokensWithinCurrentRule();
-
- /// Get a rule's index (i.e., {@code RULE_ruleName} field) or INVALID_INDEX if
- /// not found.
- virtual size_t getRuleIndex(const std::string& ruleName);
-
- virtual ParserRuleContext* getRuleContext();
-
- /// <summary>
- /// Return List&lt;String&gt; of the rule names in your parser instance
- /// leading up to a call to the current rule. You could override if
- /// you want more details such as the file/line info of where
- /// in the ATN a rule is invoked.
- ///
- /// This is very useful for error messages.
- /// </summary>
- virtual std::vector<std::string> getRuleInvocationStack();
-
- virtual std::vector<std::string> getRuleInvocationStack(RuleContext* p);
-
- /// <summary>
- /// For debugging and other purposes. </summary>
- virtual std::vector<std::string> getDFAStrings();
-
- /// <summary>
- /// For debugging and other purposes. </summary>
- virtual void dumpDFA();
-
- virtual std::string getSourceName();
-
- atn::ParseInfo getParseInfo() const;
-
- /**
- * @since 4.3
- */
- void setProfile(bool profile);
-
- /// <summary>
- /// During a parse it is sometimes useful to listen in on the rule entry and exit
- /// events as well as token matches. This is for quick and dirty debugging.
- /// </summary>
- virtual void setTrace(bool trace);
-
- /**
- * Gets whether a {@link TraceListener} is registered as a parse listener
- * for the parser.
- *
- * @see #setTrace(boolean)
- */
- bool isTrace() const;
-
- tree::ParseTreeTracker& getTreeTracker() { return _tracker; }
-
- /** How to create a token leaf node associated with a parent.
- * Typically, the terminal node to create is not a function of the parent
- * but this method must still set the parent pointer of the terminal node
- * returned. I would prefer having {@link
- * ParserRuleContext#addAnyChild(ParseTree)} set the parent pointer, but the
- * parent pointer is implementation dependent and currently there is no
- * setParent() in {@link TerminalNode} (and can't add method in Java 1.7
- * without breaking backward compatibility).
- *
- * @since 4.7
- */
- tree::TerminalNode* createTerminalNode(Token* t);
-
- /** How to create an error node, given a token, associated with a parent.
- * Typically, the error node to create is not a function of the parent
- * but this method must still set the parent pointer of the terminal node
- * returned. I would prefer having {@link
- * ParserRuleContext#addAnyChild(ParseTree)} set the parent pointer, but the
- * parent pointer is implementation dependent and currently there is no
- * setParent() in {@link ErrorNode} (and can't add method in Java 1.7 without
- * breaking backward compatibility).
- *
- * @since 4.7
- */
- tree::ErrorNode* createErrorNode(Token* t);
-
- protected:
- /// The ParserRuleContext object for the currently executing rule.
- /// This is always non-null during the parsing process.
- // ml: this is one of the contexts tracked in _allocatedContexts.
- ParserRuleContext* _ctx;
-
- /// The error handling strategy for the parser. The default is
- /// DefaultErrorStrategy. See also getErrorHandler.
- Ref<ANTLRErrorStrategy> _errHandler;
-
- /// <summary>
- /// The input stream.
- /// </summary>
- /// <seealso cref= #getInputStream </seealso>
- /// <seealso cref= #setInputStream </seealso>
- TokenStream* _input;
-
- std::vector<int> _precedenceStack;
-
- /// <summary>
- /// Specifies whether or not the parser should construct a parse tree during
- /// the parsing process. The default value is {@code true}.
- /// </summary>
- /// <seealso cref= #getBuildParseTree </seealso>
- /// <seealso cref= #setBuildParseTree </seealso>
- bool _buildParseTrees;
-
- /// The list of <seealso cref="ParseTreeListener"/> listeners registered to
- /// receive events during the parse. <seealso cref= #addParseListener
- /// </seealso>
- std::vector<tree::ParseTreeListener*> _parseListeners;
-
- /// <summary>
- /// The number of syntax errors reported during parsing. This value is
- /// incremented each time <seealso cref="#notifyErrorListeners"/> is called.
- /// </summary>
- size_t _syntaxErrors;
-
- /** Indicates parser has match()ed EOF token. See {@link #exitRule()}. */
- bool _matchedEOF;
-
- virtual void addContextToParseTree();
-
- // All rule contexts created during a parse run. This is cleared when calling
- // reset().
- tree::ParseTreeTracker _tracker;
-
- private:
- /// This field maps from the serialized ATN string to the deserialized
- /// <seealso cref="ATN"/> with bypass alternatives.
- ///
- /// <seealso cref= ATNDeserializationOptions#isGenerateRuleBypassTransitions()
- /// </seealso>
- static std::map<std::vector<uint16_t>, atn::ATN> bypassAltsAtnCache;
-
- /// When setTrace(true) is called, a reference to the
- /// TraceListener is stored here so it can be easily removed in a
- /// later call to setTrace(false). The listener itself is
- /// implemented as a parser listener so this field is not directly used by
- /// other parser methods.
- TraceListener* _tracer;
-
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.cpp
deleted file mode 100644
index b2a9abb470..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.cpp
+++ /dev/null
@@ -1,340 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRErrorStrategy.h"
-#include "CommonToken.h"
-#include "FailedPredicateException.h"
-#include "InputMismatchException.h"
-#include "InterpreterRuleContext.h"
-#include "Lexer.h"
-#include "Token.h"
-#include "Vocabulary.h"
-#include "atn/ATN.h"
-#include "atn/ActionTransition.h"
-#include "atn/AtomTransition.h"
-#include "atn/LoopEndState.h"
-#include "atn/ParserATNSimulator.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/PredicateTransition.h"
-#include "atn/RuleStartState.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "atn/StarLoopEntryState.h"
-#include "dfa/DFA.h"
-#include "tree/ErrorNode.h"
-
-#include "support/CPPUtils.h"
-
-#include "ParserInterpreter.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-using namespace antlrcpp;
-
-ParserInterpreter::ParserInterpreter(const std::string& grammarFileName,
- const std::vector<std::string>& tokenNames,
- const std::vector<std::string>& ruleNames,
- const atn::ATN& atn, TokenStream* input)
- : ParserInterpreter(grammarFileName,
- dfa::Vocabulary::fromTokenNames(tokenNames), ruleNames,
- atn, input) {}
-
-ParserInterpreter::ParserInterpreter(const std::string& grammarFileName,
- const dfa::Vocabulary& vocabulary,
- const std::vector<std::string>& ruleNames,
- const atn::ATN& atn, TokenStream* input)
- : Parser(input),
- _grammarFileName(grammarFileName),
- _atn(atn),
- _ruleNames(ruleNames),
- _vocabulary(vocabulary) {
- for (size_t i = 0; i < atn.maxTokenType; ++i) {
- _tokenNames.push_back(vocabulary.getDisplayName(i));
- }
-
- // init decision DFA
- for (size_t i = 0; i < atn.getNumberOfDecisions(); ++i) {
- atn::DecisionState* decisionState = atn.getDecisionState(i);
- _decisionToDFA.push_back(dfa::DFA(decisionState, i));
- }
-
- // get atn simulator that knows how to do predictions
- _interpreter = new atn::ParserATNSimulator(
- this, atn, _decisionToDFA,
- _sharedContextCache); /* mem-check: deleted in d-tor */
-}
-
-ParserInterpreter::~ParserInterpreter() { delete _interpreter; }
-
-void ParserInterpreter::reset() {
- Parser::reset();
- _overrideDecisionReached = false;
- _overrideDecisionRoot = nullptr;
-}
-
-const atn::ATN& ParserInterpreter::getATN() const { return _atn; }
-
-const std::vector<std::string>& ParserInterpreter::getTokenNames() const {
- return _tokenNames;
-}
-
-const dfa::Vocabulary& ParserInterpreter::getVocabulary() const {
- return _vocabulary;
-}
-
-const std::vector<std::string>& ParserInterpreter::getRuleNames() const {
- return _ruleNames;
-}
-
-std::string ParserInterpreter::getGrammarFileName() const {
- return _grammarFileName;
-}
-
-ParserRuleContext* ParserInterpreter::parse(size_t startRuleIndex) {
- atn::RuleStartState* startRuleStartState =
- _atn.ruleToStartState[startRuleIndex];
-
- _rootContext = createInterpreterRuleContext(
- nullptr, atn::ATNState::INVALID_STATE_NUMBER, startRuleIndex);
-
- if (startRuleStartState->isLeftRecursiveRule) {
- enterRecursionRule(_rootContext, startRuleStartState->stateNumber,
- startRuleIndex, 0);
- } else {
- enterRule(_rootContext, startRuleStartState->stateNumber, startRuleIndex);
- }
-
- while (true) {
- atn::ATNState* p = getATNState();
- switch (p->getStateType()) {
- case atn::ATNState::RULE_STOP:
- // pop; return from rule
- if (_ctx->isEmpty()) {
- if (startRuleStartState->isLeftRecursiveRule) {
- ParserRuleContext* result = _ctx;
- auto parentContext = _parentContextStack.top();
- _parentContextStack.pop();
- unrollRecursionContexts(parentContext.first);
- return result;
- } else {
- exitRule();
- return _rootContext;
- }
- }
-
- visitRuleStopState(p);
- break;
-
- default:
- try {
- visitState(p);
- } catch (RecognitionException& e) {
- setState(_atn.ruleToStopState[p->ruleIndex]->stateNumber);
- getErrorHandler()->reportError(this, e);
- getContext()->exception = std::current_exception();
- recover(e);
- }
-
- break;
- }
- }
-}
-
-void ParserInterpreter::enterRecursionRule(ParserRuleContext* localctx,
- size_t state, size_t ruleIndex,
- int precedence) {
- _parentContextStack.push({_ctx, localctx->invokingState});
- Parser::enterRecursionRule(localctx, state, ruleIndex, precedence);
-}
-
-void ParserInterpreter::addDecisionOverride(int decision, int tokenIndex,
- int forcedAlt) {
- _overrideDecision = decision;
- _overrideDecisionInputIndex = tokenIndex;
- _overrideDecisionAlt = forcedAlt;
-}
-
-Ref<InterpreterRuleContext> ParserInterpreter::getOverrideDecisionRoot() const {
- return _overrideDecisionRoot;
-}
-
-InterpreterRuleContext* ParserInterpreter::getRootContext() {
- return _rootContext;
-}
-
-atn::ATNState* ParserInterpreter::getATNState() {
- return _atn.states[getState()];
-}
-
-void ParserInterpreter::visitState(atn::ATNState* p) {
- size_t predictedAlt = 1;
- if (is<DecisionState*>(p)) {
- predictedAlt = visitDecisionState(dynamic_cast<DecisionState*>(p));
- }
-
- atn::Transition* transition = p->transitions[predictedAlt - 1];
- switch (transition->getSerializationType()) {
- case atn::Transition::EPSILON:
- if (p->getStateType() == ATNState::STAR_LOOP_ENTRY &&
- (dynamic_cast<StarLoopEntryState*>(p))->isPrecedenceDecision &&
- !is<LoopEndState*>(transition->target)) {
- // We are at the start of a left recursive rule's (...)* loop
- // and we're not taking the exit branch of loop.
- InterpreterRuleContext* localctx = createInterpreterRuleContext(
- _parentContextStack.top().first, _parentContextStack.top().second,
- static_cast<int>(_ctx->getRuleIndex()));
- pushNewRecursionContext(
- localctx, _atn.ruleToStartState[p->ruleIndex]->stateNumber,
- static_cast<int>(_ctx->getRuleIndex()));
- }
- break;
-
- case atn::Transition::ATOM:
- match(static_cast<int>(
- static_cast<atn::AtomTransition*>(transition)->_label));
- break;
-
- case atn::Transition::RANGE:
- case atn::Transition::SET:
- case atn::Transition::NOT_SET:
- if (!transition->matches(static_cast<int>(_input->LA(1)),
- Token::MIN_USER_TOKEN_TYPE,
- Lexer::MAX_CHAR_VALUE)) {
- recoverInline();
- }
- matchWildcard();
- break;
-
- case atn::Transition::WILDCARD:
- matchWildcard();
- break;
-
- case atn::Transition::RULE: {
- atn::RuleStartState* ruleStartState =
- static_cast<atn::RuleStartState*>(transition->target);
- size_t ruleIndex = ruleStartState->ruleIndex;
- InterpreterRuleContext* newctx =
- createInterpreterRuleContext(_ctx, p->stateNumber, ruleIndex);
- if (ruleStartState->isLeftRecursiveRule) {
- enterRecursionRule(
- newctx, ruleStartState->stateNumber, ruleIndex,
- static_cast<atn::RuleTransition*>(transition)->precedence);
- } else {
- enterRule(newctx, transition->target->stateNumber, ruleIndex);
- }
- } break;
-
- case atn::Transition::PREDICATE: {
- atn::PredicateTransition* predicateTransition =
- static_cast<atn::PredicateTransition*>(transition);
- if (!sempred(_ctx, predicateTransition->ruleIndex,
- predicateTransition->predIndex)) {
- throw FailedPredicateException(this);
- }
- } break;
-
- case atn::Transition::ACTION: {
- atn::ActionTransition* actionTransition =
- static_cast<atn::ActionTransition*>(transition);
- action(_ctx, actionTransition->ruleIndex, actionTransition->actionIndex);
- } break;
-
- case atn::Transition::PRECEDENCE: {
- if (!precpred(_ctx,
- static_cast<atn::PrecedencePredicateTransition*>(transition)
- ->precedence)) {
- throw FailedPredicateException(
- this,
- "precpred(_ctx, " +
- std::to_string(
- static_cast<atn::PrecedencePredicateTransition*>(transition)
- ->precedence) +
- ")");
- }
- } break;
-
- default:
- throw UnsupportedOperationException("Unrecognized ATN transition type.");
- }
-
- setState(transition->target->stateNumber);
-}
-
-size_t ParserInterpreter::visitDecisionState(DecisionState* p) {
- size_t predictedAlt = 1;
- if (p->transitions.size() > 1) {
- getErrorHandler()->sync(this);
- int decision = p->decision;
- if (decision == _overrideDecision &&
- _input->index() == _overrideDecisionInputIndex &&
- !_overrideDecisionReached) {
- predictedAlt = _overrideDecisionAlt;
- _overrideDecisionReached = true;
- } else {
- predictedAlt = getInterpreter<ParserATNSimulator>()->adaptivePredict(
- _input, decision, _ctx);
- }
- }
- return predictedAlt;
-}
-
-InterpreterRuleContext* ParserInterpreter::createInterpreterRuleContext(
- ParserRuleContext* parent, size_t invokingStateNumber, size_t ruleIndex) {
- return _tracker.createInstance<InterpreterRuleContext>(
- parent, invokingStateNumber, ruleIndex);
-}
-
-void ParserInterpreter::visitRuleStopState(atn::ATNState* p) {
- atn::RuleStartState* ruleStartState = _atn.ruleToStartState[p->ruleIndex];
- if (ruleStartState->isLeftRecursiveRule) {
- std::pair<ParserRuleContext*, size_t> parentContext =
- _parentContextStack.top();
- _parentContextStack.pop();
-
- unrollRecursionContexts(parentContext.first);
- setState(parentContext.second);
- } else {
- exitRule();
- }
-
- atn::RuleTransition* ruleTransition = static_cast<atn::RuleTransition*>(
- _atn.states[getState()]->transitions[0]);
- setState(ruleTransition->followState->stateNumber);
-}
-
-void ParserInterpreter::recover(RecognitionException& e) {
- size_t i = _input->index();
- getErrorHandler()->recover(this, std::make_exception_ptr(e));
-
- if (_input->index() == i) {
- // no input consumed, better add an error node
- if (is<InputMismatchException*>(&e)) {
- InputMismatchException& ime = static_cast<InputMismatchException&>(e);
- Token* tok = e.getOffendingToken();
- size_t expectedTokenType =
- ime.getExpectedTokens().getMinElement(); // get any element
- _errorToken = getTokenFactory()->create(
- {tok->getTokenSource(), tok->getTokenSource()->getInputStream()},
- expectedTokenType, tok->getText(), Token::DEFAULT_CHANNEL,
- INVALID_INDEX, INVALID_INDEX, // invalid start/stop
- tok->getLine(), tok->getCharPositionInLine());
- _ctx->addChild(createErrorNode(_errorToken.get()));
- } else { // NoViableAlt
- Token* tok = e.getOffendingToken();
- _errorToken = getTokenFactory()->create(
- {tok->getTokenSource(), tok->getTokenSource()->getInputStream()},
- Token::INVALID_TYPE, tok->getText(), Token::DEFAULT_CHANNEL,
- INVALID_INDEX, INVALID_INDEX, // invalid start/stop
- tok->getLine(), tok->getCharPositionInLine());
- _ctx->addChild(createErrorNode(_errorToken.get()));
- }
- }
-}
-
-Token* ParserInterpreter::recoverInline() {
- return _errHandler->recoverInline(this);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.h
deleted file mode 100644
index 121ac3682c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserInterpreter.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Parser.h"
-#include "Vocabulary.h"
-#include "atn/ATN.h"
-#include "atn/PredictionContext.h"
-#include "support/BitSet.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// A parser simulator that mimics what ANTLR's generated
-/// parser code does. A ParserATNSimulator is used to make
-/// predictions via adaptivePredict but this class moves a pointer through the
-/// ATN to simulate parsing. ParserATNSimulator just
-/// makes us efficient rather than having to backtrack, for example.
-///
-/// This properly creates parse trees even for left recursive rules.
-///
-/// We rely on the left recursive rule invocation and special predicate
-/// transitions to make left recursive rules work.
-///
-/// See TestParserInterpreter for examples.
-/// </summary>
-class ANTLR4CPP_PUBLIC ParserInterpreter : public Parser {
- public:
- // @deprecated
- ParserInterpreter(const std::string& grammarFileName,
- const std::vector<std::string>& tokenNames,
- const std::vector<std::string>& ruleNames,
- const atn::ATN& atn, TokenStream* input);
- ParserInterpreter(const std::string& grammarFileName,
- const dfa::Vocabulary& vocabulary,
- const std::vector<std::string>& ruleNames,
- const atn::ATN& atn, TokenStream* input);
- ~ParserInterpreter();
-
- virtual void reset() override;
-
- virtual const atn::ATN& getATN() const override;
-
- // @deprecated
- virtual const std::vector<std::string>& getTokenNames() const override;
-
- virtual const dfa::Vocabulary& getVocabulary() const override;
-
- virtual const std::vector<std::string>& getRuleNames() const override;
- virtual std::string getGrammarFileName() const override;
-
- /// Begin parsing at startRuleIndex
- virtual ParserRuleContext* parse(size_t startRuleIndex);
-
- virtual void enterRecursionRule(ParserRuleContext* localctx, size_t state,
- size_t ruleIndex, int precedence) override;
-
- /** Override this parser interpreter's normal decision-making process
- * at a particular decision and input token index. Instead of
- * allowing the adaptive prediction mechanism to choose the
- * first alternative within a block that leads to a successful parse,
- * force it to take the alternative, 1..n for n alternatives.
- *
- * As an implementation limitation right now, you can only specify one
- * override. This is sufficient to allow construction of different
- * parse trees for ambiguous input. It means re-parsing the entire input
- * in general because you're never sure where an ambiguous sequence would
- * live in the various parse trees. For example, in one interpretation,
- * an ambiguous input sequence would be matched completely in expression
- * but in another it could match all the way back to the root.
- *
- * s : e '!'? ;
- * e : ID
- * | ID '!'
- * ;
- *
- * Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first
- * case, the ambiguous sequence is fully contained only by the root.
- * In the second case, the ambiguous sequence is fully contained within just
- * e, as in: (e ID !).
- *
- * Rather than trying to optimize this and make
- * some intelligent decisions for optimization purposes, I settled on
- * just re-parsing the whole input and then using
- * {link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal
- * subtree that contains the ambiguous sequence. I originally tried to
- * record the call stack at the point the parser detected an ambiguity but
- * left recursive rules create a parse tree stack that does not reflect
- * the actual call stack. That impedance mismatch was enough to make
- * it challenging to restart the parser at a deeply nested rule
- * invocation.
- *
- * Only parser interpreters can override decisions so as to avoid inserting
- * override checking code in the critical ALL(*) prediction execution path.
- *
- * @since 4.5.1
- */
- void addDecisionOverride(int decision, int tokenIndex, int forcedAlt);
-
- Ref<InterpreterRuleContext> getOverrideDecisionRoot() const;
-
- /** Return the root of the parse, which can be useful if the parser
- * bails out. You still can access the top node. Note that,
- * because of the way left recursive rules add children, it's possible
- * that the root will not have any children if the start rule immediately
- * called a left recursive rule that fails.
- *
- * @since 4.5.1
- */
- InterpreterRuleContext* getRootContext();
-
- protected:
- const std::string _grammarFileName;
- std::vector<std::string> _tokenNames;
- const atn::ATN& _atn;
-
- std::vector<std::string> _ruleNames;
-
- std::vector<dfa::DFA>
- _decisionToDFA; // not shared like it is for generated parsers
- atn::PredictionContextCache _sharedContextCache;
-
- /** This stack corresponds to the _parentctx, _parentState pair of locals
- * that would exist on call stack frames with a recursive descent parser;
- * in the generated function for a left-recursive rule you'd see:
- *
- * private EContext e(int _p) throws RecognitionException {
- * ParserRuleContext _parentctx = _ctx; // Pair.a
- * int _parentState = getState(); // Pair.b
- * ...
- * }
- *
- * Those values are used to create new recursive rule invocation contexts
- * associated with left operand of an alt like "expr '*' expr".
- */
- std::stack<std::pair<ParserRuleContext*, size_t>> _parentContextStack;
-
- /** We need a map from (decision,inputIndex)->forced alt for computing
- * ambiguous parse trees. For now, we allow exactly one override.
- */
- int _overrideDecision = -1;
- size_t _overrideDecisionInputIndex = INVALID_INDEX;
- size_t _overrideDecisionAlt = INVALID_INDEX;
- bool _overrideDecisionReached =
- false; // latch and only override once; error might trigger infinite loop
-
- /** What is the current context when we override a decision? This tells
- * us what the root of the parse tree is when using override
- * for an ambiguity/lookahead check.
- */
- Ref<InterpreterRuleContext> _overrideDecisionRoot;
- InterpreterRuleContext* _rootContext;
-
- virtual atn::ATNState* getATNState();
- virtual void visitState(atn::ATNState* p);
-
- /** Method visitDecisionState() is called when the interpreter reaches
- * a decision state (instance of DecisionState). It gives an opportunity
- * for subclasses to track interesting things.
- */
- size_t visitDecisionState(atn::DecisionState* p);
-
- /** Provide simple "factory" for InterpreterRuleContext's.
- * @since 4.5.1
- */
- InterpreterRuleContext* createInterpreterRuleContext(
- ParserRuleContext* parent, size_t invokingStateNumber, size_t ruleIndex);
-
- virtual void visitRuleStopState(atn::ATNState* p);
-
- /** Rely on the error handler for this parser but, if no tokens are consumed
- * to recover, add an error node. Otherwise, nothing is seen in the parse
- * tree.
- */
- void recover(RecognitionException& e);
- Token* recoverInline();
-
- private:
- const dfa::Vocabulary& _vocabulary;
- std::unique_ptr<Token> _errorToken;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.cpp
deleted file mode 100644
index ae9cd7c6cd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-#include "Token.h"
-#include "misc/Interval.h"
-#include "tree/ErrorNode.h"
-#include "tree/TerminalNode.h"
-
-#include "support/CPPUtils.h"
-
-#include "ParserRuleContext.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-
-using namespace antlrcpp;
-
-ParserRuleContext ParserRuleContext::EMPTY;
-
-ParserRuleContext::ParserRuleContext() : start(nullptr), stop(nullptr) {}
-
-ParserRuleContext::ParserRuleContext(ParserRuleContext* parent,
- size_t invokingStateNumber)
- : RuleContext(parent, invokingStateNumber), start(nullptr), stop(nullptr) {}
-
-void ParserRuleContext::copyFrom(ParserRuleContext* ctx) {
- // from RuleContext
- this->parent = ctx->parent;
- this->invokingState = ctx->invokingState;
-
- this->start = ctx->start;
- this->stop = ctx->stop;
-
- // copy any error nodes to alt label node
- if (!ctx->children.empty()) {
- for (auto child : ctx->children) {
- auto errorNode = dynamic_cast<ErrorNode*>(child);
- if (errorNode != nullptr) {
- errorNode->setParent(this);
- children.push_back(errorNode);
- }
- }
-
- // Remove the just reparented error nodes from the source context.
- ctx->children.erase(
- std::remove_if(ctx->children.begin(), ctx->children.end(),
- [this](tree::ParseTree* e) -> bool {
- return std::find(children.begin(), children.end(),
- e) != children.end();
- }),
- ctx->children.end());
- }
-}
-
-void ParserRuleContext::enterRule(tree::ParseTreeListener* /*listener*/) {}
-
-void ParserRuleContext::exitRule(tree::ParseTreeListener* /*listener*/) {}
-
-tree::TerminalNode* ParserRuleContext::addChild(tree::TerminalNode* t) {
- t->setParent(this);
- children.push_back(t);
- return t;
-}
-
-RuleContext* ParserRuleContext::addChild(RuleContext* ruleInvocation) {
- children.push_back(ruleInvocation);
- return ruleInvocation;
-}
-
-void ParserRuleContext::removeLastChild() {
- if (!children.empty()) {
- children.pop_back();
- }
-}
-
-tree::TerminalNode* ParserRuleContext::getToken(size_t ttype, size_t i) {
- if (i >= children.size()) {
- return nullptr;
- }
-
- size_t j = 0; // what token with ttype have we found?
- for (auto o : children) {
- if (is<tree::TerminalNode*>(o)) {
- tree::TerminalNode* tnode = dynamic_cast<tree::TerminalNode*>(o);
- Token* symbol = tnode->getSymbol();
- if (symbol->getType() == ttype) {
- if (j++ == i) {
- return tnode;
- }
- }
- }
- }
-
- return nullptr;
-}
-
-std::vector<tree::TerminalNode*> ParserRuleContext::getTokens(size_t ttype) {
- std::vector<tree::TerminalNode*> tokens;
- for (auto& o : children) {
- if (is<tree::TerminalNode*>(o)) {
- tree::TerminalNode* tnode = dynamic_cast<tree::TerminalNode*>(o);
- Token* symbol = tnode->getSymbol();
- if (symbol->getType() == ttype) {
- tokens.push_back(tnode);
- }
- }
- }
-
- return tokens;
-}
-
-misc::Interval ParserRuleContext::getSourceInterval() {
- if (start == nullptr) {
- return misc::Interval::INVALID;
- }
-
- if (stop == nullptr || stop->getTokenIndex() < start->getTokenIndex()) {
- return misc::Interval(start->getTokenIndex(),
- start->getTokenIndex() - 1); // empty
- }
- return misc::Interval(start->getTokenIndex(), stop->getTokenIndex());
-}
-
-Token* ParserRuleContext::getStart() { return start; }
-
-Token* ParserRuleContext::getStop() { return stop; }
-
-std::string ParserRuleContext::toInfoString(Parser* recognizer) {
- std::vector<std::string> rules = recognizer->getRuleInvocationStack(this);
- std::reverse(rules.begin(), rules.end());
- std::string rulesStr = antlrcpp::arrayToString(rules);
- return "ParserRuleContext" + rulesStr +
- "{start=" + std::to_string(start->getTokenIndex()) +
- ", stop=" + std::to_string(stop->getTokenIndex()) + '}';
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.h
deleted file mode 100644
index f936210665..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ParserRuleContext.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RuleContext.h"
-#include "support/CPPUtils.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// A rule invocation record for parsing.
-///
-/// Contains all of the information about the current rule not stored in the
-/// RuleContext. It handles the parse tree children list, any ATN state
-/// tracing, and the default values available for rule invocations:
-/// start, stop, rule index, current alt number.
-///
-/// Subclasses made for each rule and grammar track the parameters,
-/// return values, locals, and labels specific to that rule. These
-/// are the objects that are returned from rules.
-///
-/// Note text is not an actual field of a rule return value; it is computed
-/// from start and stop using the input stream's toString() method. I
-/// could add a ctor to this so that we can pass in and store the input
-/// stream, but I'm not sure we want to do that. It would seem to be undefined
-/// to get the .text property anyway if the rule matches tokens from multiple
-/// input streams.
-///
-/// I do not use getters for fields of objects that are used simply to
-/// group values such as this aggregate. The getters/setters are there to
-/// satisfy the superclass interface.
-/// </summary>
-class ANTLR4CPP_PUBLIC ParserRuleContext : public RuleContext {
- public:
- static ParserRuleContext EMPTY;
-
- /// <summary>
- /// For debugging/tracing purposes, we want to track all of the nodes in
- /// the ATN traversed by the parser for a particular rule.
- /// This list indicates the sequence of ATN nodes used to match
- /// the elements of the children list. This list does not include
- /// ATN nodes and other rules used to match rule invocations. It
- /// traces the rule invocation node itself but nothing inside that
- /// other rule's ATN submachine.
- ///
- /// There is NOT a one-to-one correspondence between the children and
- /// states list. There are typically many nodes in the ATN traversed
- /// for each element in the children list. For example, for a rule
- /// invocation there is the invoking state and the following state.
- ///
- /// The parser setState() method updates field s and adds it to this list
- /// if we are debugging/tracing.
- ///
- /// This does not trace states visited during prediction.
- /// </summary>
- // public List<Integer> states;
-
- Token* start;
- Token* stop;
-
- /// The exception that forced this rule to return. If the rule successfully
- /// completed, this is "null exception pointer".
- std::exception_ptr exception;
-
- ParserRuleContext();
- ParserRuleContext(ParserRuleContext* parent, size_t invokingStateNumber);
- virtual ~ParserRuleContext() {}
-
- /** COPY a ctx (I'm deliberately not using the copy constructor) to avoid
- * confusion with creating a node with a parent. Does not copy children
- * (except error leaves).
- */
- virtual void copyFrom(ParserRuleContext* ctx);
-
- // Double dispatch methods for listeners
-
- virtual void enterRule(tree::ParseTreeListener* listener);
- virtual void exitRule(tree::ParseTreeListener* listener);
-
- /** Add a token leaf node child and force its parent to be this node. */
- tree::TerminalNode* addChild(tree::TerminalNode* t);
- RuleContext* addChild(RuleContext* ruleInvocation);
-
- /// Used by enterOuterAlt to toss out a RuleContext previously added as
- /// we entered a rule. If we have a # label, we will need to remove the
- /// generic ruleContext object.
- virtual void removeLastChild();
-
- virtual tree::TerminalNode* getToken(size_t ttype, std::size_t i);
-
- virtual std::vector<tree::TerminalNode*> getTokens(size_t ttype);
-
- template <typename T>
- T* getRuleContext(size_t i) {
- if (children.empty()) {
- return nullptr;
- }
-
- size_t j = 0; // what element have we found with ctxType?
- for (auto& child : children) {
- if (antlrcpp::is<T*>(child)) {
- if (j++ == i) {
- return dynamic_cast<T*>(child);
- }
- }
- }
- return nullptr;
- }
-
- template <typename T>
- std::vector<T*> getRuleContexts() {
- std::vector<T*> contexts;
- for (auto child : children) {
- if (antlrcpp::is<T*>(child)) {
- contexts.push_back(dynamic_cast<T*>(child));
- }
- }
-
- return contexts;
- }
-
- virtual misc::Interval getSourceInterval() override;
-
- /**
- * Get the initial token in this context.
- * Note that the range from start to stop is inclusive, so for rules that do
- * not consume anything (for example, zero length or error productions) this
- * token may exceed stop.
- */
- virtual Token* getStart();
-
- /**
- * Get the final token in this context.
- * Note that the range from start to stop is inclusive, so for rules that do
- * not consume anything (for example, zero length or error productions) this
- * token may precede start.
- */
- virtual Token* getStop();
-
- /// <summary>
- /// Used for rule context info debugging during parse-time, not so much for
- /// ATN debugging </summary>
- virtual std::string toInfoString(Parser* recognizer);
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.cpp
deleted file mode 100644
index 8b03798ccc..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ProxyErrorListener.h"
-
-using namespace antlr4;
-
-void ProxyErrorListener::addErrorListener(ANTLRErrorListener* listener) {
- if (listener == nullptr) {
- throw "listener cannot be null.";
- }
-
- _delegates.insert(listener);
-}
-
-void ProxyErrorListener::removeErrorListener(ANTLRErrorListener* listener) {
- _delegates.erase(listener);
-}
-
-void ProxyErrorListener::removeErrorListeners() { _delegates.clear(); }
-
-void ProxyErrorListener::syntaxError(Recognizer* recognizer,
- Token* offendingSymbol, size_t line,
- size_t charPositionInLine,
- const std::string& msg,
- std::exception_ptr e) {
- for (auto listener : _delegates) {
- listener->syntaxError(recognizer, offendingSymbol, line, charPositionInLine,
- msg, e);
- }
-}
-
-void ProxyErrorListener::reportAmbiguity(Parser* recognizer,
- const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- atn::ATNConfigSet* configs) {
- for (auto listener : _delegates) {
- listener->reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact,
- ambigAlts, configs);
- }
-}
-
-void ProxyErrorListener::reportAttemptingFullContext(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet& conflictingAlts,
- atn::ATNConfigSet* configs) {
- for (auto listener : _delegates) {
- listener->reportAttemptingFullContext(recognizer, dfa, startIndex,
- stopIndex, conflictingAlts, configs);
- }
-}
-
-void ProxyErrorListener::reportContextSensitivity(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, size_t prediction, atn::ATNConfigSet* configs) {
- for (auto listener : _delegates) {
- listener->reportContextSensitivity(recognizer, dfa, startIndex, stopIndex,
- prediction, configs);
- }
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.h
deleted file mode 100644
index aa17175933..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/ProxyErrorListener.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRErrorListener.h"
-#include "Exceptions.h"
-
-namespace antlr4 {
-
-/// This implementation of ANTLRErrorListener dispatches all calls to a
-/// collection of delegate listeners. This reduces the effort required to
-/// support multiple listeners.
-class ANTLR4CPP_PUBLIC ProxyErrorListener : public ANTLRErrorListener {
- private:
- std::set<ANTLRErrorListener*> _delegates; // Not owned.
-
- public:
- void addErrorListener(ANTLRErrorListener* listener);
- void removeErrorListener(ANTLRErrorListener* listener);
- void removeErrorListeners();
-
- void syntaxError(Recognizer* recognizer, Token* offendingSymbol, size_t line,
- size_t charPositionInLine, const std::string& msg,
- std::exception_ptr e) override;
-
- virtual void reportAmbiguity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- atn::ATNConfigSet* configs) override;
-
- virtual void reportAttemptingFullContext(
- Parser* recognizer, const dfa::DFA& dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet& conflictingAlts,
- atn::ATNConfigSet* configs) override;
-
- virtual void reportContextSensitivity(Parser* recognizer, const dfa::DFA& dfa,
- size_t startIndex, size_t stopIndex,
- size_t prediction,
- atn::ATNConfigSet* configs) override;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.cpp
deleted file mode 100644
index 37524d7442..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ParserRuleContext.h"
-#include "Recognizer.h"
-#include "atn/ATN.h"
-#include "misc/IntervalSet.h"
-#include "support/StringUtils.h"
-
-#include "RecognitionException.h"
-
-using namespace antlr4;
-
-RecognitionException::RecognitionException(Recognizer* recognizer,
- IntStream* input,
- ParserRuleContext* ctx,
- Token* offendingToken)
- : RecognitionException("", recognizer, input, ctx, offendingToken) {}
-
-RecognitionException::RecognitionException(const std::string& message,
- Recognizer* recognizer,
- IntStream* input,
- ParserRuleContext* ctx,
- Token* offendingToken)
- : RuntimeException(message),
- _recognizer(recognizer),
- _input(input),
- _ctx(ctx),
- _offendingToken(offendingToken) {
- InitializeInstanceFields();
- if (recognizer != nullptr) {
- _offendingState = recognizer->getState();
- }
-}
-
-RecognitionException::~RecognitionException() {}
-
-size_t RecognitionException::getOffendingState() const {
- return _offendingState;
-}
-
-void RecognitionException::setOffendingState(size_t offendingState) {
- _offendingState = offendingState;
-}
-
-misc::IntervalSet RecognitionException::getExpectedTokens() const {
- if (_recognizer) {
- return _recognizer->getATN().getExpectedTokens(_offendingState, _ctx);
- }
- return misc::IntervalSet::EMPTY_SET;
-}
-
-RuleContext* RecognitionException::getCtx() const { return _ctx; }
-
-IntStream* RecognitionException::getInputStream() const { return _input; }
-
-Token* RecognitionException::getOffendingToken() const {
- return _offendingToken;
-}
-
-Recognizer* RecognitionException::getRecognizer() const { return _recognizer; }
-
-void RecognitionException::InitializeInstanceFields() {
- _offendingState = INVALID_INDEX;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.h
deleted file mode 100644
index 7092533167..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RecognitionException.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Exceptions.h"
-
-namespace antlr4 {
-
-/// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
-/// 3 kinds of errors: prediction errors, failed predicate errors, and
-/// mismatched input errors. In each case, the parser knows where it is
-/// in the input, where it is in the ATN, the rule invocation stack,
-/// and what kind of problem occurred.
-class ANTLR4CPP_PUBLIC RecognitionException : public RuntimeException {
- private:
- /// The Recognizer where this exception originated.
- Recognizer* _recognizer;
- IntStream* _input;
- ParserRuleContext* _ctx;
-
- /// The current Token when an error occurred. Since not all streams
- /// support accessing symbols by index, we have to track the Token
- /// instance itself.
- Token* _offendingToken;
-
- size_t _offendingState;
-
- public:
- RecognitionException(Recognizer* recognizer, IntStream* input,
- ParserRuleContext* ctx, Token* offendingToken = nullptr);
- RecognitionException(const std::string& message, Recognizer* recognizer,
- IntStream* input, ParserRuleContext* ctx,
- Token* offendingToken = nullptr);
- RecognitionException(RecognitionException const&) = default;
- ~RecognitionException();
- RecognitionException& operator=(RecognitionException const&) = default;
-
- /// Get the ATN state number the parser was in at the time the error
- /// occurred. For NoViableAltException and
- /// LexerNoViableAltException exceptions, this is the
- /// DecisionState number. For others, it is the state whose outgoing
- /// edge we couldn't match.
- ///
- /// If the state number is not known, this method returns -1.
- virtual size_t getOffendingState() const;
-
- protected:
- void setOffendingState(size_t offendingState);
-
- /// Gets the set of input symbols which could potentially follow the
- /// previously matched symbol at the time this exception was thrown.
- ///
- /// If the set of expected tokens is not known and could not be computed,
- /// this method returns an empty set.
- ///
- /// @returns The set of token types that could potentially follow the current
- /// state in the ATN, or an empty set if the information is not available.
- public:
- virtual misc::IntervalSet getExpectedTokens() const;
-
- /// <summary>
- /// Gets the <seealso cref="RuleContext"/> at the time this exception was
- /// thrown. <p/> If the context is not available, this method returns {@code
- /// null}.
- /// </summary>
- /// <returns> The <seealso cref="RuleContext"/> at the time this exception was
- /// thrown. If the context is not available, this method returns {@code null}.
- /// </returns>
- virtual RuleContext* getCtx() const;
-
- /// <summary>
- /// Gets the input stream which is the symbol source for the recognizer where
- /// this exception was thrown.
- /// <p/>
- /// If the input stream is not available, this method returns {@code null}.
- /// </summary>
- /// <returns> The input stream which is the symbol source for the recognizer
- /// where this exception was thrown, or {@code null} if the stream is not
- /// available. </returns>
- virtual IntStream* getInputStream() const;
-
- virtual Token* getOffendingToken() const;
-
- /// <summary>
- /// Gets the <seealso cref="Recognizer"/> where this exception occurred.
- /// <p/>
- /// If the recognizer is not available, this method returns {@code null}.
- /// </summary>
- /// <returns> The recognizer where this exception occurred, or {@code null} if
- /// the recognizer is not available. </returns>
- virtual Recognizer* getRecognizer() const;
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.cpp
deleted file mode 100644
index 0ab5c1925c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ConsoleErrorListener.h"
-#include "RecognitionException.h"
-#include "Token.h"
-#include "atn/ATN.h"
-#include "atn/ATNSimulator.h"
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-
-#include "Vocabulary.h"
-
-#include "Recognizer.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-std::map<const dfa::Vocabulary*, std::map<std::string, size_t>>
- Recognizer::_tokenTypeMapCache;
-std::map<std::vector<std::string>, std::map<std::string, size_t>>
- Recognizer::_ruleIndexMapCache;
-
-Recognizer::Recognizer() {
- InitializeInstanceFields();
- _proxListener.addErrorListener(&ConsoleErrorListener::INSTANCE);
-}
-
-Recognizer::~Recognizer() {}
-
-dfa::Vocabulary const& Recognizer::getVocabulary() const {
- static dfa::Vocabulary vocabulary =
- dfa::Vocabulary::fromTokenNames(getTokenNames());
- return vocabulary;
-}
-
-std::map<std::string, size_t> Recognizer::getTokenTypeMap() {
- const dfa::Vocabulary& vocabulary = getVocabulary();
-
- std::lock_guard<std::mutex> lck(_mutex);
- std::map<std::string, size_t> result;
- auto iterator = _tokenTypeMapCache.find(&vocabulary);
- if (iterator != _tokenTypeMapCache.end()) {
- result = iterator->second;
- } else {
- for (size_t i = 0; i <= getATN().maxTokenType; ++i) {
- std::string literalName = vocabulary.getLiteralName(i);
- if (!literalName.empty()) {
- result[literalName] = i;
- }
-
- std::string symbolicName = vocabulary.getSymbolicName(i);
- if (!symbolicName.empty()) {
- result[symbolicName] = i;
- }
- }
- result["EOF"] = EOF;
- _tokenTypeMapCache[&vocabulary] = result;
- }
-
- return result;
-}
-
-std::map<std::string, size_t> Recognizer::getRuleIndexMap() {
- const std::vector<std::string>& ruleNames = getRuleNames();
- if (ruleNames.empty()) {
- throw "The current recognizer does not provide a list of rule names.";
- }
-
- std::lock_guard<std::mutex> lck(_mutex);
- std::map<std::string, size_t> result;
- auto iterator = _ruleIndexMapCache.find(ruleNames);
- if (iterator != _ruleIndexMapCache.end()) {
- result = iterator->second;
- } else {
- result = antlrcpp::toMap(ruleNames);
- _ruleIndexMapCache[ruleNames] = result;
- }
- return result;
-}
-
-size_t Recognizer::getTokenType(const std::string& tokenName) {
- const std::map<std::string, size_t>& map = getTokenTypeMap();
- auto iterator = map.find(tokenName);
- if (iterator == map.end()) return Token::INVALID_TYPE;
-
- return iterator->second;
-}
-
-void Recognizer::setInterpreter(atn::ATNSimulator* interpreter) {
- // Usually the interpreter is set by the descendant (lexer or parser
- // simulator), but can also be exchanged by the profiling ATN simulator.
- delete _interpreter;
- _interpreter = interpreter;
-}
-
-std::string Recognizer::getErrorHeader(RecognitionException* e) {
- // We're having issues with cross-header dependencies; these two classes
- // will need to be rewritten to remove that.
- size_t line = e->getOffendingToken()->getLine();
- size_t charPositionInLine = e->getOffendingToken()->getCharPositionInLine();
- return std::string("line ") + std::to_string(line) + ":" +
- std::to_string(charPositionInLine);
-}
-
-std::string Recognizer::getTokenErrorDisplay(Token* t) {
- if (t == nullptr) {
- return "<no Token>";
- }
- std::string s = t->getText();
- if (s == "") {
- if (t->getType() == EOF) {
- s = "<EOF>";
- } else {
- s = std::string("<") + std::to_string(t->getType()) + std::string(">");
- }
- }
-
- antlrcpp::replaceAll(s, "\n", "\\n");
- antlrcpp::replaceAll(s, "\r", "\\r");
- antlrcpp::replaceAll(s, "\t", "\\t");
-
- return "'" + s + "'";
-}
-
-void Recognizer::addErrorListener(ANTLRErrorListener* listener) {
- _proxListener.addErrorListener(listener);
-}
-
-void Recognizer::removeErrorListener(ANTLRErrorListener* listener) {
- _proxListener.removeErrorListener(listener);
-}
-
-void Recognizer::removeErrorListeners() {
- _proxListener.removeErrorListeners();
-}
-
-ProxyErrorListener& Recognizer::getErrorListenerDispatch() {
- return _proxListener;
-}
-
-bool Recognizer::sempred(RuleContext* /*localctx*/, size_t /*ruleIndex*/,
- size_t /*actionIndex*/) {
- return true;
-}
-
-bool Recognizer::precpred(RuleContext* /*localctx*/, int /*precedence*/) {
- return true;
-}
-
-void Recognizer::action(RuleContext* /*localctx*/, size_t /*ruleIndex*/,
- size_t /*actionIndex*/) {}
-
-size_t Recognizer::getState() const { return _stateNumber; }
-
-void Recognizer::setState(size_t atnState) { _stateNumber = atnState; }
-
-void Recognizer::InitializeInstanceFields() {
- _stateNumber = ATNState::INVALID_STATE_NUMBER;
- _interpreter = nullptr;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.h
deleted file mode 100644
index f8df0fc8a6..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Recognizer.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ProxyErrorListener.h"
-
-namespace antlr4 {
-
-class ANTLR4CPP_PUBLIC Recognizer {
- public:
- static const size_t EOF = static_cast<size_t>(
- -1); // std::numeric_limits<size_t>::max(); doesn't work in VS 2013.
-
- Recognizer();
- Recognizer(Recognizer const&) = delete;
- virtual ~Recognizer();
-
- Recognizer& operator=(Recognizer const&) = delete;
-
- /** Used to print out token names like ID during debugging and
- * error reporting. The generated parsers implement a method
- * that overrides this to point to their String[] tokenNames.
- *
- * @deprecated Use {@link #getVocabulary()} instead.
- */
- virtual std::vector<std::string> const& getTokenNames() const = 0;
- virtual std::vector<std::string> const& getRuleNames() const = 0;
-
- /**
- * Get the vocabulary used by the recognizer.
- *
- * @return A {@link Vocabulary} instance providing information about the
- * vocabulary used by the grammar.
- */
- virtual dfa::Vocabulary const& getVocabulary() const;
-
- /// <summary>
- /// Get a map from token names to token types.
- /// <p/>
- /// Used for XPath and tree pattern compilation.
- /// </summary>
- virtual std::map<std::string, size_t> getTokenTypeMap();
-
- /// <summary>
- /// Get a map from rule names to rule indexes.
- /// <p/>
- /// Used for XPath and tree pattern compilation.
- /// </summary>
- virtual std::map<std::string, size_t> getRuleIndexMap();
-
- virtual size_t getTokenType(const std::string& tokenName);
-
- /// <summary>
- /// If this recognizer was generated, it will have a serialized ATN
- /// representation of the grammar.
- /// <p/>
- /// For interpreters, we don't know their serialized ATN despite having
- /// created the interpreter from it.
- /// </summary>
- virtual const std::vector<uint16_t> getSerializedATN() const {
- throw "there is no serialized ATN";
- }
-
- /// <summary>
- /// For debugging and other purposes, you might want the grammar name.
- /// Have ANTLR generate an implementation for this method.
- /// </summary>
- virtual std::string getGrammarFileName() const = 0;
-
- /// Get the ATN interpreter (in fact one of its descendants) used by the
- /// recognizer for prediction.
- /// @returns The ATN interpreter used by the recognizer for prediction.
- template <class T>
- T* getInterpreter() const {
- return dynamic_cast<T*>(_interpreter);
- }
-
- /**
- * Set the ATN interpreter used by the recognizer for prediction.
- *
- * @param interpreter The ATN interpreter used by the recognizer for
- * prediction.
- */
- void setInterpreter(atn::ATNSimulator* interpreter);
-
- /// What is the error header, normally line/character position information?
- virtual std::string getErrorHeader(RecognitionException* e);
-
- /** How should a token be displayed in an error message? The default
- * is to display just the text, but during development you might
- * want to have a lot of information spit out. Override in that case
- * to use t.toString() (which, for CommonToken, dumps everything about
- * the token). This is better than forcing you to override a method in
- * your token objects because you don't have to go modify your lexer
- * so that it creates a new Java type.
- *
- * @deprecated This method is not called by the ANTLR 4 Runtime. Specific
- * implementations of {@link ANTLRErrorStrategy} may provide a similar
- * feature when necessary. For example, see
- * {@link DefaultErrorStrategy#getTokenErrorDisplay}.
- */
- virtual std::string getTokenErrorDisplay(Token* t);
-
- /// <exception cref="NullPointerException"> if {@code listener} is {@code
- /// null}. </exception>
- virtual void addErrorListener(ANTLRErrorListener* listener);
-
- virtual void removeErrorListener(ANTLRErrorListener* listener);
-
- virtual void removeErrorListeners();
-
- virtual ProxyErrorListener& getErrorListenerDispatch();
-
- // subclass needs to override these if there are sempreds or actions
- // that the ATN interp needs to execute
- virtual bool sempred(RuleContext* localctx, size_t ruleIndex,
- size_t actionIndex);
-
- virtual bool precpred(RuleContext* localctx, int precedence);
-
- virtual void action(RuleContext* localctx, size_t ruleIndex,
- size_t actionIndex);
-
- virtual size_t getState() const;
-
- // Get the ATN used by the recognizer for prediction.
- virtual const atn::ATN& getATN() const = 0;
-
- /// <summary>
- /// Indicate that the recognizer has changed internal state that is
- /// consistent with the ATN state passed in. This way we always know
- /// where we are in the ATN as the parser goes along. The rule
- /// context objects form a stack that lets us see the stack of
- /// invoking rules. Combine this and we have complete ATN
- /// configuration information.
- /// </summary>
- void setState(size_t atnState);
-
- virtual IntStream* getInputStream() = 0;
-
- virtual void setInputStream(IntStream* input) = 0;
-
- virtual Ref<TokenFactory<CommonToken>> getTokenFactory() = 0;
-
- template <typename T1>
- void setTokenFactory(TokenFactory<T1>* input);
-
- protected:
- atn::ATNSimulator*
- _interpreter; // Set and deleted in descendants (or the profiler).
-
- // Mutex to manage synchronized access for multithreading.
- std::mutex _mutex;
-
- private:
- static std::map<const dfa::Vocabulary*, std::map<std::string, size_t>>
- _tokenTypeMapCache;
- static std::map<std::vector<std::string>, std::map<std::string, size_t>>
- _ruleIndexMapCache;
-
- ProxyErrorListener _proxListener; // Manages a collection of listeners.
-
- size_t _stateNumber;
-
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.cpp
deleted file mode 100644
index 3f2ade1946..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "misc/Interval.h"
-#include "tree/ParseTreeVisitor.h"
-#include "tree/Trees.h"
-
-#include "RuleContext.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-RuleContext::RuleContext() { InitializeInstanceFields(); }
-
-RuleContext::RuleContext(RuleContext* parent_, size_t invokingState_) {
- InitializeInstanceFields();
- this->parent = parent_;
- this->invokingState = invokingState_;
-}
-
-int RuleContext::depth() {
- int n = 1;
- RuleContext* p = this;
- while (true) {
- if (p->parent == nullptr) break;
- p = static_cast<RuleContext*>(p->parent);
- n++;
- }
- return n;
-}
-
-bool RuleContext::isEmpty() {
- return invokingState == ATNState::INVALID_STATE_NUMBER;
-}
-
-misc::Interval RuleContext::getSourceInterval() {
- return misc::Interval::INVALID;
-}
-
-std::string RuleContext::getText() {
- if (children.empty()) {
- return "";
- }
-
- std::stringstream ss;
- for (size_t i = 0; i < children.size(); i++) {
- ParseTree* tree = children[i];
- if (tree != nullptr) ss << tree->getText();
- }
-
- return ss.str();
-}
-
-size_t RuleContext::getRuleIndex() const { return INVALID_INDEX; }
-
-size_t RuleContext::getAltNumber() const {
- return atn::ATN::INVALID_ALT_NUMBER;
-}
-
-void RuleContext::setAltNumber(size_t /*altNumber*/) {}
-
-antlrcpp::Any RuleContext::accept(tree::ParseTreeVisitor* visitor) {
- return visitor->visitChildren(this);
-}
-
-std::string RuleContext::toStringTree(Parser* recog) {
- return tree::Trees::toStringTree(this, recog);
-}
-
-std::string RuleContext::toStringTree(std::vector<std::string>& ruleNames) {
- return tree::Trees::toStringTree(this, ruleNames);
-}
-
-std::string RuleContext::toStringTree() { return toStringTree(nullptr); }
-
-std::string RuleContext::toString(const std::vector<std::string>& ruleNames) {
- return toString(ruleNames, nullptr);
-}
-
-std::string RuleContext::toString(const std::vector<std::string>& ruleNames,
- RuleContext* stop) {
- std::stringstream ss;
-
- RuleContext* currentParent = this;
- ss << "[";
- while (currentParent != stop) {
- if (ruleNames.empty()) {
- if (!currentParent->isEmpty()) {
- ss << currentParent->invokingState;
- }
- } else {
- size_t ruleIndex = currentParent->getRuleIndex();
-
- std::string ruleName = (ruleIndex < ruleNames.size())
- ? ruleNames[ruleIndex]
- : std::to_string(ruleIndex);
- ss << ruleName;
- }
-
- if (currentParent->parent == nullptr) // No parent anymore.
- break;
- currentParent = static_cast<RuleContext*>(currentParent->parent);
- if (!ruleNames.empty() || !currentParent->isEmpty()) {
- ss << " ";
- }
- }
-
- ss << "]";
-
- return ss.str();
-}
-
-std::string RuleContext::toString() { return toString(nullptr); }
-
-std::string RuleContext::toString(Recognizer* recog) {
- return toString(recog, &ParserRuleContext::EMPTY);
-}
-
-std::string RuleContext::toString(Recognizer* recog, RuleContext* stop) {
- if (recog == nullptr)
- return toString(std::vector<std::string>(), stop); // Don't use an
- // initializer {} here
- // or we end up calling
-                                                         // ourselves recursively.
- return toString(recog->getRuleNames(), stop);
-}
-
-void RuleContext::InitializeInstanceFields() { invokingState = INVALID_INDEX; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.h
deleted file mode 100644
index 3d34f24e12..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContext.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/ParseTree.h"
-
-namespace antlr4 {
-
-/** A rule context is a record of a single rule invocation.
- *
- * We form a stack of these context objects using the parent
- * pointer. A parent pointer of null indicates that the current
- * context is the bottom of the stack. The ParserRuleContext subclass
- * as a children list so that we can turn this data structure into a
- * tree.
- *
- * The root node always has a null pointer and invokingState of -1.
- *
- * Upon entry to parsing, the first invoked rule function creates a
- * context object (a subclass specialized for that rule such as
- * SContext) and makes it the root of a parse tree, recorded by field
- * Parser._ctx.
- *
- * public final SContext s() throws RecognitionException {
- * SContext _localctx = new SContext(_ctx, getState()); <-- create new node
- * enterRule(_localctx, 0, RULE_s); <-- push it
- * ...
- *  exitRule();                                          <-- pop back to _localctx
- *  return _localctx;
- * }
- *
- * A subsequent rule invocation of r from the start rule s pushes a
- * new context object for r whose parent points at s and whose invoking
- * state is the state with r emanating as edge label.
- *
- * The invokingState fields from a context object to the root
- * together form a stack of rule invocation states where the root
- * (bottom of the stack) has a -1 sentinel value. If we invoke start
- * symbol s then call r1, which calls r2, the stack would look like
- * this:
- *
- * SContext[-1] <- root node (bottom of the stack)
- * R1Context[p] <- p in rule s called r1
- * R2Context[q] <- q in rule r1 called r2
- *
- * So the top of the stack, _ctx, represents a call to the current
- * rule and it holds the return address from the rule that invoked
- * this rule. To invoke a rule, we must always have a current context.
- *
- * The parent contexts are useful for computing lookahead sets and
- * getting error information.
- *
- * These objects are used during parsing and prediction.
- * For the special case of parsers, we use the subclass
- * ParserRuleContext.
- *
- * @see ParserRuleContext
- */
-class ANTLR4CPP_PUBLIC RuleContext : public tree::ParseTree {
- public:
- /// What state invoked the rule associated with this context?
- /// The "return address" is the followState of invokingState.
- /// If parent is null, this should be -1 and this context object represents
- /// the start rule.
- size_t invokingState;
-
- RuleContext();
- RuleContext(RuleContext* parent, size_t invokingState);
-
- virtual int depth();
-
- /// A context is empty if there is no invoking state, meaning nobody called
- /// the current context.
- virtual bool isEmpty();
-
- // satisfy the ParseTree / SyntaxTree interface
-
- virtual misc::Interval getSourceInterval() override;
-
- virtual std::string getText() override;
-
- virtual size_t getRuleIndex() const;
-
- /** For rule associated with this parse tree internal node, return
- * the outer alternative number used to match the input. Default
- * implementation does not compute nor store this alt num. Create
- * a subclass of ParserRuleContext with a backing field and set
- * option contextSuperClass
- * to set it.
- *
- * @since 4.5.3
- */
- virtual size_t getAltNumber() const;
-
- /** Set the outer alternative number for this context node. Default
- * implementation does nothing to avoid backing field overhead for
- * trees that don't need it. Create
- * a subclass of ParserRuleContext with backing field and set
- * option contextSuperClass.
- *
- * @since 4.5.3
- */
- virtual void setAltNumber(size_t altNumber);
-
- virtual antlrcpp::Any accept(tree::ParseTreeVisitor* visitor) override;
-
- /// <summary>
- /// Print out a whole tree, not just a node, in LISP format
- /// (root child1 .. childN). Print just a node if this is a leaf.
- /// We have to know the recognizer so we can get rule names.
- /// </summary>
- virtual std::string toStringTree(Parser* recog) override;
-
- /// <summary>
- /// Print out a whole tree, not just a node, in LISP format
- /// (root child1 .. childN). Print just a node if this is a leaf.
- /// </summary>
- virtual std::string toStringTree(std::vector<std::string>& ruleNames);
-
- virtual std::string toStringTree() override;
- virtual std::string toString() override;
- std::string toString(Recognizer* recog);
- std::string toString(const std::vector<std::string>& ruleNames);
-
- // recog null unless ParserRuleContext, in which case we use subclass
- // toString(...)
- std::string toString(Recognizer* recog, RuleContext* stop);
-
- virtual std::string toString(const std::vector<std::string>& ruleNames,
- RuleContext* stop);
-
- bool operator==(const RuleContext& other) {
- return this == &other;
- } // Simple address comparison.
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp
deleted file mode 100644
index e1ef3024b0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATN.h"
-
-#include "RuleContextWithAltNum.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-RuleContextWithAltNum::RuleContextWithAltNum() : ParserRuleContext() {
- altNum = ATN::INVALID_ALT_NUMBER;
-}
-
-RuleContextWithAltNum::RuleContextWithAltNum(ParserRuleContext* parent,
- int invokingStateNumber)
- : ParserRuleContext(parent, invokingStateNumber) {}
-
-size_t RuleContextWithAltNum::getAltNumber() const { return altNum; }
-
-void RuleContextWithAltNum::setAltNumber(size_t number) { altNum = number; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.h
deleted file mode 100644
index d0d1053f7d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuleContextWithAltNum.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ParserRuleContext.h"
-
-namespace antlr4 {
-
-/// A handy class for use with
-///
-/// options {contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum;}
-///
-/// that provides a backing field / impl for the outer alternative number
-/// matched for an internal parse tree node.
-///
-/// I'm only putting this into the Java runtime as I'm certain I'm the only
-/// one that will really ever use this.
-class ANTLR4CPP_PUBLIC RuleContextWithAltNum : public ParserRuleContext {
- public:
- size_t altNum = 0;
-
- RuleContextWithAltNum();
- RuleContextWithAltNum(ParserRuleContext* parent, int invokingStateNumber);
-
- virtual size_t getAltNumber() const override;
- virtual void setAltNumber(size_t altNum) override;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.cpp
deleted file mode 100644
index c3b1633bfc..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "RuntimeMetaData.h"
-
-using namespace antlr4;
-
-const std::string RuntimeMetaData::VERSION = "4.7.1";
-
-std::string RuntimeMetaData::getRuntimeVersion() { return VERSION; }
-
-void RuntimeMetaData::checkVersion(const std::string& generatingToolVersion,
- const std::string& compileTimeVersion) {
- std::string runtimeVersion = VERSION;
- bool runtimeConflictsWithGeneratingTool = false;
- bool runtimeConflictsWithCompileTimeTool = false;
-
- if (generatingToolVersion != "") {
- runtimeConflictsWithGeneratingTool =
- runtimeVersion != generatingToolVersion &&
- getMajorMinorVersion(runtimeVersion) !=
- getMajorMinorVersion(generatingToolVersion);
- }
-
- runtimeConflictsWithCompileTimeTool =
- runtimeVersion != compileTimeVersion &&
- getMajorMinorVersion(runtimeVersion) !=
- getMajorMinorVersion(compileTimeVersion);
-
- if (runtimeConflictsWithGeneratingTool) {
- std::cerr << "ANTLR Tool version " << generatingToolVersion
- << " used for code generation does not match "
- "the current runtime version "
- << runtimeVersion << std::endl;
- }
- if (runtimeConflictsWithCompileTimeTool) {
- std::cerr << "ANTLR Runtime version " << compileTimeVersion
- << " used for parser compilation does not match "
- "the current runtime version "
- << runtimeVersion << std::endl;
- }
-}
-
-std::string RuntimeMetaData::getMajorMinorVersion(const std::string& version) {
- size_t firstDot = version.find('.');
- size_t secondDot = firstDot != std::string::npos
- ? version.find('.', firstDot + 1)
- : std::string::npos;
- size_t firstDash = version.find('-');
- size_t referenceLength = version.size();
- if (secondDot != std::string::npos) {
- referenceLength = std::min(referenceLength, secondDot);
- }
-
- if (firstDash != std::string::npos) {
- referenceLength = std::min(referenceLength, firstDash);
- }
-
- return version.substr(0, referenceLength);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.h
deleted file mode 100644
index 72c2354bdb..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/RuntimeMetaData.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// This class provides access to the current version of the ANTLR 4 runtime
-/// library as compile-time and runtime constants, along with methods for
-/// checking for matching version numbers and notifying listeners in the case
-/// where a version mismatch is detected.
-///
-/// <para>
-/// The runtime version information is provided by <seealso cref="#VERSION"/>
-/// and <seealso cref="#getRuntimeVersion()"/>. Detailed information about these
-/// values is provided in the documentation for each member.</para>
-///
-/// <para>
-/// The runtime version check is implemented by <seealso cref="#checkVersion"/>.
-/// Detailed information about incorporating this call into user code, as well
-/// as its use in generated code, is provided in the documentation for the
-/// method.</para>
-///
-/// <para>
-/// Version strings x.y and x.y.z are considered "compatible" and no error
-/// would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are
-/// considered "compatible" because the major and minor components x.y
-/// are the same in each.</para>
-///
-/// <para>
-/// To trap any error messages issued by this code, use System.setErr()
-/// in your main() startup code.
-/// </para>
-///
-/// @since 4.3
-/// </summary>
-class ANTLR4CPP_PUBLIC RuntimeMetaData {
- public:
- /// A compile-time constant containing the current version of the ANTLR 4
- /// runtime library.
- ///
- /// <para>
- /// This compile-time constant value allows generated parsers and other
- /// libraries to include a literal reference to the version of the ANTLR 4
- /// runtime library the code was compiled against. At each release, we
- /// change this value.</para>
- ///
- /// <para>Version numbers are assumed to have the form
- ///
- /// <em>major</em>.<em>minor</em>.<em>patch</em>.<em>revision</em>-<em>suffix</em>,
- ///
- /// with the individual components defined as follows.</para>
- ///
- /// <ul>
- /// <li><em>major</em> is a required non-negative integer, and is equal to
- /// {@code 4} for ANTLR 4.</li>
- /// <li><em>minor</em> is a required non-negative integer.</li>
- /// <li><em>patch</em> is an optional non-negative integer. When
- /// <em>patch</em> is omitted, the {@code .} (dot) appearing before it is
- /// also omitted.</li>
- /// <li><em>revision</em> is an optional non-negative integer, and may only
- /// be included when <em>patch</em> is also included. When <em>revision</em>
- /// is omitted, the {@code .} (dot) appearing before it is also omitted.</li>
- /// <li><em>suffix</em> is an optional string. When <em>suffix</em> is
- /// omitted, the {@code -} (hyphen-minus) appearing before it is also
- /// omitted.</li>
- /// </ul>
- static const std::string VERSION;
-
- /// <summary>
- /// Gets the currently executing version of the ANTLR 4 runtime library.
- ///
- /// <para>
- /// This method provides runtime access to the <seealso cref="#VERSION"/>
- /// field, as opposed to directly referencing the field as a compile-time
- /// constant.</para>
- /// </summary>
- /// <returns> The currently executing version of the ANTLR 4 library
- /// </returns>
-
- static std::string getRuntimeVersion();
-
- /// <summary>
- /// This method provides the ability to detect mismatches between the version
- /// of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a
- /// parser was compiled against, and the version of the ANTLR runtime which
- /// is currently executing.
- ///
- /// <para>
- /// The version check is designed to detect the following two specific
- /// scenarios.</para>
- ///
- /// <ul>
- /// <li>The ANTLR Tool version used for code generation does not match the
- /// currently executing runtime version.</li>
- /// <li>The ANTLR Runtime version referenced at the time a parser was
- /// compiled does not match the currently executing runtime version.</li>
- /// </ul>
- ///
- /// <para>
- /// Starting with ANTLR 4.3, the code generator emits a call to this method
- /// using two constants in each generated lexer and parser: a hard-coded
- /// constant indicating the version of the tool used to generate the parser
- /// and a reference to the compile-time constant <seealso cref="#VERSION"/>.
- /// At runtime, this method is called during the initialization of the
- /// generated parser to detect mismatched versions, and notify the registered
- /// listeners prior to creating instances of the parser.</para>
- ///
- /// <para>
- /// This method does not perform any detection or filtering of semantic
- /// changes between tool and runtime versions. It simply checks for a
- /// version match and emits an error to stderr if a difference
- /// is detected.</para>
- ///
- /// <para>
- /// Note that some breaking changes between releases could result in other
- /// types of runtime exceptions, such as a <seealso cref="LinkageError"/>,
- /// prior to calling this method. In these cases, the underlying version
- /// mismatch will not be reported here. This method is primarily intended to
- /// notify users of potential semantic changes between releases that do not
- /// result in binary compatibility problems which would be detected by the
- /// class loader. As with semantic changes, changes that break binary
- /// compatibility between releases are mentioned in the release notes
- /// accompanying the affected release.</para>
- ///
- /// <para>
- /// <strong>Additional note for target developers:</strong> The version check
- /// implemented by this class is designed to address specific compatibility
- /// concerns that may arise during the execution of Java applications. Other
- /// targets should consider the implementation of this method in the context
- /// of that target's known execution environment, which may or may not
- /// resemble the design provided for the Java target.</para>
- /// </summary>
- /// <param name="generatingToolVersion"> The version of the tool used to
- /// generate a parser. This value may be null when called from user code that
- /// was not generated by, and does not reference, the ANTLR 4 Tool itself.
- /// </param> <param name="compileTimeVersion"> The version of the runtime the
- /// parser was compiled against. This should always be passed using a direct
- /// reference to <seealso cref="#VERSION"/>. </param>
- static void checkVersion(const std::string& generatingToolVersion,
- const std::string& compileTimeVersion);
-
- /// <summary>
- /// Gets the major and minor version numbers from a version string. For
- /// details about the syntax of the input {@code version}, see the
- /// documentation for <seealso cref="#VERSION"/>. E.g., from x.y.z return x.y.
- /// </summary>
- /// <param name="version"> The complete version string. </param>
- /// <returns> A string of the form <em>major</em>.<em>minor</em> containing
- /// only the major and minor components of the version string. </returns>
- static std::string getMajorMinorVersion(const std::string& version);
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.cpp
deleted file mode 100644
index fce01c12dc..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-
-antlr4::Token::~Token() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.h
deleted file mode 100644
index 42515b9348..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Token.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "IntStream.h"
-
-namespace antlr4 {
-
-/// A token has properties: text, type, line, character position in the line
-/// (so we can ignore tabs), token channel, index, and source from which
-/// we obtained this token.
-class ANTLR4CPP_PUBLIC Token {
- public:
- static const size_t INVALID_TYPE = 0;
-
- /// During lookahead operations, this "token" signifies we hit rule end ATN
- /// state and did not follow it despite needing to.
- static const size_t EPSILON = static_cast<size_t>(-2);
- static const size_t MIN_USER_TOKEN_TYPE = 1;
- static const size_t EOF = IntStream::EOF;
-
- virtual ~Token();
-
- /// All tokens go to the parser (unless skip() is called in that rule)
- /// on a particular "channel". The parser tunes to a particular channel
- /// so that whitespace etc... can go to the parser on a "hidden" channel.
- static const size_t DEFAULT_CHANNEL = 0;
-
- /// Anything on a channel other than DEFAULT_CHANNEL is not parsed
- /// by the parser.
- static const size_t HIDDEN_CHANNEL = 1;
-
- /**
- * This is the minimum constant value which can be assigned to a
- * user-defined token channel.
- *
- * <p>
- * The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are
- * assigned to the predefined channels {@link #DEFAULT_CHANNEL} and
- * {@link #HIDDEN_CHANNEL}.</p>
- *
- * @see Token#getChannel()
- */
- static const size_t MIN_USER_CHANNEL_VALUE = 2;
-
- /// Get the text of the token.
- virtual std::string getText() const = 0;
-
- /// Get the token type of the token
- virtual size_t getType() const = 0;
-
- /// The line number on which the 1st character of this token was matched,
- /// line=1..n
- virtual size_t getLine() const = 0;
-
- /// The index of the first character of this token relative to the
- /// beginning of the line at which it occurs, 0..n-1
- virtual size_t getCharPositionInLine() const = 0;
-
- /// Return the channel of this token. Each token can arrive at the parser
- /// on a different channel, but the parser only "tunes" to a single channel.
- /// The parser ignores everything not on DEFAULT_CHANNEL.
- virtual size_t getChannel() const = 0;
-
- /// An index from 0..n-1 of the token object in the input stream.
- /// This must be valid in order to print token streams and
- /// use TokenRewriteStream.
- ///
- /// Return INVALID_INDEX to indicate that this token was conjured up since
- /// it doesn't have a valid index.
- virtual size_t getTokenIndex() const = 0;
-
- /// The starting character index of the token
- /// This method is optional; return INVALID_INDEX if not implemented.
- virtual size_t getStartIndex() const = 0;
-
- /// The last character index of the token.
- /// This method is optional; return INVALID_INDEX if not implemented.
- virtual size_t getStopIndex() const = 0;
-
- /// Gets the <seealso cref="TokenSource"/> which created this token.
- virtual TokenSource* getTokenSource() const = 0;
-
- /// Gets the <seealso cref="CharStream"/> from which this token was derived.
- virtual CharStream* getInputStream() const = 0;
-
- virtual std::string toString() const = 0;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenFactory.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenFactory.h
deleted file mode 100644
index 33e46db197..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenFactory.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
-/// The default mechanism for creating tokens. It's used by default in Lexer and
-/// the error handling strategy (to create missing tokens). Notifying the
-/// parser of a new factory means that it notifies its token source and error
-/// strategy.
-template <typename Symbol>
-class ANTLR4CPP_PUBLIC TokenFactory {
- public:
- virtual ~TokenFactory() {}
-
- /// This is the method used to create tokens in the lexer and in the
- /// error handling strategy. If text != null, then the start and stop positions
- /// are wiped to -1 and the text override is set in the CommonToken.
- virtual std::unique_ptr<Symbol> create(
- std::pair<TokenSource*, CharStream*> source, size_t type,
- const std::string& text, size_t channel, size_t start, size_t stop,
- size_t line, size_t charPositionInLine) = 0;
-
- /// Generically useful
- virtual std::unique_ptr<Symbol> create(size_t type,
- const std::string& text) = 0;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.cpp
deleted file mode 100644
index d51283a03b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "TokenSource.h"
-
-antlr4::TokenSource::~TokenSource() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.h
deleted file mode 100644
index eb3b94b452..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenSource.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenFactory.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// A source of tokens must provide a sequence of tokens via <seealso
-/// cref="#nextToken()"/> and also must reveal its source of characters;
-/// <seealso cref="CommonToken"/>'s text is computed from a <seealso
-/// cref="CharStream"/>; it only stores indices into the char stream. <p/> Errors
-/// from the lexer are never passed to the parser. Either you want to keep going
-/// or you do not upon token recognition error. If you do not want to continue
-/// lexing then you do not want to continue parsing. Just throw an exception not
-/// under <seealso cref="RecognitionException"/> and Java will naturally toss
-/// you all the way out of the recognizers. If you want to continue lexing then
-/// you should not throw an exception to the parser--it has already requested a
-/// token. Keep lexing until you get a valid one. Just report errors and keep
-/// going, looking for a valid token.
-/// </summary>
-class ANTLR4CPP_PUBLIC TokenSource {
- public:
- virtual ~TokenSource();
-
- /// Return a <seealso cref="Token"/> object from your input stream (usually a
- /// <seealso cref="CharStream"/>). Do not fail/return upon lexing error; keep
- /// chewing on the characters until you get a good one; errors are not passed
- /// through to the parser.
- virtual std::unique_ptr<Token> nextToken() = 0;
-
- /// <summary>
- /// Get the line number for the current position in the input stream. The
- /// first line in the input is line 1.
- /// </summary>
- /// <returns> The line number for the current position in the input stream, or
- /// 0 if the current token source does not track line numbers. </returns>
- virtual size_t getLine() const = 0;
-
- /// <summary>
- /// Get the index into the current line for the current position in the input
- /// stream. The first character on a line has position 0.
- /// </summary>
-  /// <returns> The character position within the current line, or (size_t)-1
-  /// if the current token source does not track character positions.
- /// </returns>
- virtual size_t getCharPositionInLine() = 0;
-
- /// <summary>
- /// Get the <seealso cref="CharStream"/> from which this token source is
- /// currently providing tokens.
- /// </summary>
- /// <returns> The <seealso cref="CharStream"/> associated with the current
- /// position in the input, or {@code null} if no input stream is available for
- /// the token source. </returns>
- virtual CharStream* getInputStream() = 0;
-
- /// <summary>
- /// Gets the name of the underlying input source. This method returns a
- /// non-null, non-empty string. If such a name is not known, this method
- /// returns <seealso cref="IntStream#UNKNOWN_SOURCE_NAME"/>.
- /// </summary>
- virtual std::string getSourceName() = 0;
-
- /// <summary>
- /// Set the <seealso cref="TokenFactory"/> this token source should use for
- /// creating <seealso cref="Token"/> objects from the input.
- /// </summary>
- /// <param name="factory"> The <seealso cref="TokenFactory"/> to use for
- /// creating tokens. </param>
- template <typename T1>
- void setTokenFactory(TokenFactory<T1>* /*factory*/) {}
-
- /// <summary>
- /// Gets the <seealso cref="TokenFactory"/> this token source is currently
- /// using for creating <seealso cref="Token"/> objects from the input.
- /// </summary>
- /// <returns> The <seealso cref="TokenFactory"/> currently used by this token
- /// source. </returns>
- virtual Ref<TokenFactory<CommonToken>> getTokenFactory() = 0;
-};
-
-} // namespace antlr4
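As a minimal usage sketch of the contract documented above (illustrative only; it assumes the caller already owns some concrete TokenSource, typically a generated lexer), a driver can drain the source until the EOF token without ever handling lexer errors itself:

#include <memory>
#include <string>
#include <vector>

#include "Token.h"
#include "TokenSource.h"

// Pull tokens until the EOF sentinel. nextToken() never fails on bad input;
// the lexer reports errors and keeps producing tokens, as documented above.
std::vector<std::string> collectTokenTexts(antlr4::TokenSource& source) {
  std::vector<std::string> texts;
  while (true) {
    std::unique_ptr<antlr4::Token> token = source.nextToken();
    if (token->getType() == antlr4::Token::EOF) {
      break;
    }
    texts.push_back(token->getText());
  }
  return texts;
}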
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.cpp
deleted file mode 100644
index 32c768de72..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "TokenStream.h"
-
-using namespace antlr4;
-
-TokenStream::~TokenStream() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.h
deleted file mode 100644
index 1dd53e3d25..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStream.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "IntStream.h"
-
-namespace antlr4 {
-
-/// <summary>
-/// An <seealso cref="IntStream"/> whose symbols are <seealso cref="Token"/>
-/// instances.
-/// </summary>
-class ANTLR4CPP_PUBLIC TokenStream : public IntStream {
- /// <summary>
- /// Get the <seealso cref="Token"/> instance associated with the value
- /// returned by <seealso cref="#LA LA(k)"/>. This method has the same pre- and
- /// post-conditions as <seealso cref="IntStream#LA"/>. In addition, when the
- /// preconditions of this method are met, the return value is non-null and the
- /// value of
- /// {@code LT(k).getType()==LA(k)}.
- /// </summary>
- /// <seealso cref= IntStream#LA </seealso>
- public:
- virtual ~TokenStream();
-
- virtual Token* LT(ssize_t k) = 0;
-
- /// <summary>
- /// Gets the <seealso cref="Token"/> at the specified {@code index} in the
- /// stream. When the preconditions of this method are met, the return value is
- /// non-null. <p/> The preconditions for this method are the same as the
- /// preconditions of <seealso cref="IntStream#seek"/>. If the behavior of
- /// {@code seek(index)} is unspecified for the current state and given {@code
- /// index}, then the behavior of this method is also unspecified. <p/> The
- /// symbol referred to by {@code index} differs from {@code seek()} only in
- /// the case of filtering streams where {@code index} lies before the end of
- /// the stream. Unlike {@code seek()}, this method does not adjust
- /// {@code index} to point to a non-ignored symbol.
- /// </summary>
-  /// <exception cref="IllegalArgumentException"> if {@code index} is less than 0
- /// </exception> <exception cref="UnsupportedOperationException"> if the
- /// stream does not support retrieving the token at the specified index
- /// </exception>
- virtual Token* get(size_t index) const = 0;
-
- /// Gets the underlying TokenSource which provides tokens for this stream.
- virtual TokenSource* getTokenSource() const = 0;
-
- /// <summary>
- /// Return the text of all tokens within the specified {@code interval}. This
- /// method behaves like the following code (including potential exceptions
-  /// for violating preconditions of <seealso cref="#get"/>), but may be
-  /// optimized by the specific implementation.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = "";
- /// for (int i = interval.a; i <= interval.b; i++) {
- /// text += stream.get(i).getText();
- /// }
- /// </pre>
- /// </summary>
- /// <param name="interval"> The interval of tokens within this stream to get
- /// text for. </param> <returns> The text of all tokens within the specified
- /// interval in this stream.
- /// </returns>
- /// <exception cref="NullPointerException"> if {@code interval} is {@code
- /// null} </exception>
- virtual std::string getText(const misc::Interval& interval) = 0;
-
- /// <summary>
- /// Return the text of all tokens in the stream. This method behaves like the
- /// following code, including potential exceptions from the calls to
- /// <seealso cref="IntStream#size"/> and <seealso cref="#getText(Interval)"/>,
- /// but may be optimized by the specific implementation.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = stream.getText(new Interval(0, stream.size()));
- /// </pre>
- /// </summary>
- /// <returns> The text of all tokens in the stream. </returns>
- virtual std::string getText() = 0;
-
- /// <summary>
- /// Return the text of all tokens in the source interval of the specified
- /// context. This method behaves like the following code, including potential
- /// exceptions from the call to <seealso cref="#getText(Interval)"/>, but may
- /// be optimized by the specific implementation.
-  /// <p/>
- /// If {@code ctx.getSourceInterval()} does not return a valid interval of
- /// tokens provided by this stream, the behavior is unspecified.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = stream.getText(ctx.getSourceInterval());
- /// </pre>
- /// </summary>
- /// <param name="ctx"> The context providing the source interval of tokens to
- /// get text for. </param> <returns> The text of all tokens within the source
- /// interval of {@code ctx}. </returns>
- virtual std::string getText(RuleContext* ctx) = 0;
-
- /// <summary>
- /// Return the text of all tokens in this stream between {@code start} and
- /// {@code stop} (inclusive).
- /// <p/>
- /// If the specified {@code start} or {@code stop} token was not provided by
- /// this stream, or if the {@code stop} occurred before the {@code start}
- /// token, the behavior is unspecified.
- /// <p/>
- /// For streams which ensure that the <seealso cref="Token#getTokenIndex"/>
- /// method is accurate for all of its provided tokens, this method behaves
- /// like the following code. Other streams may implement this method in other
- /// ways provided the behavior is consistent with this at a high level.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = "";
- /// for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
- /// text += stream.get(i).getText();
- /// }
- /// </pre>
- /// </summary>
- /// <param name="start"> The first token in the interval to get text for.
- /// </param> <param name="stop"> The last token in the interval to get text
- /// for (inclusive). </param> <returns> The text of all tokens lying between
- /// the specified {@code start} and {@code stop} tokens.
- /// </returns>
- /// <exception cref="UnsupportedOperationException"> if this stream does not
- /// support this method for the specified tokens </exception>
- virtual std::string getText(Token* start, Token* stop) = 0;
-};
-
-} // namespace antlr4
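The getText(Token*, Token*) contract above reduces to a short loop when token indices are accurate. The sketch below simply restates the documented reference loop against the deleted interface; it introduces no new API and assumes both tokens came from the given stream.

#include <string>

#include "Token.h"
#include "TokenStream.h"

// Concatenate the text of every token between start and stop (inclusive),
// mirroring the reference loop in the getText(Token*, Token*) documentation.
std::string textBetween(antlr4::TokenStream& stream, antlr4::Token* start,
                        antlr4::Token* stop) {
  std::string text;
  for (size_t i = start->getTokenIndex(); i <= stop->getTokenIndex(); ++i) {
    text += stream.get(i)->getText();
  }
  return text;
}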
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp
deleted file mode 100644
index 188d04cf42..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.cpp
+++ /dev/null
@@ -1,463 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "Token.h"
-#include "TokenStream.h"
-#include "misc/Interval.h"
-
-#include "TokenStreamRewriter.h"
-
-using namespace antlr4;
-
-using antlr4::misc::Interval;
-
-TokenStreamRewriter::RewriteOperation::RewriteOperation(
- TokenStreamRewriter* outerInstance_, size_t index_)
- : outerInstance(outerInstance_) {
- InitializeInstanceFields();
- this->index = index_;
-}
-
-TokenStreamRewriter::RewriteOperation::RewriteOperation(
- TokenStreamRewriter* outerInstance_, size_t index_,
- const std::string& text_)
- : outerInstance(outerInstance_) {
- InitializeInstanceFields();
- this->index = index_;
- this->text = text_;
-}
-
-TokenStreamRewriter::RewriteOperation::~RewriteOperation() {}
-
-size_t TokenStreamRewriter::RewriteOperation::execute(std::string* /*buf*/) {
- return index;
-}
-
-std::string TokenStreamRewriter::RewriteOperation::toString() {
- std::string opName = "TokenStreamRewriter";
- size_t dollarIndex = opName.find('$');
- opName = opName.substr(dollarIndex + 1, opName.length() - (dollarIndex + 1));
- return "<" + opName + "@" +
- outerInstance->tokens->get(dollarIndex)->getText() + ":\"" + text +
- "\">";
-}
-
-void TokenStreamRewriter::RewriteOperation::InitializeInstanceFields() {
- instructionIndex = 0;
- index = 0;
-}
-
-TokenStreamRewriter::InsertBeforeOp::InsertBeforeOp(
- TokenStreamRewriter* outerInstance_, size_t index_,
- const std::string& text_)
- : RewriteOperation(outerInstance_, index_, text_),
- outerInstance(outerInstance_) {}
-
-size_t TokenStreamRewriter::InsertBeforeOp::execute(std::string* buf) {
- buf->append(text);
- if (outerInstance->tokens->get(index)->getType() != Token::EOF) {
- buf->append(outerInstance->tokens->get(index)->getText());
- }
- return index + 1;
-}
-
-TokenStreamRewriter::ReplaceOp::ReplaceOp(TokenStreamRewriter* outerInstance_,
- size_t from, size_t to,
- const std::string& text)
- : RewriteOperation(outerInstance_, from, text),
- outerInstance(outerInstance_) {
- InitializeInstanceFields();
- lastIndex = to;
-}
-
-size_t TokenStreamRewriter::ReplaceOp::execute(std::string* buf) {
- buf->append(text);
- return lastIndex + 1;
-}
-
-std::string TokenStreamRewriter::ReplaceOp::toString() {
- if (text.empty()) {
- return "<DeleteOp@" + outerInstance->tokens->get(index)->getText() + ".." +
- outerInstance->tokens->get(lastIndex)->getText() + ">";
- }
- return "<ReplaceOp@" + outerInstance->tokens->get(index)->getText() + ".." +
- outerInstance->tokens->get(lastIndex)->getText() + ":\"" + text +
- "\">";
-}
-
-void TokenStreamRewriter::ReplaceOp::InitializeInstanceFields() {
- lastIndex = 0;
-}
-
-//------------------ TokenStreamRewriter
-//-------------------------------------------------------------------------------
-
-const std::string TokenStreamRewriter::DEFAULT_PROGRAM_NAME = "default";
-
-TokenStreamRewriter::TokenStreamRewriter(TokenStream* tokens_)
- : tokens(tokens_) {
- _programs[DEFAULT_PROGRAM_NAME].reserve(PROGRAM_INIT_SIZE);
-}
-
-TokenStreamRewriter::~TokenStreamRewriter() {
- for (auto program : _programs) {
- for (auto operation : program.second) {
- delete operation;
- }
- }
-}
-
-TokenStream* TokenStreamRewriter::getTokenStream() { return tokens; }
-
-void TokenStreamRewriter::rollback(size_t instructionIndex) {
- rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
-}
-
-void TokenStreamRewriter::rollback(const std::string& programName,
- size_t instructionIndex) {
- std::vector<RewriteOperation*> is = _programs[programName];
- if (is.size() > 0) {
- _programs.insert({programName, std::vector<RewriteOperation*>(
- is.begin() + MIN_TOKEN_INDEX,
- is.begin() + instructionIndex)});
- }
-}
-
-void TokenStreamRewriter::deleteProgram() {
- deleteProgram(DEFAULT_PROGRAM_NAME);
-}
-
-void TokenStreamRewriter::deleteProgram(const std::string& programName) {
- rollback(programName, MIN_TOKEN_INDEX);
-}
-
-void TokenStreamRewriter::insertAfter(Token* t, const std::string& text) {
- insertAfter(DEFAULT_PROGRAM_NAME, t, text);
-}
-
-void TokenStreamRewriter::insertAfter(size_t index, const std::string& text) {
- insertAfter(DEFAULT_PROGRAM_NAME, index, text);
-}
-
-void TokenStreamRewriter::insertAfter(const std::string& programName, Token* t,
- const std::string& text) {
- insertAfter(programName, t->getTokenIndex(), text);
-}
-
-void TokenStreamRewriter::insertAfter(const std::string& programName,
- size_t index, const std::string& text) {
- // to insert after, just insert before next index (even if past end)
- insertBefore(programName, index + 1, text);
-}
-
-void TokenStreamRewriter::insertBefore(Token* t, const std::string& text) {
- insertBefore(DEFAULT_PROGRAM_NAME, t, text);
-}
-
-void TokenStreamRewriter::insertBefore(size_t index, const std::string& text) {
- insertBefore(DEFAULT_PROGRAM_NAME, index, text);
-}
-
-void TokenStreamRewriter::insertBefore(const std::string& programName, Token* t,
- const std::string& text) {
- insertBefore(programName, t->getTokenIndex(), text);
-}
-
-void TokenStreamRewriter::insertBefore(const std::string& programName,
- size_t index, const std::string& text) {
- RewriteOperation* op =
- new InsertBeforeOp(this, index, text); /* mem-check: deleted in d-tor */
- std::vector<RewriteOperation*>& rewrites = getProgram(programName);
- op->instructionIndex = rewrites.size();
- rewrites.push_back(op);
-}
-
-void TokenStreamRewriter::replace(size_t index, const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, index, index, text);
-}
-
-void TokenStreamRewriter::replace(size_t from, size_t to,
- const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, from, to, text);
-}
-
-void TokenStreamRewriter::replace(Token* indexT, const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
-}
-
-void TokenStreamRewriter::replace(Token* from, Token* to,
- const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, from, to, text);
-}
-
-void TokenStreamRewriter::replace(const std::string& programName, size_t from,
- size_t to, const std::string& text) {
- if (from > to || to >= tokens->size()) {
- throw IllegalArgumentException(
- "replace: range invalid: " + std::to_string(from) + ".." +
- std::to_string(to) + "(size = " + std::to_string(tokens->size()) + ")");
- }
- RewriteOperation* op =
- new ReplaceOp(this, from, to, text); /* mem-check: deleted in d-tor */
- std::vector<RewriteOperation*>& rewrites = getProgram(programName);
- op->instructionIndex = rewrites.size();
- rewrites.push_back(op);
-}
-
-void TokenStreamRewriter::replace(const std::string& programName, Token* from,
- Token* to, const std::string& text) {
- replace(programName, from->getTokenIndex(), to->getTokenIndex(), text);
-}
-
-void TokenStreamRewriter::Delete(size_t index) {
- Delete(DEFAULT_PROGRAM_NAME, index, index);
-}
-
-void TokenStreamRewriter::Delete(size_t from, size_t to) {
- Delete(DEFAULT_PROGRAM_NAME, from, to);
-}
-
-void TokenStreamRewriter::Delete(Token* indexT) {
- Delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
-}
-
-void TokenStreamRewriter::Delete(Token* from, Token* to) {
- Delete(DEFAULT_PROGRAM_NAME, from, to);
-}
-
-void TokenStreamRewriter::Delete(const std::string& programName, size_t from,
- size_t to) {
- std::string nullString;
- replace(programName, from, to, nullString);
-}
-
-void TokenStreamRewriter::Delete(const std::string& programName, Token* from,
- Token* to) {
- std::string nullString;
- replace(programName, from, to, nullString);
-}
-
-size_t TokenStreamRewriter::getLastRewriteTokenIndex() {
- return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
-}
-
-size_t TokenStreamRewriter::getLastRewriteTokenIndex(
- const std::string& programName) {
- if (_lastRewriteTokenIndexes.find(programName) ==
- _lastRewriteTokenIndexes.end()) {
- return INVALID_INDEX;
- }
- return _lastRewriteTokenIndexes[programName];
-}
-
-void TokenStreamRewriter::setLastRewriteTokenIndex(
- const std::string& programName, size_t i) {
- _lastRewriteTokenIndexes.insert({programName, i});
-}
-
-std::vector<TokenStreamRewriter::RewriteOperation*>&
-TokenStreamRewriter::getProgram(const std::string& name) {
- auto iterator = _programs.find(name);
- if (iterator == _programs.end()) {
- return initializeProgram(name);
- }
- return iterator->second;
-}
-
-std::vector<TokenStreamRewriter::RewriteOperation*>&
-TokenStreamRewriter::initializeProgram(const std::string& name) {
- _programs[name].reserve(PROGRAM_INIT_SIZE);
- return _programs[name];
-}
-
-std::string TokenStreamRewriter::getText() {
- return getText(DEFAULT_PROGRAM_NAME, Interval(0UL, tokens->size() - 1));
-}
-
-std::string TokenStreamRewriter::getText(std::string programName) {
- return getText(programName, Interval(0UL, tokens->size() - 1));
-}
-
-std::string TokenStreamRewriter::getText(const Interval& interval) {
- return getText(DEFAULT_PROGRAM_NAME, interval);
-}
-
-std::string TokenStreamRewriter::getText(const std::string& programName,
- const Interval& interval) {
- std::vector<TokenStreamRewriter::RewriteOperation*>& rewrites =
- _programs[programName];
- size_t start = interval.a;
- size_t stop = interval.b;
-
- // ensure start/end are in range
- if (stop > tokens->size() - 1) {
- stop = tokens->size() - 1;
- }
- if (start == INVALID_INDEX) {
- start = 0;
- }
-
-  if (rewrites.empty()) {
- return tokens->getText(interval); // no instructions to execute
- }
- std::string buf;
-
- // First, optimize instruction stream
- std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> indexToOp =
- reduceToSingleOperationPerIndex(rewrites);
-
- // Walk buffer, executing instructions and emitting tokens
- size_t i = start;
- while (i <= stop && i < tokens->size()) {
- RewriteOperation* op = indexToOp[i];
- indexToOp.erase(i); // remove so any left have index size-1
- Token* t = tokens->get(i);
- if (op == nullptr) {
- // no operation at that index, just dump token
- if (t->getType() != Token::EOF) {
- buf.append(t->getText());
- }
- i++; // move to next token
- } else {
- i = op->execute(&buf); // execute operation and skip
- }
- }
-
- // include stuff after end if it's last index in buffer
- // So, if they did an insertAfter(lastValidIndex, "foo"), include
- // foo if end==lastValidIndex.
- if (stop == tokens->size() - 1) {
- // Scan any remaining operations after last token
- // should be included (they will be inserts).
- for (auto op : indexToOp) {
- if (op.second->index >= tokens->size() - 1) {
- buf.append(op.second->text);
- }
- }
- }
- return buf;
-}
-
-std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*>
-TokenStreamRewriter::reduceToSingleOperationPerIndex(
- std::vector<TokenStreamRewriter::RewriteOperation*>& rewrites) {
- // WALK REPLACES
- for (size_t i = 0; i < rewrites.size(); ++i) {
- TokenStreamRewriter::RewriteOperation* op = rewrites[i];
- ReplaceOp* rop = dynamic_cast<ReplaceOp*>(op);
- if (rop == nullptr) continue;
-
- // Wipe prior inserts within range
- std::vector<InsertBeforeOp*> inserts =
- getKindOfOps<InsertBeforeOp>(rewrites, i);
- for (auto iop : inserts) {
- if (iop->index == rop->index) {
- // E.g., insert before 2, delete 2..2; update replace
- // text to include insert before, kill insert
- delete rewrites[iop->instructionIndex];
- rewrites[iop->instructionIndex] = nullptr;
- rop->text = iop->text + (!rop->text.empty() ? rop->text : "");
- } else if (iop->index > rop->index && iop->index <= rop->lastIndex) {
- // delete insert as it's a no-op.
- delete rewrites[iop->instructionIndex];
- rewrites[iop->instructionIndex] = nullptr;
- }
- }
- // Drop any prior replaces contained within
- std::vector<ReplaceOp*> prevReplaces = getKindOfOps<ReplaceOp>(rewrites, i);
- for (auto prevRop : prevReplaces) {
- if (prevRop->index >= rop->index &&
- prevRop->lastIndex <= rop->lastIndex) {
- // delete replace as it's a no-op.
- delete rewrites[prevRop->instructionIndex];
- rewrites[prevRop->instructionIndex] = nullptr;
- continue;
- }
- // throw exception unless disjoint or identical
- bool disjoint =
- prevRop->lastIndex < rop->index || prevRop->index > rop->lastIndex;
- bool same =
- prevRop->index == rop->index && prevRop->lastIndex == rop->lastIndex;
- // Delete special case of replace (text==null):
- // D.i-j.u D.x-y.v | boundaries overlap combine to
- // max(min)..max(right)
- if (prevRop->text.empty() && rop->text.empty() && !disjoint) {
- delete rewrites[prevRop->instructionIndex];
- rewrites[prevRop->instructionIndex] = nullptr; // kill first delete
- rop->index = std::min(prevRop->index, rop->index);
- rop->lastIndex = std::max(prevRop->lastIndex, rop->lastIndex);
- std::cout << "new rop " << rop << std::endl;
- } else if (!disjoint && !same) {
- throw IllegalArgumentException(
- "replace op boundaries of " + rop->toString() +
- " overlap with previous " + prevRop->toString());
- }
- }
- }
-
- // WALK INSERTS
- for (size_t i = 0; i < rewrites.size(); i++) {
- InsertBeforeOp* iop = dynamic_cast<InsertBeforeOp*>(rewrites[i]);
- if (iop == nullptr) continue;
-
- // combine current insert with prior if any at same index
-
- std::vector<InsertBeforeOp*> prevInserts =
- getKindOfOps<InsertBeforeOp>(rewrites, i);
- for (auto prevIop : prevInserts) {
- if (prevIop->index == iop->index) { // combine objects
- // convert to strings...we're in
- // process of toString'ing whole
- // token buffer so no lazy eval issue
- // with any templates
- iop->text = catOpText(&iop->text, &prevIop->text);
- // delete redundant prior insert
- delete rewrites[prevIop->instructionIndex];
- rewrites[prevIop->instructionIndex] = nullptr;
- }
- }
- // look for replaces where iop.index is in range; error
- std::vector<ReplaceOp*> prevReplaces = getKindOfOps<ReplaceOp>(rewrites, i);
- for (auto rop : prevReplaces) {
- if (iop->index == rop->index) {
- rop->text = catOpText(&iop->text, &rop->text);
- delete rewrites[i];
- rewrites[i] = nullptr; // delete current insert
- continue;
- }
- if (iop->index >= rop->index && iop->index <= rop->lastIndex) {
- throw IllegalArgumentException("insert op " + iop->toString() +
- " within boundaries of previous " +
- rop->toString());
- }
- }
- }
-
- std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> m;
- for (TokenStreamRewriter::RewriteOperation* op : rewrites) {
- if (op == nullptr) { // ignore deleted ops
- continue;
- }
- if (m.count(op->index) > 0) {
- throw RuntimeException("should only be one op per index");
- }
- m[op->index] = op;
- }
-
- return m;
-}
-
-std::string TokenStreamRewriter::catOpText(std::string* a, std::string* b) {
- std::string x = "";
- std::string y = "";
- if (a != nullptr) {
- x = *a;
- }
- if (b != nullptr) {
- y = *b;
- }
- return x + y;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.h
deleted file mode 100644
index a73372997b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/TokenStreamRewriter.h
+++ /dev/null
@@ -1,307 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-namespace antlr4 {
-
-/**
- * Useful for rewriting out a buffered input token stream after doing some
- * augmentation or other manipulations on it.
- *
- * <p>
- * You can insert stuff, replace, and delete chunks. Note that the operations
- * are done lazily--only if you convert the buffer to a {@link String} with
- * {@link TokenStream#getText()}. This is very efficient because you are not
- * moving data around all the time. As the buffer of tokens is converted to
- * strings, the {@link #getText()} method(s) scan the input token stream and
- * check to see if there is an operation at the current index. If so, the
- * operation is done and then normal {@link String} rendering continues on the
- * buffer. This is like having multiple Turing machine instruction streams
- * (programs) operating on a single input tape. :)</p>
- *
- * <p>
- * This rewriter makes no modifications to the token stream. It does not ask the
- * stream to fill itself up nor does it advance the input cursor. The token
- * stream {@link TokenStream#index()} will return the same value before and
- * after any {@link #getText()} call.</p>
- *
- * <p>
- * The rewriter only works on tokens that you have in the buffer and ignores the
- * current input cursor. If you are buffering tokens on-demand, calling
- * {@link #getText()} halfway through the input will only do rewrites for those
- * tokens in the first half of the file.</p>
- *
- * <p>
- * Since the operations are done lazily at {@link #getText}-time, operations do
- * not screw up the token index values. That is, an insert operation at token
- * index {@code i} does not change the index values for tokens
- * {@code i}+1..n-1.</p>
- *
- * <p>
- * Because operations never actually alter the buffer, you may always get the
- * original token stream back without undoing anything. Since the instructions
- * are queued up, you can easily simulate transactions and roll back any changes
- * if there is an error just by removing instructions. For example,</p>
- *
- * <pre>
- * CharStream input = new ANTLRFileStream("input");
- * TLexer lex = new TLexer(input);
- * CommonTokenStream tokens = new CommonTokenStream(lex);
- * T parser = new T(tokens);
- * TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
- * parser.startRule();
- * </pre>
- *
- * <p>
- * Then in the rules, you can execute (assuming rewriter is visible):</p>
- *
- * <pre>
- * Token t,u;
- * ...
- * rewriter.insertAfter(t, "text to put after t");
- * rewriter.insertAfter(u, "text after u");
- * System.out.println(rewriter.getText());
- * </pre>
- *
- * <p>
- * You can also have multiple "instruction streams" and get multiple rewrites
- * from a single pass over the input. Just name the instruction streams and use
- * that name again when printing the buffer. This could be useful for generating
- * a C file and also its header file--all from the same buffer:</p>
- *
- * <pre>
- * rewriter.insertAfter("pass1", t, "text to put after t");}
- * rewriter.insertAfter("pass2", u, "text after u");}
- * System.out.println(rewriter.getText("pass1"));
- * System.out.println(rewriter.getText("pass2"));
- * </pre>
- *
- * <p>
- * If you don't use named rewrite streams, a "default" stream is used as the
- * first example shows.</p>
- */
-class ANTLR4CPP_PUBLIC TokenStreamRewriter {
- public:
- static const std::string DEFAULT_PROGRAM_NAME;
- static const size_t PROGRAM_INIT_SIZE = 100;
- static const size_t MIN_TOKEN_INDEX = 0;
-
- TokenStreamRewriter(TokenStream* tokens);
- virtual ~TokenStreamRewriter();
-
- TokenStream* getTokenStream();
-
- virtual void rollback(size_t instructionIndex);
-
- /// Rollback the instruction stream for a program so that
- /// the indicated instruction (via instructionIndex) is no
- /// longer in the stream. UNTESTED!
- virtual void rollback(const std::string& programName,
- size_t instructionIndex);
-
- virtual void deleteProgram();
-
- /// Reset the program so that no instructions exist.
- virtual void deleteProgram(const std::string& programName);
- virtual void insertAfter(Token* t, const std::string& text);
- virtual void insertAfter(size_t index, const std::string& text);
- virtual void insertAfter(const std::string& programName, Token* t,
- const std::string& text);
- virtual void insertAfter(const std::string& programName, size_t index,
- const std::string& text);
-
- virtual void insertBefore(Token* t, const std::string& text);
- virtual void insertBefore(size_t index, const std::string& text);
- virtual void insertBefore(const std::string& programName, Token* t,
- const std::string& text);
- virtual void insertBefore(const std::string& programName, size_t index,
- const std::string& text);
-
- virtual void replace(size_t index, const std::string& text);
- virtual void replace(size_t from, size_t to, const std::string& text);
- virtual void replace(Token* indexT, const std::string& text);
- virtual void replace(Token* from, Token* to, const std::string& text);
- virtual void replace(const std::string& programName, size_t from, size_t to,
- const std::string& text);
- virtual void replace(const std::string& programName, Token* from, Token* to,
- const std::string& text);
-
- virtual void Delete(size_t index);
- virtual void Delete(size_t from, size_t to);
- virtual void Delete(Token* indexT);
- virtual void Delete(Token* from, Token* to);
- virtual void Delete(const std::string& programName, size_t from, size_t to);
- virtual void Delete(const std::string& programName, Token* from, Token* to);
-
- virtual size_t getLastRewriteTokenIndex();
-
- /// Return the text from the original tokens altered per the
- /// instructions given to this rewriter.
- virtual std::string getText();
-
- /** Return the text from the original tokens altered per the
- * instructions given to this rewriter in programName.
- */
- std::string getText(std::string programName);
-
- /// Return the text associated with the tokens in the interval from the
- /// original token stream but with the alterations given to this rewriter.
- /// The interval refers to the indexes in the original token stream.
- /// We do not alter the token stream in any way, so the indexes
- /// and intervals are still consistent. Includes any operations done
- /// to the first and last token in the interval. So, if you did an
- /// insertBefore on the first token, you would get that insertion.
- /// The same is true if you do an insertAfter the stop token.
- virtual std::string getText(const misc::Interval& interval);
-
- virtual std::string getText(const std::string& programName,
- const misc::Interval& interval);
-
- protected:
- class RewriteOperation {
- public:
-    /// Token buffer index this operation applies to.
-    size_t index;
-    std::string text;
-
-    /// What index into the rewrites list are we?
-    size_t instructionIndex;
-
- RewriteOperation(TokenStreamRewriter* outerInstance, size_t index);
- RewriteOperation(TokenStreamRewriter* outerInstance, size_t index,
- const std::string& text);
- virtual ~RewriteOperation();
-
- /// Execute the rewrite operation by possibly adding to the buffer.
- /// Return the index of the next token to operate on.
-
- virtual size_t execute(std::string* buf);
- virtual std::string toString();
-
- private:
- TokenStreamRewriter* const outerInstance;
- void InitializeInstanceFields();
- };
-
- class InsertBeforeOp : public RewriteOperation {
- private:
- TokenStreamRewriter* const outerInstance;
-
- public:
- InsertBeforeOp(TokenStreamRewriter* outerInstance, size_t index,
- const std::string& text);
-
- virtual size_t execute(std::string* buf) override;
- };
-
- class ReplaceOp : public RewriteOperation {
- private:
- TokenStreamRewriter* const outerInstance;
-
- public:
- size_t lastIndex;
-
- ReplaceOp(TokenStreamRewriter* outerInstance, size_t from, size_t to,
- const std::string& text);
- virtual size_t execute(std::string* buf) override;
- virtual std::string toString() override;
-
- private:
- void InitializeInstanceFields();
- };
-
- /// Our source stream
- TokenStream* const tokens;
-
- /// You may have multiple, named streams of rewrite operations.
- /// I'm calling these things "programs."
- /// Maps String (name) -> rewrite (List)
- std::map<std::string, std::vector<RewriteOperation*>> _programs;
-
- /// <summary>
- /// Map String (program name) -> Integer index </summary>
- std::map<std::string, size_t> _lastRewriteTokenIndexes;
- virtual size_t getLastRewriteTokenIndex(const std::string& programName);
- virtual void setLastRewriteTokenIndex(const std::string& programName,
- size_t i);
- virtual std::vector<RewriteOperation*>& getProgram(const std::string& name);
-
- /// <summary>
- /// We need to combine operations and report invalid operations (like
-  /// overlapping replaces that are not completely nested). Inserts to
- /// same index need to be combined etc... Here are the cases:
- ///
- /// I.i.u I.j.v leave alone, nonoverlapping
- /// I.i.u I.i.v combine: Iivu
- ///
- /// R.i-j.u R.x-y.v | i-j in x-y delete first R
- /// R.i-j.u R.i-j.v delete first R
- /// R.i-j.u R.x-y.v | x-y in i-j ERROR
- /// R.i-j.u R.x-y.v | boundaries overlap ERROR
- ///
- /// Delete special case of replace (text==null):
- /// D.i-j.u D.x-y.v | boundaries overlap combine to
- /// max(min)..max(right)
- ///
- /// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
- /// we're not deleting i)
- /// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
- /// R.x-y.v I.i.u | i in x-y ERROR
- /// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
- /// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
- ///
- /// I.i.u = insert u before op @ index i
- /// R.x-y.u = replace x-y indexed tokens with u
- ///
- /// First we need to examine replaces. For any replace op:
- ///
- /// 1. wipe out any insertions before op within that range.
- /// 2. Drop any replace op before that is contained completely within
- /// that range.
- /// 3. Throw exception upon boundary overlap with any previous replace.
- ///
- /// Then we can deal with inserts:
- ///
- /// 1. for any inserts to same index, combine even if not adjacent.
- /// 2. for any prior replace with same left boundary, combine this
- /// insert with replace and delete this replace.
- /// 3. throw exception if index in same range as previous replace
- ///
- /// Don't actually delete; make op null in list. Easier to walk list.
- /// Later we can throw as we add to index -> op map.
- ///
- /// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
- /// inserted stuff would be before the replace range. But, if you
- /// add tokens in front of a method body '{' and then delete the method
- /// body, I think the stuff before the '{' you added should disappear too.
- ///
- /// Return a map from token index to operation.
- /// </summary>
- virtual std::unordered_map<size_t, RewriteOperation*>
- reduceToSingleOperationPerIndex(std::vector<RewriteOperation*>& rewrites);
-
- virtual std::string catOpText(std::string* a, std::string* b);
-
- /// Get all operations before an index of a particular kind.
- template <typename T>
- std::vector<T*> getKindOfOps(std::vector<RewriteOperation*> rewrites,
- size_t before) {
- std::vector<T*> ops;
- for (size_t i = 0; i < before && i < rewrites.size(); i++) {
- T* op = dynamic_cast<T*>(rewrites[i]);
- if (op == nullptr) { // ignore deleted or non matching entries
- continue;
- }
- ops.push_back(op);
- }
- return ops;
- }
-
- private:
- std::vector<RewriteOperation*>& initializeProgram(const std::string& name);
-};
-
-} // namespace antlr4
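The Java-flavoured examples in the class comment above translate directly to the C++ API declared in this header. The sketch below is illustrative only: the token indices and the "pass2" program name are hypothetical, and it assumes the caller already holds a filled TokenStream with at least a handful of tokens.

#include <iostream>
#include <string>

#include "TokenStream.h"
#include "TokenStreamRewriter.h"

// Queue a few lazy edits against an existing token buffer, then render the
// default program and a named program from the same single pass.
void annotate(antlr4::TokenStream* tokens) {
  antlr4::TokenStreamRewriter rewriter(tokens);

  rewriter.insertBefore(0, "/* generated header */\n");   // default program
  rewriter.replace(2, 3, "replacement text");             // default program
  rewriter.insertAfter("pass2", 0, " /* pass2 only */");  // named program

  std::cout << rewriter.getText() << std::endl;         // default edits only
  std::cout << rewriter.getText("pass2") << std::endl;  // "pass2" edits only
}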
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp
deleted file mode 100644
index e99cdc24db..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.cpp
+++ /dev/null
@@ -1,215 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "misc/Interval.h"
-#include "support/StringUtils.h"
-
-#include "UnbufferedCharStream.h"
-
-using namespace antlrcpp;
-using namespace antlr4;
-using namespace antlr4::misc;
-
-UnbufferedCharStream::UnbufferedCharStream(std::wistream& input)
- : _input(input) {
- InitializeInstanceFields();
-
- // The vector's size is what used to be n in Java code.
- fill(1); // prime
-}
-
-void UnbufferedCharStream::consume() {
- if (LA(1) == EOF) {
- throw IllegalStateException("cannot consume EOF");
- }
-
- // buf always has at least data[p==0] in this method due to ctor
- _lastChar = _data[_p]; // track last char for LA(-1)
-
- if (_p == _data.size() - 1 && _numMarkers == 0) {
- size_t capacity = _data.capacity();
- _data.clear();
- _data.reserve(capacity);
-
- _p = 0;
- _lastCharBufferStart = _lastChar;
- } else {
- _p++;
- }
-
- _currentCharIndex++;
- sync(1);
-}
-
-void UnbufferedCharStream::sync(size_t want) {
- if (_p + want <= _data.size()) // Already enough data loaded?
- return;
-
- fill(_p + want - _data.size());
-}
-
-size_t UnbufferedCharStream::fill(size_t n) {
- for (size_t i = 0; i < n; i++) {
- if (_data.size() > 0 && _data.back() == 0xFFFF) {
- return i;
- }
-
- try {
- char32_t c = nextChar();
- add(c);
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- } catch (IOException& ioe) {
- // throw_with_nested is not available before VS 2015.
- throw ioe;
-#else
- } catch (IOException& /*ioe*/) {
- std::throw_with_nested(RuntimeException());
-#endif
- }
- }
-
- return n;
-}
-
-char32_t UnbufferedCharStream::nextChar() {
- wchar_t result = 0;
- _input >> result;
- return result;
-}
-
-void UnbufferedCharStream::add(char32_t c) { _data += c; }
-
-size_t UnbufferedCharStream::LA(ssize_t i) {
- if (i == -1) { // special case
- return _lastChar;
- }
-
- // We can look back only as many chars as we have buffered.
- ssize_t index = static_cast<ssize_t>(_p) + i - 1;
- if (index < 0) {
- throw IndexOutOfBoundsException();
- }
-
- if (i > 0) {
- sync(static_cast<size_t>(i)); // No need to sync if we look back.
- }
- if (static_cast<size_t>(index) >= _data.size()) {
- return EOF;
- }
-
- if (_data[static_cast<size_t>(index)] == 0xFFFF) {
- return EOF;
- }
-
- return _data[static_cast<size_t>(index)];
-}
-
-ssize_t UnbufferedCharStream::mark() {
- if (_numMarkers == 0) {
- _lastCharBufferStart = _lastChar;
- }
-
- ssize_t mark = -static_cast<ssize_t>(_numMarkers) - 1;
- _numMarkers++;
- return mark;
-}
-
-void UnbufferedCharStream::release(ssize_t marker) {
- ssize_t expectedMark = -static_cast<ssize_t>(_numMarkers);
- if (marker != expectedMark) {
- throw IllegalStateException("release() called with an invalid marker.");
- }
-
- _numMarkers--;
- if (_numMarkers == 0 && _p > 0) {
- _data.erase(0, _p);
- _p = 0;
- _lastCharBufferStart = _lastChar;
- }
-}
-
-size_t UnbufferedCharStream::index() { return _currentCharIndex; }
-
-void UnbufferedCharStream::seek(size_t index) {
- if (index == _currentCharIndex) {
- return;
- }
-
- if (index > _currentCharIndex) {
- sync(index - _currentCharIndex);
- index = std::min(index, getBufferStartIndex() + _data.size() - 1);
- }
-
- // index == to bufferStartIndex should set p to 0
- ssize_t i =
- static_cast<ssize_t>(index) - static_cast<ssize_t>(getBufferStartIndex());
- if (i < 0) {
- throw IllegalArgumentException(
- std::string("cannot seek to negative index ") + std::to_string(index));
- } else if (i >= static_cast<ssize_t>(_data.size())) {
- throw UnsupportedOperationException(
- "Seek to index outside buffer: " + std::to_string(index) + " not in " +
- std::to_string(getBufferStartIndex()) + ".." +
- std::to_string(getBufferStartIndex() + _data.size()));
- }
-
- _p = static_cast<size_t>(i);
- _currentCharIndex = index;
- if (_p == 0) {
- _lastChar = _lastCharBufferStart;
- } else {
- _lastChar = _data[_p - 1];
- }
-}
-
-size_t UnbufferedCharStream::size() {
- throw UnsupportedOperationException("Unbuffered stream cannot know its size");
-}
-
-std::string UnbufferedCharStream::getSourceName() const {
- if (name.empty()) {
- return UNKNOWN_SOURCE_NAME;
- }
-
- return name;
-}
-
-std::string UnbufferedCharStream::getText(const misc::Interval& interval) {
-  if (interval.a < 0 || interval.b < interval.a - 1) {
- throw IllegalArgumentException("invalid interval");
- }
-
- size_t bufferStartIndex = getBufferStartIndex();
- if (!_data.empty() && _data.back() == 0xFFFF) {
- if (interval.a + interval.length() > bufferStartIndex + _data.size()) {
- throw IllegalArgumentException(
- "the interval extends past the end of the stream");
- }
- }
-
- if (interval.a < static_cast<ssize_t>(bufferStartIndex) ||
- interval.b >= ssize_t(bufferStartIndex + _data.size())) {
- throw UnsupportedOperationException(
- "interval " + interval.toString() +
- " outside buffer: " + std::to_string(bufferStartIndex) + ".." +
- std::to_string(bufferStartIndex + _data.size() - 1));
- }
- // convert from absolute to local index
- size_t i = interval.a - bufferStartIndex;
- return utf32_to_utf8(_data.substr(i, interval.length()));
-}
-
-size_t UnbufferedCharStream::getBufferStartIndex() const {
- return _currentCharIndex - _p;
-}
-
-void UnbufferedCharStream::InitializeInstanceFields() {
- _p = 0;
- _numMarkers = 0;
- _lastChar = 0;
- _lastCharBufferStart = 0;
- _currentCharIndex = 0;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.h
deleted file mode 100644
index a79bdfe003..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedCharStream.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CharStream.h"
-
-namespace antlr4 {
-
-/// Do not buffer up the entire char stream. It does keep a small buffer
-/// for efficiency and also buffers while a mark exists (set by the
-/// lookahead prediction in the parser). "Unbuffered" here refers to the fact
-/// that it doesn't buffer all data, not that it loads characters on demand.
-class ANTLR4CPP_PUBLIC UnbufferedCharStream : public CharStream {
- public:
- /// The name or source of this char stream.
- std::string name;
-
- UnbufferedCharStream(std::wistream& input);
-
- virtual void consume() override;
- virtual size_t LA(ssize_t i) override;
-
- /// <summary>
- /// Return a marker that we can release later.
- /// <p/>
- /// The specific marker value used for this class allows for some level of
- /// protection against misuse where {@code seek()} is called on a mark or
- /// {@code release()} is called in the wrong order.
- /// </summary>
- virtual ssize_t mark() override;
-
- /// <summary>
- /// Decrement number of markers, resetting buffer if we hit 0. </summary>
- /// <param name="marker"> </param>
- virtual void release(ssize_t marker) override;
- virtual size_t index() override;
-
- /// <summary>
- /// Seek to absolute character index, which might not be in the current
- /// sliding window. Move {@code p} to {@code index-bufferStartIndex}.
- /// </summary>
- virtual void seek(size_t index) override;
- virtual size_t size() override;
- virtual std::string getSourceName() const override;
- virtual std::string getText(const misc::Interval& interval) override;
-
- protected:
-/// A moving window buffer of the data being scanned. While there's a marker,
-/// we keep adding to buffer. Otherwise, <seealso cref="#consume consume()"/>
-/// resets so we start filling at index 0 again.
-// UTF-32 encoded.
-#if defined(_MSC_VER) && _MSC_VER == 1900
- i32string _data; // Custom type for VS 2015.
- typedef __int32 storage_type;
-#else
- std::u32string _data;
- typedef char32_t storage_type;
-#endif
-
- /// <summary>
- /// 0..n-1 index into <seealso cref="#data data"/> of next character.
- /// <p/>
- /// The {@code LA(1)} character is {@code data[p]}. If {@code p == n}, we are
- /// out of buffered characters.
- /// </summary>
- size_t _p;
-
- /// <summary>
- /// Count up with <seealso cref="#mark mark()"/> and down with
- /// <seealso cref="#release release()"/>. When we {@code release()} the last
- /// mark,
- /// {@code numMarkers} reaches 0 and we reset the buffer. Copy
- /// {@code data[p]..data[n-1]} to {@code data[0]..data[(n-1)-p]}.
- /// </summary>
- size_t _numMarkers;
-
- /// This is the {@code LA(-1)} character for the current position.
- size_t _lastChar; // UTF-32
-
- /// <summary>
- /// When {@code numMarkers > 0}, this is the {@code LA(-1)} character for the
- /// first character in <seealso cref="#data data"/>. Otherwise, this is
- /// unspecified.
- /// </summary>
- size_t _lastCharBufferStart; // UTF-32
-
- /// <summary>
- /// Absolute character index. It's the index of the character about to be
- /// read via {@code LA(1)}. Goes from 0 to the number of characters in the
- /// entire stream, although the stream size is unknown before the end is
- /// reached.
- /// </summary>
- size_t _currentCharIndex;
-
- std::wistream& _input;
-
- /// <summary>
- /// Make sure we have 'want' elements from current position <seealso cref="#p
- /// p"/>. Last valid {@code p} index is {@code data.length-1}. {@code
-  /// p+want-1} is the char index 'want' elements ahead. If we need 1 element,
- /// {@code (p+1-1)==p} must be less than {@code data.length}.
- /// </summary>
- virtual void sync(size_t want);
-
- /// <summary>
- /// Add {@code n} characters to the buffer. Returns the number of characters
- /// actually added to the buffer. If the return value is less than {@code n},
- /// then EOF was reached before {@code n} characters could be added.
- /// </summary>
- virtual size_t fill(size_t n);
-
- /// Override to provide different source of characters than
- /// <seealso cref="#input input"/>.
- virtual char32_t nextChar();
- virtual void add(char32_t c);
- size_t getBufferStartIndex() const;
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
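A small usage sketch of the mark()/release() protocol described above (illustrative only; the two-character lookahead is arbitrary, and the wide stream stands in for whatever input the caller already has):

#include <istream>

#include "UnbufferedCharStream.h"

// Speculatively look two characters ahead without letting the sliding window
// discard them, then release the mark so the buffer can shrink again.
bool startsWithAB(std::wistream& input) {
  antlr4::UnbufferedCharStream stream(input);
  ssize_t marker = stream.mark();
  bool result = stream.LA(1) == 'a' && stream.LA(2) == 'b';
  stream.release(marker);
  return result;
}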
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp
deleted file mode 100644
index e5a3c1468a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "RuleContext.h"
-#include "Token.h"
-#include "TokenSource.h"
-#include "WritableToken.h"
-#include "assert.h"
-#include "misc/Interval.h"
-#include "support/Arrays.h"
-
-#include "UnbufferedTokenStream.h"
-
-using namespace antlr4;
-
-UnbufferedTokenStream::UnbufferedTokenStream(TokenSource* tokenSource)
- : UnbufferedTokenStream(tokenSource, 256) {}
-
-UnbufferedTokenStream::UnbufferedTokenStream(TokenSource* tokenSource,
- int /*bufferSize*/)
- : _tokenSource(tokenSource),
- _lastToken(nullptr),
- _lastTokenBufferStart(nullptr) {
- InitializeInstanceFields();
- fill(1); // prime the pump
-}
-
-UnbufferedTokenStream::~UnbufferedTokenStream() {}
-
-Token* UnbufferedTokenStream::get(size_t i) const { // get absolute index
- size_t bufferStartIndex = getBufferStartIndex();
- if (i < bufferStartIndex || i >= bufferStartIndex + _tokens.size()) {
- throw IndexOutOfBoundsException(
- std::string("get(") + std::to_string(i) +
- std::string(") outside buffer: ") + std::to_string(bufferStartIndex) +
- std::string("..") + std::to_string(bufferStartIndex + _tokens.size()));
- }
- return _tokens[i - bufferStartIndex].get();
-}
-
-Token* UnbufferedTokenStream::LT(ssize_t i) {
- if (i == -1) {
- return _lastToken;
- }
-
- sync(i);
- ssize_t index = static_cast<ssize_t>(_p) + i - 1;
- if (index < 0) {
- throw IndexOutOfBoundsException(std::string("LT(") + std::to_string(i) +
- std::string(") gives negative index"));
- }
-
- if (index >= static_cast<ssize_t>(_tokens.size())) {
- assert(_tokens.size() > 0 && _tokens.back()->getType() == EOF);
- return _tokens.back().get();
- }
-
- return _tokens[static_cast<size_t>(index)].get();
-}
-
-size_t UnbufferedTokenStream::LA(ssize_t i) { return LT(i)->getType(); }
-
-TokenSource* UnbufferedTokenStream::getTokenSource() const {
- return _tokenSource;
-}
-
-std::string UnbufferedTokenStream::getText() { return ""; }
-
-std::string UnbufferedTokenStream::getText(RuleContext* ctx) {
- return getText(ctx->getSourceInterval());
-}
-
-std::string UnbufferedTokenStream::getText(Token* start, Token* stop) {
- return getText(misc::Interval(start->getTokenIndex(), stop->getTokenIndex()));
-}
-
-void UnbufferedTokenStream::consume() {
- if (LA(1) == EOF) {
- throw IllegalStateException("cannot consume EOF");
- }
-
- // buf always has at least tokens[p==0] in this method due to ctor
- _lastToken = _tokens[_p].get(); // track last token for LT(-1)
-
- // if we're at last token and no markers, opportunity to flush buffer
- if (_p == _tokens.size() - 1 && _numMarkers == 0) {
- _tokens.clear();
- _p = 0;
- _lastTokenBufferStart = _lastToken;
- } else {
- ++_p;
- }
-
- ++_currentTokenIndex;
- sync(1);
-}
-
-/// <summary>
-/// Make sure we have 'need' elements from current position <seealso cref="#p
-/// p"/>. Last valid
-/// {@code p} index is {@code tokens.length-1}. {@code p+need-1} is the tokens
-/// index 'need' elements ahead. If we need 1 element, {@code (p+1-1)==p} must
-/// be less than {@code tokens.length}.
-/// </summary>
-void UnbufferedTokenStream::sync(ssize_t want) {
- ssize_t need = (static_cast<ssize_t>(_p) + want - 1) -
- static_cast<ssize_t>(_tokens.size()) +
- 1; // how many more elements we need?
- if (need > 0) {
- fill(static_cast<size_t>(need));
- }
-}
-
-/// <summary>
-/// Add {@code n} elements to the buffer. Returns the number of tokens
-/// actually added to the buffer. If the return value is less than {@code n},
-/// then EOF was reached before {@code n} tokens could be added.
-/// </summary>
-size_t UnbufferedTokenStream::fill(size_t n) {
- for (size_t i = 0; i < n; i++) {
- if (_tokens.size() > 0 && _tokens.back()->getType() == EOF) {
- return i;
- }
-
- add(_tokenSource->nextToken());
- }
-
- return n;
-}
-
-void UnbufferedTokenStream::add(std::unique_ptr<Token> t) {
- WritableToken* writable = dynamic_cast<WritableToken*>(t.get());
- if (writable != nullptr) {
- writable->setTokenIndex(int(getBufferStartIndex() + _tokens.size()));
- }
-
- _tokens.push_back(std::move(t));
-}
-
-/// <summary>
-/// Return a marker that we can release later.
-/// <p/>
-/// The specific marker value used for this class allows for some level of
-/// protection against misuse where {@code seek()} is called on a mark or
-/// {@code release()} is called in the wrong order.
-/// </summary>
-ssize_t UnbufferedTokenStream::mark() {
- if (_numMarkers == 0) {
- _lastTokenBufferStart = _lastToken;
- }
-
- int mark = -_numMarkers - 1;
- _numMarkers++;
- return mark;
-}
-
-void UnbufferedTokenStream::release(ssize_t marker) {
- ssize_t expectedMark = -_numMarkers;
- if (marker != expectedMark) {
- throw IllegalStateException("release() called with an invalid marker.");
- }
-
- _numMarkers--;
- if (_numMarkers == 0) { // can we release buffer?
- if (_p > 0) {
- // Copy tokens[p]..tokens[n-1] to tokens[0]..tokens[(n-1)-p], reset ptrs
- // p is last valid token; move nothing if p==n as we have no valid char
- _tokens.erase(_tokens.begin(),
- _tokens.begin() + static_cast<ssize_t>(_p));
- _p = 0;
- }
-
- _lastTokenBufferStart = _lastToken;
- }
-}
-
-size_t UnbufferedTokenStream::index() { return _currentTokenIndex; }
-
-void UnbufferedTokenStream::seek(size_t index) { // seek to absolute index
- if (index == _currentTokenIndex) {
- return;
- }
-
- if (index > _currentTokenIndex) {
- sync(ssize_t(index - _currentTokenIndex));
- index = std::min(index, getBufferStartIndex() + _tokens.size() - 1);
- }
-
- size_t bufferStartIndex = getBufferStartIndex();
- if (bufferStartIndex > index) {
- throw IllegalArgumentException(
- std::string("cannot seek to negative index ") + std::to_string(index));
- }
-
- size_t i = index - bufferStartIndex;
- if (i >= _tokens.size()) {
- throw UnsupportedOperationException(
- std::string("seek to index outside buffer: ") + std::to_string(index) +
- " not in " + std::to_string(bufferStartIndex) + ".." +
- std::to_string(bufferStartIndex + _tokens.size()));
- }
-
- _p = i;
- _currentTokenIndex = index;
- if (_p == 0) {
- _lastToken = _lastTokenBufferStart;
- } else {
- _lastToken = _tokens[_p - 1].get();
- }
-}
-
-size_t UnbufferedTokenStream::size() {
- throw UnsupportedOperationException("Unbuffered stream cannot know its size");
-}
-
-std::string UnbufferedTokenStream::getSourceName() const {
- return _tokenSource->getSourceName();
-}
-
-std::string UnbufferedTokenStream::getText(const misc::Interval& interval) {
- size_t bufferStartIndex = getBufferStartIndex();
- size_t bufferStopIndex = bufferStartIndex + _tokens.size() - 1;
-
- size_t start = interval.a;
- size_t stop = interval.b;
- if (start < bufferStartIndex || stop > bufferStopIndex) {
- throw UnsupportedOperationException(
- std::string("interval ") + interval.toString() +
- " not in token buffer window: " + std::to_string(bufferStartIndex) +
- ".." + std::to_string(bufferStopIndex));
- }
-
- size_t a = start - bufferStartIndex;
- size_t b = stop - bufferStartIndex;
-
- std::stringstream ss;
- for (size_t i = a; i <= b; i++) {
- Token* t = _tokens[i].get();
- if (i > 0) ss << ", ";
- ss << t->getText();
- }
-
- return ss.str();
-}
-
-size_t UnbufferedTokenStream::getBufferStartIndex() const {
- return _currentTokenIndex - _p;
-}
-
-void UnbufferedTokenStream::InitializeInstanceFields() {
- _p = 0;
- _numMarkers = 0;
- _currentTokenIndex = 0;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.h
deleted file mode 100644
index b4f4012793..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/UnbufferedTokenStream.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenStream.h"
-
-namespace antlr4 {
-
-class ANTLR4CPP_PUBLIC UnbufferedTokenStream : public TokenStream {
- public:
- UnbufferedTokenStream(TokenSource* tokenSource);
- UnbufferedTokenStream(TokenSource* tokenSource, int bufferSize);
- UnbufferedTokenStream(const UnbufferedTokenStream& other) = delete;
- virtual ~UnbufferedTokenStream();
-
- UnbufferedTokenStream& operator=(const UnbufferedTokenStream& other) = delete;
-
- virtual Token* get(size_t i) const override;
- virtual Token* LT(ssize_t i) override;
- virtual size_t LA(ssize_t i) override;
-
- virtual TokenSource* getTokenSource() const override;
-
- virtual std::string getText(const misc::Interval& interval) override;
- virtual std::string getText() override;
- virtual std::string getText(RuleContext* ctx) override;
- virtual std::string getText(Token* start, Token* stop) override;
-
- virtual void consume() override;
-
- /// <summary>
- /// Return a marker that we can release later.
- /// <p/>
- /// The specific marker value used for this class allows for some level of
- /// protection against misuse where {@code seek()} is called on a mark or
- /// {@code release()} is called in the wrong order.
- /// </summary>
- virtual ssize_t mark() override;
- virtual void release(ssize_t marker) override;
- virtual size_t index() override;
- virtual void seek(size_t index) override;
- virtual size_t size() override;
- virtual std::string getSourceName() const override;
-
- protected:
- /// Make sure we have 'need' elements from current position p. Last valid
- /// p index is tokens.length - 1. p + need - 1 is the tokens index 'need'
- /// elements ahead. If we need 1 element, (p+1-1)==p must be less than
- /// tokens.length.
- TokenSource* _tokenSource;
-
- /// <summary>
- /// A moving window buffer of the data being scanned. While there's a marker,
- /// we keep adding to buffer. Otherwise, <seealso cref="#consume consume()"/>
- /// resets so we start filling at index 0 again.
- /// </summary>
-
- std::vector<std::unique_ptr<Token>> _tokens;
-
- /// <summary>
- /// 0..n-1 index into <seealso cref="#tokens tokens"/> of next token.
- /// <p/>
- /// The {@code LT(1)} token is {@code tokens[p]}. If {@code p == n}, we are
- /// out of buffered tokens.
- /// </summary>
- size_t _p;
-
- /// <summary>
- /// Count up with <seealso cref="#mark mark()"/> and down with
- /// <seealso cref="#release release()"/>. When we {@code release()} the last
- /// mark,
- /// {@code numMarkers} reaches 0 and we reset the buffer. Copy
- /// {@code tokens[p]..tokens[n-1]} to {@code tokens[0]..tokens[(n-1)-p]}.
- /// </summary>
- int _numMarkers;
-
- /// <summary>
- /// This is the {@code LT(-1)} token for the current position.
- /// </summary>
- Token* _lastToken;
-
- /// <summary>
- /// When {@code numMarkers > 0}, this is the {@code LT(-1)} token for the
- /// first token in <seealso cref="#tokens"/>. Otherwise, this is {@code null}.
- /// </summary>
- Token* _lastTokenBufferStart;
-
- /// <summary>
- /// Absolute token index. It's the index of the token about to be read via
- /// {@code LT(1)}. Goes from 0 to the number of tokens in the entire stream,
- /// although the stream size is unknown before the end is reached.
- /// <p/>
- /// This value is used to set the token indexes if the stream provides tokens
- /// that implement <seealso cref="WritableToken"/>.
- /// </summary>
- size_t _currentTokenIndex;
-
- virtual void sync(ssize_t want);
-
- /// <summary>
- /// Add {@code n} elements to the buffer. Returns the number of tokens
- /// actually added to the buffer. If the return value is less than {@code n},
- /// then EOF was reached before {@code n} tokens could be added.
- /// </summary>
- virtual size_t fill(size_t n);
- virtual void add(std::unique_ptr<Token> t);
-
- size_t getBufferStartIndex() const;
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace antlr4
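
Note: the header comments above describe mark()/release() as a reference count that decides when the token window may be reset. A simplified sketch of that idea, independent of the deleted runtime (MarkedWindow and its members are hypothetical):

#include <cassert>
#include <cstddef>
#include <deque>

class MarkedWindow {
 public:
  void push(int token) { window_.push_back(token); }

  int mark() { return ++numMarkers_; }

  void release(int marker) {
    assert(marker == numMarkers_ && "marks must be released in LIFO order");
    if (--numMarkers_ == 0) trimConsumedPrefix();
  }

  void consume() {
    ++consumed_;
    if (numMarkers_ == 0) trimConsumedPrefix();
  }

 private:
  void trimConsumedPrefix() {
    // With no marks outstanding nothing can seek back before the current
    // position, so the already-consumed prefix can be discarded.
    while (consumed_ > 0 && !window_.empty()) {
      window_.pop_front();
      --consumed_;
    }
  }

  std::deque<int> window_;
  std::size_t consumed_ = 0;
  int numMarkers_ = 0;
};

While a mark is held the window only grows; releasing the last mark lets the buffer shrink again, which is the invariant the _numMarkers field above maintains.
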
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.cpp
deleted file mode 100644
index 4259218462..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-
-#include "Vocabulary.h"
-
-using namespace antlr4::dfa;
-
-const Vocabulary Vocabulary::EMPTY_VOCABULARY;
-
-Vocabulary::Vocabulary(const std::vector<std::string>& literalNames,
- const std::vector<std::string>& symbolicNames)
- : Vocabulary(literalNames, symbolicNames, {}) {}
-
-Vocabulary::Vocabulary(const std::vector<std::string>& literalNames,
- const std::vector<std::string>& symbolicNames,
- const std::vector<std::string>& displayNames)
- : _literalNames(literalNames),
- _symbolicNames(symbolicNames),
- _displayNames(displayNames),
- _maxTokenType(
- std::max(_displayNames.size(),
- std::max(_literalNames.size(), _symbolicNames.size())) -
- 1) {
- // See note here on -1 part: https://github.com/antlr/antlr4/pull/1146
-}
-
-Vocabulary::~Vocabulary() {}
-
-Vocabulary Vocabulary::fromTokenNames(
- const std::vector<std::string>& tokenNames) {
- if (tokenNames.empty()) {
- return EMPTY_VOCABULARY;
- }
-
- std::vector<std::string> literalNames = tokenNames;
- std::vector<std::string> symbolicNames = tokenNames;
- std::locale locale;
- for (size_t i = 0; i < tokenNames.size(); i++) {
- std::string tokenName = tokenNames[i];
- if (tokenName == "") {
- continue;
- }
-
- if (!tokenName.empty()) {
- char firstChar = tokenName[0];
- if (firstChar == '\'') {
- symbolicNames[i] = "";
- continue;
- } else if (std::isupper(firstChar, locale)) {
- literalNames[i] = "";
- continue;
- }
- }
-
- // wasn't a literal or symbolic name
- literalNames[i] = "";
- symbolicNames[i] = "";
- }
-
- return Vocabulary(literalNames, symbolicNames, tokenNames);
-}
-
-size_t Vocabulary::getMaxTokenType() const { return _maxTokenType; }
-
-std::string Vocabulary::getLiteralName(size_t tokenType) const {
- if (tokenType < _literalNames.size()) {
- return _literalNames[tokenType];
- }
-
- return "";
-}
-
-std::string Vocabulary::getSymbolicName(size_t tokenType) const {
- if (tokenType == Token::EOF) {
- return "EOF";
- }
-
- if (tokenType < _symbolicNames.size()) {
- return _symbolicNames[tokenType];
- }
-
- return "";
-}
-
-std::string Vocabulary::getDisplayName(size_t tokenType) const {
- if (tokenType < _displayNames.size()) {
- std::string displayName = _displayNames[tokenType];
- if (!displayName.empty()) {
- return displayName;
- }
- }
-
- std::string literalName = getLiteralName(tokenType);
- if (!literalName.empty()) {
- return literalName;
- }
-
- std::string symbolicName = getSymbolicName(tokenType);
- if (!symbolicName.empty()) {
- return symbolicName;
- }
-
- return std::to_string(tokenType);
-}
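
Note: fromTokenNames() above classifies each legacy token name by its first character: a leading quote marks a literal name, an upper-case letter marks a symbolic name, anything else is neither. A minimal sketch of that heuristic; classifyTokenName and NameKind are hypothetical helpers, not runtime API.

#include <cctype>
#include <string>

enum class NameKind { kLiteral, kSymbolic, kNeither };

NameKind classifyTokenName(const std::string& name) {
  if (name.empty()) return NameKind::kNeither;
  unsigned char first = static_cast<unsigned char>(name.front());
  if (first == '\'') return NameKind::kLiteral;          // e.g. "'this'"
  if (std::isupper(first)) return NameKind::kSymbolic;   // e.g. "ID"
  return NameKind::kNeither;                             // e.g. "<INVALID>"
}
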
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.h
deleted file mode 100644
index 607d694559..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/Vocabulary.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace dfa {
-
-/// This class provides a default implementation of the <seealso
-/// cref="Vocabulary"/> interface.
-class ANTLR4CPP_PUBLIC Vocabulary {
- public:
- Vocabulary(Vocabulary const&) = default;
- virtual ~Vocabulary();
- Vocabulary& operator=(Vocabulary const&) = default;
-
- /// Gets an empty <seealso cref="Vocabulary"/> instance.
- ///
- /// <para>
- /// No literal or symbol names are assigned to token types, so
- /// <seealso cref="#getDisplayName(int)"/> returns the numeric value for all
- /// tokens except <seealso cref="Token#EOF"/>.</para>
- static const Vocabulary EMPTY_VOCABULARY;
-
- Vocabulary() {}
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="Vocabulary"/> from the
- /// specified literal and symbolic token names.
- /// </summary>
- /// <param name="literalNames"> The literal names assigned to tokens, or
- /// {@code null} if no literal names are assigned. </param> <param
- /// name="symbolicNames"> The symbolic names assigned to tokens, or
- /// {@code null} if no symbolic names are assigned.
- /// </param>
- /// <seealso cref= #getLiteralName(int) </seealso>
- /// <seealso cref= #getSymbolicName(int) </seealso>
- Vocabulary(const std::vector<std::string>& literalNames,
- const std::vector<std::string>& symbolicNames);
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="Vocabulary"/> from the
- /// specified literal, symbolic, and display token names.
- /// </summary>
- /// <param name="literalNames"> The literal names assigned to tokens, or
- /// {@code null} if no literal names are assigned. </param> <param
- /// name="symbolicNames"> The symbolic names assigned to tokens, or
- /// {@code null} if no symbolic names are assigned. </param>
- /// <param name="displayNames"> The display names assigned to tokens, or
- /// {@code null} to use the values in {@code literalNames} and {@code
- /// symbolicNames} as the source of display names, as described in <seealso
- /// cref="#getDisplayName(int)"/>.
- /// </param>
- /// <seealso cref= #getLiteralName(int) </seealso>
- /// <seealso cref= #getSymbolicName(int) </seealso>
- /// <seealso cref= #getDisplayName(int) </seealso>
- Vocabulary(const std::vector<std::string>& literalNames,
- const std::vector<std::string>& symbolicNames,
- const std::vector<std::string>& displayNames);
-
- /// <summary>
- /// Returns a <seealso cref="Vocabulary"/> instance from the specified set of
- /// token names. This method acts as a compatibility layer for the single
- /// {@code tokenNames} array generated by previous releases of ANTLR.
- ///
- /// <para>The resulting vocabulary instance returns {@code null} for
- /// <seealso cref="#getLiteralName(int)"/> and <seealso
- /// cref="#getSymbolicName(int)"/>, and the value from {@code tokenNames} for
- /// the display names.</para>
- /// </summary>
- /// <param name="tokenNames"> The token names, or {@code null} if no token
- /// names are available. </param> <returns> A <seealso cref="Vocabulary"/>
- /// instance which uses {@code tokenNames} for the display names of tokens.
- /// </returns>
- static Vocabulary fromTokenNames(const std::vector<std::string>& tokenNames);
-
- /// <summary>
- /// Returns the highest token type value. It can be used to iterate from
- /// zero to that number, inclusively, thus querying all stored entries.
- /// </summary> <returns> the highest token type value </returns>
- virtual size_t getMaxTokenType() const;
-
- /// <summary>
- /// Gets the string literal associated with a token type. The string returned
- /// by this method, when not {@code null}, can be used unaltered in a parser
- /// grammar to represent this token type.
- ///
- /// <para>The following table shows examples of lexer rules and the literal
- /// names assigned to the corresponding token types.</para>
- ///
- /// <table>
- /// <tr>
- /// <th>Rule</th>
- /// <th>Literal Name</th>
- /// <th>Java String Literal</th>
- /// </tr>
- /// <tr>
- /// <td>{@code THIS : 'this';}</td>
- /// <td>{@code 'this'}</td>
- /// <td>{@code "'this'"}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code SQUOTE : '\'';}</td>
- /// <td>{@code '\''}</td>
- /// <td>{@code "'\\''"}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code ID : [A-Z]+;}</td>
- /// <td>n/a</td>
- /// <td>{@code null}</td>
- /// </tr>
- /// </table>
- /// </summary>
- /// <param name="tokenType"> The token type.
- /// </param>
- /// <returns> The string literal associated with the specified token type, or
- /// {@code null} if no string literal is associated with the type. </returns>
- virtual std::string getLiteralName(size_t tokenType) const;
-
- /// <summary>
- /// Gets the symbolic name associated with a token type. The string returned
- /// by this method, when not {@code null}, can be used unaltered in a parser
- /// grammar to represent this token type.
- ///
- /// <para>This method supports token types defined by any of the following
- /// methods:</para>
- ///
- /// <ul>
- /// <li>Tokens created by lexer rules.</li>
- /// <li>Tokens defined in a <code>tokens{}</code> block in a lexer or parser
- /// grammar.</li>
- /// <li>The implicitly defined {@code EOF} token, which has the token type
- /// <seealso cref="Token#EOF"/>.</li>
- /// </ul>
- ///
- /// <para>The following table shows examples of lexer rules and the literal
- /// names assigned to the corresponding token types.</para>
- ///
- /// <table>
- /// <tr>
- /// <th>Rule</th>
- /// <th>Symbolic Name</th>
- /// </tr>
- /// <tr>
- /// <td>{@code THIS : 'this';}</td>
- /// <td>{@code THIS}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code SQUOTE : '\'';}</td>
- /// <td>{@code SQUOTE}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code ID : [A-Z]+;}</td>
- /// <td>{@code ID}</td>
- /// </tr>
- /// </table>
- /// </summary>
- /// <param name="tokenType"> The token type.
- /// </param>
- /// <returns> The symbolic name associated with the specified token type, or
- /// {@code null} if no symbolic name is associated with the type. </returns>
- virtual std::string getSymbolicName(size_t tokenType) const;
-
- /// <summary>
- /// Gets the display name of a token type.
- ///
- /// <para>ANTLR provides a default implementation of this method, but
- /// applications are free to override the behavior in any manner which makes
- /// sense for the application. The default implementation returns the first
- /// result from the following list which produces a non-{@code null}
- /// result.</para>
- ///
- /// <ol>
- /// <li>The result of <seealso cref="#getLiteralName"/></li>
- /// <li>The result of <seealso cref="#getSymbolicName"/></li>
- /// <li>The result of <seealso cref="Integer#toString"/></li>
- /// </ol>
- /// </summary>
- /// <param name="tokenType"> The token type.
- /// </param>
- /// <returns> The display name of the token type, for use in error reporting
- /// or other user-visible messages which reference specific token types.
- /// </returns>
- virtual std::string getDisplayName(size_t tokenType) const;
-
- private:
- std::vector<std::string> const _literalNames;
- std::vector<std::string> const _symbolicNames;
- std::vector<std::string> const _displayNames;
- const size_t _maxTokenType = 0;
-};
-
-} // namespace dfa
-} // namespace antlr4
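
Note: the getDisplayName() documentation above describes a fallback chain: explicit display name, then literal name, then symbolic name, then the numeric token type. A compact sketch of that chain (C++17); displayNameFor and the lookup callbacks are hypothetical stand-ins for the deleted accessors.

#include <cstddef>
#include <functional>
#include <string>

std::string displayNameFor(
    std::size_t tokenType,
    const std::function<std::string(std::size_t)>& displayName,
    const std::function<std::string(std::size_t)>& literalName,
    const std::function<std::string(std::size_t)>& symbolicName) {
  if (std::string s = displayName(tokenType); !s.empty()) return s;
  if (std::string s = literalName(tokenType); !s.empty()) return s;
  if (std::string s = symbolicName(tokenType); !s.empty()) return s;
  return std::to_string(tokenType);  // last resort: the raw type number
}
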
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.cpp
deleted file mode 100644
index b44cddf6c5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "WritableToken.h"
-
-antlr4::WritableToken::~WritableToken() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.h
deleted file mode 100644
index 189ce7d365..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/WritableToken.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-
-namespace antlr4 {
-
-class ANTLR4CPP_PUBLIC WritableToken : public Token {
- public:
- virtual ~WritableToken();
- virtual void setText(const std::string& text) = 0;
- virtual void setType(size_t ttype) = 0;
- virtual void setLine(size_t line) = 0;
- virtual void setCharPositionInLine(size_t pos) = 0;
- virtual void setChannel(size_t channel) = 0;
- virtual void setTokenIndex(size_t index) = 0;
-};
-
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-common.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-common.h
deleted file mode 100644
index b4ef14b0c8..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-common.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <assert.h>
-#include <limits.h>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <algorithm>
-#include <atomic>
-#include <bitset>
-#include <chrono>
-#include <condition_variable>
-#include <exception>
-#include <fstream>
-#include <functional>
-#include <iostream>
-#include <iterator>
-#include <limits>
-#include <list>
-#include <locale>
-#include <map>
-#include <memory>
-#include <mutex>
-#include <set>
-#include <sstream>
-#include <stack>
-#include <string>
-#include <type_traits>
-#include <typeinfo>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-// Defines for the Guid class and other platform dependent stuff.
-#ifdef _WIN32
-#ifdef _MSC_VER
-#pragma warning(disable : 4250) // Class inherits by dominance.
-#pragma warning(disable : 4512) // assignment operator could not be generated
-
-#if _MSC_VER < 1900
-// Before VS 2015 code like "while (true)" will create a (useless) warning in
-// level 4.
-#pragma warning(disable : 4127) // conditional expression is constant
-#endif
-#endif
-
-#define GUID_WINDOWS
-
-#ifdef _WIN64
-typedef __int64 ssize_t;
-#else
-typedef __int32 ssize_t;
-#endif
-
-#if _MSC_VER >= 1900 && _MSC_VER < 2000
-// VS 2015 has a known bug when using std::codecvt_utf8<char32_t>
-// so we have to temporarily use __int32 instead.
-// https://connect.microsoft.com/VisualStudio/feedback/details/1403302/unresolved-external-when-using-codecvt-utf8
-typedef std::basic_string<__int32> i32string;
-
-typedef i32string UTF32String;
-#else
-typedef std::u32string UTF32String;
-#endif
-
-#ifdef ANTLR4CPP_EXPORTS
-#define ANTLR4CPP_PUBLIC __declspec(dllexport)
-#else
-#ifdef ANTLR4CPP_STATIC
-#define ANTLR4CPP_PUBLIC
-#else
-#define ANTLR4CPP_PUBLIC __declspec(dllimport)
-#endif
-#endif
-
-#elif defined(__APPLE__)
-typedef std::u32string UTF32String;
-
-#define GUID_CFUUID
-#if __GNUC__ >= 4
-#define ANTLR4CPP_PUBLIC __attribute__((visibility("default")))
-#else
-#define ANTLR4CPP_PUBLIC
-#endif
-#else
-typedef std::u32string UTF32String;
-
-#define GUID_LIBUUID
-#if __GNUC__ >= 6
-#define ANTLR4CPP_PUBLIC __attribute__((visibility("default")))
-#else
-#define ANTLR4CPP_PUBLIC
-#endif
-#endif
-
-#include "support/Declarations.h"
-#include "support/guid.h"
-
-#if !defined(HAS_NOEXCEPT)
-#if defined(__clang__)
-#if __has_feature(cxx_noexcept)
-#define HAS_NOEXCEPT
-#endif
-#else
-#if defined(__GXX_EXPERIMENTAL_CXX0X__) && \
- __GNUC__ * 10 + __GNUC_MINOR__ >= 46 || \
- defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026
-#define HAS_NOEXCEPT
-#endif
-#endif
-
-#ifdef HAS_NOEXCEPT
-#define NOEXCEPT noexcept
-#else
-#define NOEXCEPT
-#endif
-#endif
-
-// We have to undefine this symbol as ANTLR will use this name for own members
-// and even generated functions. Because EOF is a global macro we cannot use
-// e.g. a namespace scope to disambiguate.
-#ifdef EOF
-#undef EOF
-#endif
-
-#define INVALID_INDEX std::numeric_limits<size_t>::max()
-template <class T>
-using Ref = std::shared_ptr<T>;
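
Note: antlr4-common.h above ends with two small conveniences: a Ref<T> alias over std::shared_ptr and an INVALID_INDEX sentinel. A self-contained usage sketch, assuming a hypothetical Node type:

#include <cstddef>
#include <limits>
#include <memory>
#include <string>

template <class T>
using Ref = std::shared_ptr<T>;

constexpr std::size_t kInvalidIndex = std::numeric_limits<std::size_t>::max();

struct Node {
  std::string text;
  std::size_t tokenIndex = kInvalidIndex;  // "not assigned yet"
};

int main() {
  Ref<Node> n = std::make_shared<Node>();
  n->text = "example";
  bool hasIndex = (n->tokenIndex != kInvalidIndex);  // false until assigned
  return hasIndex ? 1 : 0;
}
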
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-runtime.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-runtime.h
deleted file mode 100644
index e986a3668d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/antlr4-runtime.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-// This is the umbrella header for all ANTLR4 C++ runtime headers.
-
-#include "antlr4-common.h"
-
-#include "ANTLRErrorListener.h"
-#include "ANTLRErrorStrategy.h"
-#include "ANTLRFileStream.h"
-#include "ANTLRInputStream.h"
-#include "BailErrorStrategy.h"
-#include "BaseErrorListener.h"
-#include "BufferedTokenStream.h"
-#include "CharStream.h"
-#include "CommonToken.h"
-#include "CommonTokenFactory.h"
-#include "CommonTokenStream.h"
-#include "ConsoleErrorListener.h"
-#include "DefaultErrorStrategy.h"
-#include "DiagnosticErrorListener.h"
-#include "Exceptions.h"
-#include "FailedPredicateException.h"
-#include "InputMismatchException.h"
-#include "IntStream.h"
-#include "InterpreterRuleContext.h"
-#include "Lexer.h"
-#include "LexerInterpreter.h"
-#include "LexerNoViableAltException.h"
-#include "ListTokenSource.h"
-#include "NoViableAltException.h"
-#include "Parser.h"
-#include "ParserInterpreter.h"
-#include "ParserRuleContext.h"
-#include "ProxyErrorListener.h"
-#include "RecognitionException.h"
-#include "Recognizer.h"
-#include "RuleContext.h"
-#include "RuleContextWithAltNum.h"
-#include "RuntimeMetaData.h"
-#include "Token.h"
-#include "TokenFactory.h"
-#include "TokenSource.h"
-#include "TokenStream.h"
-#include "TokenStreamRewriter.h"
-#include "UnbufferedCharStream.h"
-#include "UnbufferedTokenStream.h"
-#include "Vocabulary.h"
-#include "WritableToken.h"
-#include "atn/ATN.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNDeserializationOptions.h"
-#include "atn/ATNDeserializer.h"
-#include "atn/ATNSerializer.h"
-#include "atn/ATNSimulator.h"
-#include "atn/ATNState.h"
-#include "atn/ATNType.h"
-#include "atn/AbstractPredicateTransition.h"
-#include "atn/ActionTransition.h"
-#include "atn/AmbiguityInfo.h"
-#include "atn/ArrayPredictionContext.h"
-#include "atn/AtomTransition.h"
-#include "atn/BasicBlockStartState.h"
-#include "atn/BasicState.h"
-#include "atn/BlockEndState.h"
-#include "atn/BlockStartState.h"
-#include "atn/ContextSensitivityInfo.h"
-#include "atn/DecisionEventInfo.h"
-#include "atn/DecisionInfo.h"
-#include "atn/DecisionState.h"
-#include "atn/EmptyPredictionContext.h"
-#include "atn/EpsilonTransition.h"
-#include "atn/ErrorInfo.h"
-#include "atn/LL1Analyzer.h"
-#include "atn/LexerATNConfig.h"
-#include "atn/LexerATNSimulator.h"
-#include "atn/LexerAction.h"
-#include "atn/LexerActionExecutor.h"
-#include "atn/LexerActionType.h"
-#include "atn/LexerChannelAction.h"
-#include "atn/LexerCustomAction.h"
-#include "atn/LexerIndexedCustomAction.h"
-#include "atn/LexerModeAction.h"
-#include "atn/LexerMoreAction.h"
-#include "atn/LexerPopModeAction.h"
-#include "atn/LexerPushModeAction.h"
-#include "atn/LexerSkipAction.h"
-#include "atn/LexerTypeAction.h"
-#include "atn/LookaheadEventInfo.h"
-#include "atn/LoopEndState.h"
-#include "atn/NotSetTransition.h"
-#include "atn/OrderedATNConfigSet.h"
-#include "atn/ParseInfo.h"
-#include "atn/ParserATNSimulator.h"
-#include "atn/PlusBlockStartState.h"
-#include "atn/PlusLoopbackState.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/PredicateEvalInfo.h"
-#include "atn/PredicateTransition.h"
-#include "atn/PredictionContext.h"
-#include "atn/PredictionMode.h"
-#include "atn/ProfilingATNSimulator.h"
-#include "atn/RangeTransition.h"
-#include "atn/RuleStartState.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "atn/SemanticContext.h"
-#include "atn/SetTransition.h"
-#include "atn/SingletonPredictionContext.h"
-#include "atn/StarBlockStartState.h"
-#include "atn/StarLoopEntryState.h"
-#include "atn/StarLoopbackState.h"
-#include "atn/TokensStartState.h"
-#include "atn/Transition.h"
-#include "atn/WildcardTransition.h"
-#include "dfa/DFA.h"
-#include "dfa/DFASerializer.h"
-#include "dfa/DFAState.h"
-#include "dfa/LexerDFASerializer.h"
-#include "misc/InterpreterDataReader.h"
-#include "misc/Interval.h"
-#include "misc/IntervalSet.h"
-#include "misc/MurmurHash.h"
-#include "misc/Predicate.h"
-#include "support/Any.h"
-#include "support/Arrays.h"
-#include "support/BitSet.h"
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-#include "support/guid.h"
-#include "tree/AbstractParseTreeVisitor.h"
-#include "tree/ErrorNode.h"
-#include "tree/ErrorNodeImpl.h"
-#include "tree/ParseTree.h"
-#include "tree/ParseTreeListener.h"
-#include "tree/ParseTreeProperty.h"
-#include "tree/ParseTreeVisitor.h"
-#include "tree/ParseTreeWalker.h"
-#include "tree/TerminalNode.h"
-#include "tree/TerminalNodeImpl.h"
-#include "tree/Trees.h"
-#include "tree/pattern/Chunk.h"
-#include "tree/pattern/ParseTreeMatch.h"
-#include "tree/pattern/ParseTreePattern.h"
-#include "tree/pattern/ParseTreePatternMatcher.h"
-#include "tree/pattern/RuleTagToken.h"
-#include "tree/pattern/TagChunk.h"
-#include "tree/pattern/TextChunk.h"
-#include "tree/pattern/TokenTagToken.h"
-#include "tree/xpath/XPath.h"
-#include "tree/xpath/XPathElement.h"
-#include "tree/xpath/XPathLexer.h"
-#include "tree/xpath/XPathLexerErrorListener.h"
-#include "tree/xpath/XPathRuleAnywhereElement.h"
-#include "tree/xpath/XPathRuleElement.h"
-#include "tree/xpath/XPathTokenAnywhereElement.h"
-#include "tree/xpath/XPathTokenElement.h"
-#include "tree/xpath/XPathWildcardAnywhereElement.h"
-#include "tree/xpath/XPathWildcardElement.h"
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.cpp
deleted file mode 100644
index d19adfc4d1..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.cpp
+++ /dev/null
@@ -1,212 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "Recognizer.h"
-#include "RuleContext.h"
-#include "Token.h"
-#include "atn/ATNType.h"
-#include "atn/DecisionState.h"
-#include "atn/LL1Analyzer.h"
-#include "atn/RuleTransition.h"
-#include "misc/IntervalSet.h"
-#include "support/CPPUtils.h"
-
-#include "atn/ATN.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-ATN::ATN() : ATN(ATNType::LEXER, 0) {}
-
-ATN::ATN(ATN&& other) {
- // All source vectors are implicitly cleared by the moves.
- states = std::move(other.states);
- decisionToState = std::move(other.decisionToState);
- ruleToStartState = std::move(other.ruleToStartState);
- ruleToStopState = std::move(other.ruleToStopState);
- grammarType = std::move(other.grammarType);
- maxTokenType = std::move(other.maxTokenType);
- ruleToTokenType = std::move(other.ruleToTokenType);
- lexerActions = std::move(other.lexerActions);
- modeToStartState = std::move(other.modeToStartState);
-}
-
-ATN::ATN(ATNType grammarType_, size_t maxTokenType_)
- : grammarType(grammarType_), maxTokenType(maxTokenType_) {}
-
-ATN::~ATN() {
- for (ATNState* state : states) {
- delete state;
- }
-}
-
-/**
- * Required to be defined (even though not used) as we have an explicit move
- * assignment operator.
- */
-ATN& ATN::operator=(ATN& other) NOEXCEPT {
- states = other.states;
- decisionToState = other.decisionToState;
- ruleToStartState = other.ruleToStartState;
- ruleToStopState = other.ruleToStopState;
- grammarType = other.grammarType;
- maxTokenType = other.maxTokenType;
- ruleToTokenType = other.ruleToTokenType;
- lexerActions = other.lexerActions;
- modeToStartState = other.modeToStartState;
-
- return *this;
-}
-
-/**
- * Explicit move assignment operator to make this the preferred assignment. With
- * implicit copy/move assignment operators it seems the copy operator is
- * preferred causing trouble when releasing the allocated ATNState instances.
- */
-ATN& ATN::operator=(ATN&& other) NOEXCEPT {
- // All source vectors are implicitly cleared by the moves.
- states = std::move(other.states);
- decisionToState = std::move(other.decisionToState);
- ruleToStartState = std::move(other.ruleToStartState);
- ruleToStopState = std::move(other.ruleToStopState);
- grammarType = std::move(other.grammarType);
- maxTokenType = std::move(other.maxTokenType);
- ruleToTokenType = std::move(other.ruleToTokenType);
- lexerActions = std::move(other.lexerActions);
- modeToStartState = std::move(other.modeToStartState);
-
- return *this;
-}
-
-misc::IntervalSet ATN::nextTokens(ATNState* s, RuleContext* ctx) const {
- LL1Analyzer analyzer(*this);
- return analyzer.LOOK(s, ctx);
-}
-
-misc::IntervalSet const& ATN::nextTokens(ATNState* s) const {
- if (!s->_nextTokenUpdated) {
- std::unique_lock<std::mutex> lock{_mutex};
- if (!s->_nextTokenUpdated) {
- s->_nextTokenWithinRule = nextTokens(s, nullptr);
- s->_nextTokenUpdated = true;
- }
- }
- return s->_nextTokenWithinRule;
-}
-
-void ATN::addState(ATNState* state) {
- if (state != nullptr) {
- // state->atn = this;
- state->stateNumber = static_cast<int>(states.size());
- }
-
- states.push_back(state);
-}
-
-void ATN::removeState(ATNState* state) {
- delete states.at(
- state->stateNumber); // just free mem, don't shift states in list
- states.at(state->stateNumber) = nullptr;
-}
-
-int ATN::defineDecisionState(DecisionState* s) {
- decisionToState.push_back(s);
- s->decision = static_cast<int>(decisionToState.size() - 1);
- return s->decision;
-}
-
-DecisionState* ATN::getDecisionState(size_t decision) const {
- if (!decisionToState.empty()) {
- return decisionToState[decision];
- }
- return nullptr;
-}
-
-size_t ATN::getNumberOfDecisions() const { return decisionToState.size(); }
-
-misc::IntervalSet ATN::getExpectedTokens(size_t stateNumber,
- RuleContext* context) const {
- if (stateNumber == ATNState::INVALID_STATE_NUMBER ||
- stateNumber >= states.size()) {
- throw IllegalArgumentException("Invalid state number.");
- }
-
- RuleContext* ctx = context;
- ATNState* s = states.at(stateNumber);
- misc::IntervalSet following = nextTokens(s);
- if (!following.contains(Token::EPSILON)) {
- return following;
- }
-
- misc::IntervalSet expected;
- expected.addAll(following);
- expected.remove(Token::EPSILON);
- while (ctx && ctx->invokingState != ATNState::INVALID_STATE_NUMBER &&
- following.contains(Token::EPSILON)) {
- ATNState* invokingState = states.at(ctx->invokingState);
- RuleTransition* rt =
- static_cast<RuleTransition*>(invokingState->transitions[0]);
- following = nextTokens(rt->followState);
- expected.addAll(following);
- expected.remove(Token::EPSILON);
-
- if (ctx->parent == nullptr) {
- break;
- }
- ctx = static_cast<RuleContext*>(ctx->parent);
- }
-
- if (following.contains(Token::EPSILON)) {
- expected.add(Token::EOF);
- }
-
- return expected;
-}
-
-std::string ATN::toString() const {
- std::stringstream ss;
- std::string type;
- switch (grammarType) {
- case ATNType::LEXER:
- type = "LEXER ";
- break;
-
- case ATNType::PARSER:
- type = "PARSER ";
- break;
-
- default:
- break;
- }
- ss << "(" << type << "ATN " << std::hex << this << std::dec
- << ") maxTokenType: " << maxTokenType << std::endl;
- ss << "states (" << states.size() << ") {" << std::endl;
-
- size_t index = 0;
- for (auto state : states) {
- if (state == nullptr) {
- ss << " " << index++ << ": nul" << std::endl;
- } else {
- std::string text = state->toString();
- ss << " " << index++ << ": " << indent(text, " ", false) << std::endl;
- }
- }
-
- index = 0;
- for (auto state : decisionToState) {
- if (state == nullptr) {
- ss << " " << index++ << ": nul" << std::endl;
- } else {
- std::string text = state->toString();
- ss << " " << index++ << ": " << indent(text, " ", false) << std::endl;
- }
- }
-
- ss << "}";
-
- return ss.str();
-}
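
Note: ATN::nextTokens(ATNState*) above caches a lazily computed follow set behind a mutex. A race-free variant of that pattern, publishing the "computed" flag atomically; LazyFollowSet and computeValue() are hypothetical, and the int vector stands in for the IntervalSet.

#include <atomic>
#include <mutex>
#include <vector>

class LazyFollowSet {
 public:
  const std::vector<int>& get() {
    if (!ready_.load(std::memory_order_acquire)) {    // fast path, no lock
      std::lock_guard<std::mutex> lock(mutex_);
      if (!ready_.load(std::memory_order_relaxed)) {  // re-check under the lock
        value_ = computeValue();
        ready_.store(true, std::memory_order_release);
      }
    }
    return value_;
  }

 private:
  static std::vector<int> computeValue() {
    // Stand-in for the LL(1) LOOK computation done by the analyzer.
    return {1, 2, 3};
  }

  std::mutex mutex_;
  std::atomic<bool> ready_{false};
  std::vector<int> value_;
};
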
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.h
deleted file mode 100644
index 7af0f931ff..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATN.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RuleContext.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ATN {
- public:
- static const size_t INVALID_ALT_NUMBER = 0;
-
- /// Used for runtime deserialization of ATNs from strings.
- ATN();
- ATN(ATN&& other);
- ATN(ATNType grammarType, size_t maxTokenType);
- virtual ~ATN();
-
- std::vector<ATNState*> states;
-
- /// Each subrule/rule is a decision point and we must track them so we
- /// can go back later and build DFA predictors for them. This includes
- /// all the rules, subrules, optional blocks, ()+, ()* etc...
- std::vector<DecisionState*> decisionToState;
-
- /// Maps from rule index to starting state number.
- std::vector<RuleStartState*> ruleToStartState;
-
- /// Maps from rule index to stop state number.
- std::vector<RuleStopState*> ruleToStopState;
-
- /// The type of the ATN.
- ATNType grammarType;
-
- /// The maximum value for any symbol recognized by a transition in the ATN.
- size_t maxTokenType;
-
- /// <summary>
- /// For lexer ATNs, this maps the rule index to the resulting token type.
- /// For parser ATNs, this maps the rule index to the generated bypass token
- /// type if the
- /// <seealso
- /// cref="ATNDeserializationOptions#isGenerateRuleBypassTransitions"/>
- /// deserialization option was specified; otherwise, this is {@code null}.
- /// </summary>
- std::vector<size_t> ruleToTokenType;
-
- /// For lexer ATNs, this is an array of {@link LexerAction} objects which may
- /// be referenced by action transitions in the ATN.
- std::vector<Ref<LexerAction>> lexerActions;
-
- std::vector<TokensStartState*> modeToStartState;
-
- ATN& operator=(ATN& other) NOEXCEPT;
- ATN& operator=(ATN&& other) NOEXCEPT;
-
- /// <summary>
- /// Compute the set of valid tokens that can occur starting in state {@code
- /// s}.
- /// If {@code ctx} is null, the set of tokens will not include what can
- /// follow the rule surrounding {@code s}. In other words, the set will be
- /// restricted to tokens reachable staying within {@code s}'s rule.
- /// </summary>
- virtual misc::IntervalSet nextTokens(ATNState* s, RuleContext* ctx) const;
-
- /// <summary>
- /// Compute the set of valid tokens that can occur starting in {@code s} and
- /// staying in same rule. <seealso cref="Token#EPSILON"/> is in set if we
- /// reach end of rule.
- /// </summary>
- virtual misc::IntervalSet const& nextTokens(ATNState* s) const;
-
- virtual void addState(ATNState* state);
-
- virtual void removeState(ATNState* state);
-
- virtual int defineDecisionState(DecisionState* s);
-
- virtual DecisionState* getDecisionState(size_t decision) const;
-
- virtual size_t getNumberOfDecisions() const;
-
- /// <summary>
- /// Computes the set of input symbols which could follow ATN state number
- /// {@code stateNumber} in the specified full {@code context}. This method
- /// considers the complete parser context, but does not evaluate semantic
- /// predicates (i.e. all predicates encountered during the calculation are
- /// assumed true). If a path in the ATN exists from the starting state to the
- /// <seealso cref="RuleStopState"/> of the outermost context without matching
- /// any symbols, <seealso cref="Token#EOF"/> is added to the returned set.
- /// <p/>
- /// If {@code context} is {@code null}, it is treated as
- /// <seealso cref="ParserRuleContext#EMPTY"/>.
- /// </summary>
- /// <param name="stateNumber"> the ATN state number </param>
- /// <param name="context"> the full parse context </param>
- /// <returns> The set of potentially valid input symbols which could follow
- /// the specified state in the specified context. </returns> <exception
- /// cref="IllegalArgumentException"> if the ATN does not contain a state with
- /// number {@code stateNumber} </exception>
- virtual misc::IntervalSet getExpectedTokens(size_t stateNumber,
- RuleContext* context) const;
-
- std::string toString() const;
-
- private:
- mutable std::mutex _mutex;
-};
-
-} // namespace atn
-} // namespace antlr4
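
Note: the getExpectedTokens() documentation above describes unioning follow sets while popping invoking contexts until epsilon no longer appears, turning a trailing epsilon into EOF at the outermost level. A rough, heavily simplified sketch of that walk; Frame, the callback, and the sentinel constants are hypothetical.

#include <functional>
#include <set>
#include <vector>

constexpr int kEpsilon = -2;
constexpr int kEof = -1;

struct Frame {
  int followState;  // state to continue in after returning from this frame
};

std::set<int> expectedTokens(
    int state, std::vector<Frame> callStack,
    const std::function<std::set<int>(int)>& followSetOf) {
  std::set<int> following = followSetOf(state);
  if (!following.count(kEpsilon)) return following;

  std::set<int> expected = following;
  expected.erase(kEpsilon);
  while (!callStack.empty() && following.count(kEpsilon)) {
    following = followSetOf(callStack.back().followState);
    callStack.pop_back();
    expected.insert(following.begin(), following.end());
    expected.erase(kEpsilon);
  }
  if (following.count(kEpsilon)) expected.insert(kEof);  // fell off the outermost rule
  return expected;
}
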
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.cpp
deleted file mode 100644
index e0fc9e7c74..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.cpp
+++ /dev/null
@@ -1,118 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "SemanticContext.h"
-#include "atn/PredictionContext.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/ATNConfig.h"
-
-using namespace antlr4::atn;
-
-const size_t ATNConfig::SUPPRESS_PRECEDENCE_FILTER = 0x40000000;
-
-ATNConfig::ATNConfig(ATNState* state_, size_t alt_,
- Ref<PredictionContext> const& context_)
- : ATNConfig(state_, alt_, context_, SemanticContext::NONE) {}
-
-ATNConfig::ATNConfig(ATNState* state_, size_t alt_,
- Ref<PredictionContext> const& context_,
- Ref<SemanticContext> const& semanticContext_)
- : state(state_),
- alt(alt_),
- context(context_),
- semanticContext(semanticContext_) {
- reachesIntoOuterContext = 0;
-}
-
-ATNConfig::ATNConfig(Ref<ATNConfig> const& c)
- : ATNConfig(c, c->state, c->context, c->semanticContext) {}
-
-ATNConfig::ATNConfig(Ref<ATNConfig> const& c, ATNState* state_)
- : ATNConfig(c, state_, c->context, c->semanticContext) {}
-
-ATNConfig::ATNConfig(Ref<ATNConfig> const& c, ATNState* state,
- Ref<SemanticContext> const& semanticContext)
- : ATNConfig(c, state, c->context, semanticContext) {}
-
-ATNConfig::ATNConfig(Ref<ATNConfig> const& c,
- Ref<SemanticContext> const& semanticContext)
- : ATNConfig(c, c->state, c->context, semanticContext) {}
-
-ATNConfig::ATNConfig(Ref<ATNConfig> const& c, ATNState* state,
- Ref<PredictionContext> const& context)
- : ATNConfig(c, state, context, c->semanticContext) {}
-
-ATNConfig::ATNConfig(Ref<ATNConfig> const& c, ATNState* state,
- Ref<PredictionContext> const& context,
- Ref<SemanticContext> const& semanticContext)
- : state(state),
- alt(c->alt),
- context(context),
- reachesIntoOuterContext(c->reachesIntoOuterContext),
- semanticContext(semanticContext) {}
-
-ATNConfig::~ATNConfig() {}
-
-size_t ATNConfig::hashCode() const {
- size_t hashCode = misc::MurmurHash::initialize(7);
- hashCode = misc::MurmurHash::update(hashCode, state->stateNumber);
- hashCode = misc::MurmurHash::update(hashCode, alt);
- hashCode = misc::MurmurHash::update(hashCode, context);
- hashCode = misc::MurmurHash::update(hashCode, semanticContext);
- hashCode = misc::MurmurHash::finish(hashCode, 4);
- return hashCode;
-}
-
-size_t ATNConfig::getOuterContextDepth() const {
- return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER;
-}
-
-bool ATNConfig::isPrecedenceFilterSuppressed() const {
- return (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0;
-}
-
-void ATNConfig::setPrecedenceFilterSuppressed(bool value) {
- if (value) {
- reachesIntoOuterContext |= SUPPRESS_PRECEDENCE_FILTER;
- } else {
- reachesIntoOuterContext &= ~SUPPRESS_PRECEDENCE_FILTER;
- }
-}
-
-bool ATNConfig::operator==(const ATNConfig& other) const {
- return state->stateNumber == other.state->stateNumber && alt == other.alt &&
- ((context == other.context) || (*context == *other.context)) &&
- *semanticContext == *other.semanticContext &&
- isPrecedenceFilterSuppressed() == other.isPrecedenceFilterSuppressed();
-}
-
-bool ATNConfig::operator!=(const ATNConfig& other) const {
- return !operator==(other);
-}
-
-std::string ATNConfig::toString() { return toString(true); }
-
-std::string ATNConfig::toString(bool showAlt) {
- std::stringstream ss;
- ss << "(";
-
- ss << state->toString();
- if (showAlt) {
- ss << "," << alt;
- }
- if (context) {
- ss << ",[" << context->toString() << "]";
- }
- if (semanticContext != nullptr && semanticContext != SemanticContext::NONE) {
- ss << "," << semanticContext.get();
- }
- if (getOuterContextDepth() > 0) {
- ss << ",up=" << getOuterContextDepth();
- }
- ss << ')';
-
- return ss.str();
-}
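
Note: getOuterContextDepth() and isPrecedenceFilterSuppressed() above pack a boolean flag into one high bit of the reachesIntoOuterContext counter so existing zero-comparisons keep working. A standalone sketch of that bit packing; PackedDepth is a hypothetical illustration and assumes the depth stays below bit 30.

#include <cstddef>

class PackedDepth {
 public:
  static constexpr std::size_t kSuppressedBit = std::size_t{1} << 30;

  std::size_t depth() const { return bits_ & ~kSuppressedBit; }
  bool suppressed() const { return (bits_ & kSuppressedBit) != 0; }

  void setDepth(std::size_t d) { bits_ = (bits_ & kSuppressedBit) | d; }
  void setSuppressed(bool v) {
    if (v) bits_ |= kSuppressedBit;
    else   bits_ &= ~kSuppressedBit;
  }

 private:
  std::size_t bits_ = 0;  // low bits: depth, bit 30: suppressed flag
};
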
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.h
deleted file mode 100644
index 3122b9675e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfig.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "./antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// A tuple: (ATN state, predicted alt, syntactic, semantic context).
-/// The syntactic context is a graph-structured stack node whose
-/// path(s) to the root is the rule invocation(s)
-/// chain used to arrive at the state. The semantic context is
-/// the tree of semantic predicates encountered before reaching
-/// an ATN state.
-/// </summary>
-class ANTLR4CPP_PUBLIC ATNConfig {
- public:
- struct Hasher {
- size_t operator()(ATNConfig const& k) const { return k.hashCode(); }
- };
-
- struct Comparer {
- bool operator()(ATNConfig const& lhs, ATNConfig const& rhs) const {
- return (&lhs == &rhs) || (lhs == rhs);
- }
- };
-
- using Set = std::unordered_set<Ref<ATNConfig>, Hasher, Comparer>;
-
- /// The ATN state associated with this configuration.
- ATNState* state;
-
- /// What alt (or lexer rule) is predicted by this configuration.
- const size_t alt;
-
- /// The stack of invoking states leading to the rule/states associated
- /// with this config. We track only those contexts pushed during
- /// execution of the ATN simulator.
- ///
- /// Can be shared between multiple ATNConfig instances.
- Ref<PredictionContext> context;
-
- /**
- * We cannot execute predicates dependent upon local context unless
- * we know for sure we are in the correct context. Because there is
- * no way to do this efficiently, we simply cannot evaluate
- * dependent predicates unless we are in the rule that initially
- * invokes the ATN simulator.
- *
- * <p>
- * closure() tracks the depth of how far we dip into the outer context:
- * depth > 0. Note that it may not be totally accurate depth since I
- * don't ever decrement. TO_DO: make it a boolean then</p>
- *
- * <p>
- * For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method
- * is also backed by this field. Since the field is publicly accessible, the
- * highest bit which would not cause the value to become negative is used to
- * store this field. This choice minimizes the risk that code which only
- * compares this value to 0 would be affected by the new purpose of the
- * flag. It also ensures the performance of the existing {@link ATNConfig}
- * constructors as well as certain operations like
- * {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are
- * <em>completely</em> unaffected by the change.</p>
- */
- size_t reachesIntoOuterContext;
-
- /// Can be shared between multiple ATNConfig instances.
- Ref<SemanticContext> semanticContext;
-
- ATNConfig(ATNState* state, size_t alt, Ref<PredictionContext> const& context);
- ATNConfig(ATNState* state, size_t alt, Ref<PredictionContext> const& context,
- Ref<SemanticContext> const& semanticContext);
-
- ATNConfig(Ref<ATNConfig> const& c); // dup
- ATNConfig(Ref<ATNConfig> const& c, ATNState* state);
- ATNConfig(Ref<ATNConfig> const& c, ATNState* state,
- Ref<SemanticContext> const& semanticContext);
- ATNConfig(Ref<ATNConfig> const& c,
- Ref<SemanticContext> const& semanticContext);
- ATNConfig(Ref<ATNConfig> const& c, ATNState* state,
- Ref<PredictionContext> const& context);
- ATNConfig(Ref<ATNConfig> const& c, ATNState* state,
- Ref<PredictionContext> const& context,
- Ref<SemanticContext> const& semanticContext);
-
- ATNConfig(ATNConfig const&) = default;
- virtual ~ATNConfig();
- ATNConfig& operator=(ATNConfig const&) = default;
-
- virtual size_t hashCode() const;
-
- /**
- * This method gets the value of the {@link #reachesIntoOuterContext} field
- * as it existed prior to the introduction of the
- * {@link #isPrecedenceFilterSuppressed} method.
- */
- size_t getOuterContextDepth() const;
- bool isPrecedenceFilterSuppressed() const;
- void setPrecedenceFilterSuppressed(bool value);
-
- /// An ATN configuration is equal to another if both have
- /// the same state, they predict the same alternative, and
- /// syntactic/semantic contexts are the same.
- bool operator==(const ATNConfig& other) const;
- bool operator!=(const ATNConfig& other) const;
-
- virtual std::string toString();
- std::string toString(bool showAlt);
-
- private:
- /**
- * This field stores the bit mask for implementing the
- * {@link #isPrecedenceFilterSuppressed} property as a bit within the
- * existing {@link #reachesIntoOuterContext} field.
- */
- static const size_t SUPPRESS_PRECEDENCE_FILTER;
-};
-
-} // namespace atn
-} // namespace antlr4
-
-// Hash function for ATNConfig.
-
-namespace std {
-using antlr4::atn::ATNConfig;
-
-template <>
-struct hash<ATNConfig> {
- size_t operator()(const ATNConfig& x) const { return x.hashCode(); }
-};
-
-template <>
-struct hash<std::vector<Ref<ATNConfig>>> {
- size_t operator()(const std::vector<Ref<ATNConfig>>& vector) const {
- std::size_t seed = 0;
- for (auto& config : vector) {
- seed ^= config->hashCode() + 0x9e3779b9 + (seed << 6) + (seed >> 2);
- }
- return seed;
- }
-};
-} // namespace std
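
Note: the std::hash specialization for a vector of configs above uses the classic boost-style combine (seed ^= h + 0x9e3779b9 + (seed << 6) + (seed >> 2)). A generic sketch of that scheme; hashCombine and Item are hypothetical names used only for illustration.

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

inline void hashCombine(std::size_t& seed, std::size_t value) {
  seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

struct Item {
  std::string text;
};

std::size_t hashItems(const std::vector<Item>& items) {
  std::size_t seed = 0;
  for (const Item& item : items) {
    hashCombine(seed, std::hash<std::string>{}(item.text));
  }
  return seed;
}
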
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp
deleted file mode 100644
index fd5e41c475..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.cpp
+++ /dev/null
@@ -1,224 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNSimulator.h"
-#include "atn/PredictionContext.h"
-#include "atn/SemanticContext.h"
-#include "support/Arrays.h"
-
-#include "atn/ATNConfigSet.h"
-
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-ATNConfigSet::ATNConfigSet(bool fullCtx) : fullCtx(fullCtx) {
- InitializeInstanceFields();
-}
-
-ATNConfigSet::ATNConfigSet(const Ref<ATNConfigSet>& old)
- : ATNConfigSet(old->fullCtx) {
- addAll(old);
- uniqueAlt = old->uniqueAlt;
- conflictingAlts = old->conflictingAlts;
- hasSemanticContext = old->hasSemanticContext;
- dipsIntoOuterContext = old->dipsIntoOuterContext;
-}
-
-ATNConfigSet::~ATNConfigSet() {}
-
-bool ATNConfigSet::add(const Ref<ATNConfig>& config) {
- return add(config, nullptr);
-}
-
-bool ATNConfigSet::add(const Ref<ATNConfig>& config,
- PredictionContextMergeCache* mergeCache) {
- if (_readonly) {
- throw IllegalStateException("This set is readonly");
- }
- if (config->semanticContext != SemanticContext::NONE) {
- hasSemanticContext = true;
- }
- if (config->getOuterContextDepth() > 0) {
- dipsIntoOuterContext = true;
- }
-
- size_t hash = getHash(config.get());
- ATNConfig* existing = _configLookup[hash];
- if (existing == nullptr) {
- _configLookup[hash] = config.get();
- _cachedHashCode = 0;
- configs.push_back(config); // track order here
-
- return true;
- }
-
- // a previous (s,i,pi,_), merge with it and save result
- bool rootIsWildcard = !fullCtx;
- Ref<PredictionContext> merged = PredictionContext::merge(
- existing->context, config->context, rootIsWildcard, mergeCache);
- // no need to check for existing.context, config.context in cache
- // since only way to create new graphs is "call rule" and here. We
- // cache at both places.
- existing->reachesIntoOuterContext = std::max(
- existing->reachesIntoOuterContext, config->reachesIntoOuterContext);
-
- // make sure to preserve the precedence filter suppression during the merge
- if (config->isPrecedenceFilterSuppressed()) {
- existing->setPrecedenceFilterSuppressed(true);
- }
-
- existing->context = merged; // replace context; no need to alt mapping
-
- return true;
-}
-
-bool ATNConfigSet::addAll(const Ref<ATNConfigSet>& other) {
- for (auto& c : other->configs) {
- add(c);
- }
- return false;
-}
-
-std::vector<ATNState*> ATNConfigSet::getStates() {
- std::vector<ATNState*> states;
- for (auto c : configs) {
- states.push_back(c->state);
- }
- return states;
-}
-
-/**
- * Gets the complete set of represented alternatives for the configuration
- * set.
- *
- * @return the set of represented alternatives in this configuration set
- *
- * @since 4.3
- */
-
-BitSet ATNConfigSet::getAlts() {
- BitSet alts;
- for (ATNConfig config : configs) {
- alts.set(config.alt);
- }
- return alts;
-}
-
-std::vector<Ref<SemanticContext>> ATNConfigSet::getPredicates() {
- std::vector<Ref<SemanticContext>> preds;
- for (auto c : configs) {
- if (c->semanticContext != SemanticContext::NONE) {
- preds.push_back(c->semanticContext);
- }
- }
- return preds;
-}
-
-Ref<ATNConfig> ATNConfigSet::get(size_t i) const { return configs[i]; }
-
-void ATNConfigSet::optimizeConfigs(ATNSimulator* interpreter) {
- if (_readonly) {
- throw IllegalStateException("This set is readonly");
- }
- if (_configLookup.empty()) return;
-
- for (auto& config : configs) {
- config->context = interpreter->getCachedContext(config->context);
- }
-}
-
-bool ATNConfigSet::operator==(const ATNConfigSet& other) {
- if (&other == this) {
- return true;
- }
-
- if (configs.size() != other.configs.size()) return false;
-
- if (fullCtx != other.fullCtx || uniqueAlt != other.uniqueAlt ||
- conflictingAlts != other.conflictingAlts ||
- hasSemanticContext != other.hasSemanticContext ||
- dipsIntoOuterContext !=
- other.dipsIntoOuterContext) // includes stack context
- return false;
-
- return Arrays::equals(configs, other.configs);
-}
-
-size_t ATNConfigSet::hashCode() {
- if (!isReadonly() || _cachedHashCode == 0) {
- _cachedHashCode = 1;
- for (auto& i : configs) {
- _cachedHashCode = 31 * _cachedHashCode +
- i->hashCode(); // Same as Java's list hashCode impl.
- }
- }
-
- return _cachedHashCode;
-}
-
-size_t ATNConfigSet::size() { return configs.size(); }
-
-bool ATNConfigSet::isEmpty() { return configs.empty(); }
-
-void ATNConfigSet::clear() {
- if (_readonly) {
- throw IllegalStateException("This set is readonly");
- }
- configs.clear();
- _cachedHashCode = 0;
- _configLookup.clear();
-}
-
-bool ATNConfigSet::isReadonly() { return _readonly; }
-
-void ATNConfigSet::setReadonly(bool readonly) {
- _readonly = readonly;
- _configLookup.clear();
-}
-
-std::string ATNConfigSet::toString() {
- std::stringstream ss;
- ss << "[";
- for (size_t i = 0; i < configs.size(); i++) {
- ss << configs[i]->toString();
- }
- ss << "]";
-
- if (hasSemanticContext) {
- ss << ",hasSemanticContext = " << hasSemanticContext;
- }
- if (uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- ss << ",uniqueAlt = " << uniqueAlt;
- }
-
- if (conflictingAlts.size() > 0) {
- ss << ",conflictingAlts = ";
- ss << conflictingAlts.toString();
- }
-
- if (dipsIntoOuterContext) {
- ss << ", dipsIntoOuterContext";
- }
- return ss.str();
-}
-
-size_t ATNConfigSet::getHash(ATNConfig* c) {
- size_t hashCode = 7;
- hashCode = 31 * hashCode + c->state->stateNumber;
- hashCode = 31 * hashCode + c->alt;
- hashCode = 31 * hashCode + c->semanticContext->hashCode();
- return hashCode;
-}
-
-void ATNConfigSet::InitializeInstanceFields() {
- uniqueAlt = 0;
- hasSemanticContext = false;
- dipsIntoOuterContext = false;
-
- _readonly = false;
- _cachedHashCode = 0;
-}
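
Note: ATNConfigSet::add() above keys configurations by (state, alt, predicate) and, on a key collision, merges the prediction contexts instead of storing a second entry. A much-simplified sketch of that "add means merge" behaviour; MergingSet is hypothetical, keys on (state, alt) only, and uses a plain set of ints in place of the merged context graph.

#include <cstddef>
#include <map>
#include <set>
#include <utility>

class MergingSet {
 public:
  // Returns true in both cases, mirroring add() above, which reports success
  // whether it inserted a fresh entry or merged into an existing one.
  bool add(int state, int alt, std::set<int> contexts) {
    auto key = std::make_pair(state, alt);
    auto it = entries_.find(key);
    if (it == entries_.end()) {
      entries_.emplace(key, std::move(contexts));
      return true;
    }
    it->second.insert(contexts.begin(), contexts.end());  // merge, don't duplicate
    return true;
  }

  std::size_t size() const { return entries_.size(); }

 private:
  std::map<std::pair<int, int>, std::set<int>> entries_;
};
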
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.h
deleted file mode 100644
index a2f88f5fe0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNConfigSet.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/PredictionContext.h"
-#include "support/BitSet.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Specialized set that can track info about the set, with support for
-/// combining similar configurations using a graph-structured stack.
-class ANTLR4CPP_PUBLIC ATNConfigSet {
- public:
- /// Track the elements as they are added to the set; supports get(i)
- std::vector<Ref<ATNConfig>> configs;
-
- // TO_DO: these fields make me pretty uncomfortable but nice to pack up info
- // together, saves recomputation TO_DO: can we track conflicts as they are
- // added to save scanning configs later?
- size_t uniqueAlt;
-
- /** Currently this is only used when we detect SLL conflict; this does
- * not necessarily represent the ambiguous alternatives. In fact,
- * I should also point out that this seems to include predicated alternatives
- * that have predicates that evaluate to false. Computed in
- * computeTargetState().
- */
- antlrcpp::BitSet conflictingAlts;
-
- // Used in parser and lexer. In lexer, it indicates we hit a pred
- // while computing a closure operation. Don't make a DFA state from this.
- bool hasSemanticContext;
- bool dipsIntoOuterContext;
-
- /// Indicates that this configuration set is part of a full context
- /// LL prediction. It will be used to determine how to merge $. With SLL
- /// it's a wildcard whereas it is not for LL context merge.
- const bool fullCtx;
-
- ATNConfigSet(bool fullCtx = true);
- ATNConfigSet(const Ref<ATNConfigSet>& old);
-
- virtual ~ATNConfigSet();
-
- virtual bool add(const Ref<ATNConfig>& config);
-
- /// <summary>
- /// Adding a new config means merging contexts with existing configs for
- /// {@code (s, i, pi, _)}, where {@code s} is the
- /// <seealso cref="ATNConfig#state"/>, {@code i} is the <seealso
- /// cref="ATNConfig#alt"/>, and
- /// {@code pi} is the <seealso cref="ATNConfig#semanticContext"/>. We use
- /// {@code (s,i,pi)} as key.
- /// <p/>
- /// This method updates <seealso cref="#dipsIntoOuterContext"/> and
- /// <seealso cref="#hasSemanticContext"/> when necessary.
- /// </summary>
- virtual bool add(const Ref<ATNConfig>& config,
- PredictionContextMergeCache* mergeCache);
-
- virtual std::vector<ATNState*> getStates();
-
- /**
- * Gets the complete set of represented alternatives for the configuration
- * set.
- *
- * @return the set of represented alternatives in this configuration set
- *
- * @since 4.3
- */
- antlrcpp::BitSet getAlts();
- virtual std::vector<Ref<SemanticContext>> getPredicates();
-
- virtual Ref<ATNConfig> get(size_t i) const;
-
- virtual void optimizeConfigs(ATNSimulator* interpreter);
-
- bool addAll(const Ref<ATNConfigSet>& other);
-
- bool operator==(const ATNConfigSet& other);
- virtual size_t hashCode();
- virtual size_t size();
- virtual bool isEmpty();
- virtual void clear();
- virtual bool isReadonly();
- virtual void setReadonly(bool readonly);
- virtual std::string toString();
-
- protected:
- /// Indicates that the set of configurations is read-only. Do not
- /// allow any code to manipulate the set; DFA states will point at
- /// the sets and they must not change. This does not protect the other
- /// fields; in particular, conflictingAlts is set after
- /// we've made this readonly.
- bool _readonly;
-
- virtual size_t getHash(ATNConfig* c); // Hash differs depending on set type.
-
- private:
- size_t _cachedHashCode;
-
- /// All configs but hashed by (s, i, _, pi) not including context. Wiped out
- /// when we go readonly as this set becomes a DFA state.
- std::unordered_map<size_t, ATNConfig*> _configLookup;
-
- void InitializeInstanceFields();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp
deleted file mode 100644
index 9314ff6ad9..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNDeserializationOptions.h"
-
-using namespace antlr4::atn;
-
-ATNDeserializationOptions ATNDeserializationOptions::defaultOptions;
-
-ATNDeserializationOptions::ATNDeserializationOptions() {
- InitializeInstanceFields();
-}
-
-ATNDeserializationOptions::ATNDeserializationOptions(
- ATNDeserializationOptions* options)
- : ATNDeserializationOptions() {
- this->verifyATN = options->verifyATN;
- this->generateRuleBypassTransitions = options->generateRuleBypassTransitions;
-}
-
-ATNDeserializationOptions::~ATNDeserializationOptions() {}
-
-const ATNDeserializationOptions&
-ATNDeserializationOptions::getDefaultOptions() {
- return defaultOptions;
-}
-
-bool ATNDeserializationOptions::isReadOnly() { return readOnly; }
-
-void ATNDeserializationOptions::makeReadOnly() { readOnly = true; }
-
-bool ATNDeserializationOptions::isVerifyATN() { return verifyATN; }
-
-void ATNDeserializationOptions::setVerifyATN(bool verify) {
- throwIfReadOnly();
- verifyATN = verify;
-}
-
-bool ATNDeserializationOptions::isGenerateRuleBypassTransitions() {
- return generateRuleBypassTransitions;
-}
-
-void ATNDeserializationOptions::setGenerateRuleBypassTransitions(
- bool generate) {
- throwIfReadOnly();
- generateRuleBypassTransitions = generate;
-}
-
-void ATNDeserializationOptions::throwIfReadOnly() {
- if (isReadOnly()) {
- throw "The object is read only.";
- }
-}
-
-void ATNDeserializationOptions::InitializeInstanceFields() {
- readOnly = false;
- verifyATN = true;
- generateRuleBypassTransitions = false;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h
deleted file mode 100644
index 1c2b1023e6..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializationOptions.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ATNDeserializationOptions {
- private:
- static ATNDeserializationOptions defaultOptions;
-
- bool readOnly;
- bool verifyATN;
- bool generateRuleBypassTransitions;
-
- public:
- ATNDeserializationOptions();
- ATNDeserializationOptions(ATNDeserializationOptions* options);
- ATNDeserializationOptions(ATNDeserializationOptions const&) = default;
- virtual ~ATNDeserializationOptions();
- ATNDeserializationOptions& operator=(ATNDeserializationOptions const&) =
- default;
-
- static const ATNDeserializationOptions& getDefaultOptions();
-
- bool isReadOnly();
-
- void makeReadOnly();
-
- bool isVerifyATN();
-
- void setVerifyATN(bool verify);
-
- bool isGenerateRuleBypassTransitions();
-
- void setGenerateRuleBypassTransitions(bool generate);
-
- protected:
- virtual void throwIfReadOnly();
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp
deleted file mode 100644
index be679ace02..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp
+++ /dev/null
@@ -1,813 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNDeserializationOptions.h"
-
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "atn/ATNType.h"
-
-#include "atn/BasicBlockStartState.h"
-#include "atn/BasicState.h"
-#include "atn/BlockEndState.h"
-#include "atn/DecisionState.h"
-#include "atn/EpsilonTransition.h"
-#include "atn/LoopEndState.h"
-#include "atn/PlusBlockStartState.h"
-#include "atn/PlusLoopbackState.h"
-#include "atn/RuleStartState.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "atn/StarLoopEntryState.h"
-#include "atn/StarLoopbackState.h"
-#include "atn/TokensStartState.h"
-
-#include "Token.h"
-#include "atn/ActionTransition.h"
-#include "atn/AtomTransition.h"
-#include "atn/NotSetTransition.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/PredicateTransition.h"
-#include "atn/RangeTransition.h"
-#include "atn/SetTransition.h"
-#include "atn/StarBlockStartState.h"
-#include "atn/WildcardTransition.h"
-
-#include "Exceptions.h"
-#include "misc/IntervalSet.h"
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-
-#include "atn/LexerChannelAction.h"
-#include "atn/LexerCustomAction.h"
-#include "atn/LexerModeAction.h"
-#include "atn/LexerMoreAction.h"
-#include "atn/LexerPopModeAction.h"
-#include "atn/LexerPushModeAction.h"
-#include "atn/LexerSkipAction.h"
-#include "atn/LexerTypeAction.h"
-
-#include "atn/ATNDeserializer.h"
-
-#include <string>
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-const size_t ATNDeserializer::SERIALIZED_VERSION = 3;
-
-namespace {
-
-uint32_t deserializeInt32(const std::vector<uint16_t>& data, size_t offset) {
- return (uint32_t)data[offset] | ((uint32_t)data[offset + 1] << 16);
-}
-
-ssize_t readUnicodeInt(const std::vector<uint16_t>& data, int& p) {
- return static_cast<ssize_t>(data[p++]);
-}
-
-ssize_t readUnicodeInt32(const std::vector<uint16_t>& data, int& p) {
- auto result = deserializeInt32(data, p);
- p += 2;
- return static_cast<ssize_t>(result);
-}
-
-// We templatize this on the function type so the optimizer can inline
-// the 16- or 32-bit readUnicodeInt/readUnicodeInt32 as needed.
-template <typename F>
-void deserializeSets(const std::vector<uint16_t>& data, int& p,
- std::vector<misc::IntervalSet>& sets, F readUnicode) {
- int nsets = data[p++];
- for (int i = 0; i < nsets; i++) {
- int nintervals = data[p++];
- misc::IntervalSet set;
-
- bool containsEof = data[p++] != 0;
- if (containsEof) {
- set.add(-1);
- }
-
- for (int j = 0; j < nintervals; j++) {
- auto a = readUnicode(data, p);
- auto b = readUnicode(data, p);
- set.add(a, b);
- }
- sets.push_back(set);
- }
-}
-
-} // namespace
-
-ATNDeserializer::ATNDeserializer()
- : ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) {}
-
-ATNDeserializer::ATNDeserializer(const ATNDeserializationOptions& dso)
- : deserializationOptions(dso) {}
-
-ATNDeserializer::~ATNDeserializer() {}
-
-/**
- * This value should never change. Updates following this version are
- * reflected as change in the unique ID SERIALIZED_UUID.
- */
-Guid ATNDeserializer::ADDED_PRECEDENCE_TRANSITIONS() {
- return Guid("1DA0C57D-6C06-438A-9B27-10BCB3CE0F61");
-}
-
-Guid ATNDeserializer::ADDED_LEXER_ACTIONS() {
- return Guid("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E");
-}
-
-Guid ATNDeserializer::ADDED_UNICODE_SMP() {
- return Guid("59627784-3BE5-417A-B9EB-8131A7286089");
-}
-
-Guid ATNDeserializer::SERIALIZED_UUID() { return ADDED_UNICODE_SMP(); }
-
-Guid ATNDeserializer::BASE_SERIALIZED_UUID() {
- return Guid("33761B2D-78BB-4A43-8B0B-4F5BEE8AACF3");
-}
-
-std::vector<Guid>& ATNDeserializer::SUPPORTED_UUIDS() {
- static std::vector<Guid> singleton = {
- BASE_SERIALIZED_UUID(), ADDED_PRECEDENCE_TRANSITIONS(),
- ADDED_LEXER_ACTIONS(), ADDED_UNICODE_SMP()};
- return singleton;
-}
-
-bool ATNDeserializer::isFeatureSupported(const Guid& feature,
- const Guid& actualUuid) {
- auto featureIterator =
- std::find(SUPPORTED_UUIDS().begin(), SUPPORTED_UUIDS().end(), feature);
- if (featureIterator == SUPPORTED_UUIDS().end()) {
- return false;
- }
- auto actualIterator =
- std::find(SUPPORTED_UUIDS().begin(), SUPPORTED_UUIDS().end(), actualUuid);
- if (actualIterator == SUPPORTED_UUIDS().end()) {
- return false;
- }
-
- return std::distance(featureIterator, actualIterator) >= 0;
-}
-
-ATN ATNDeserializer::deserialize(const std::vector<uint16_t>& input) {
- // Don't adjust the first value since that's the version number.
- std::vector<uint16_t> data(input.size());
- data[0] = input[0];
- for (size_t i = 1; i < input.size(); ++i) {
- data[i] = input[i] - 2;
- }
-
- int p = 0;
- int version = data[p++];
- if (version != SERIALIZED_VERSION) {
- std::string reason = "Could not deserialize ATN with version " +
- std::to_string(version) + " (expected " +
- std::to_string(SERIALIZED_VERSION) + ").";
-
- throw UnsupportedOperationException(reason);
- }
-
- Guid uuid = toUUID(data.data(), p);
- p += 8;
- auto uuidIterator =
- std::find(SUPPORTED_UUIDS().begin(), SUPPORTED_UUIDS().end(), uuid);
- if (uuidIterator == SUPPORTED_UUIDS().end()) {
- std::string reason = "Could not deserialize ATN with UUID " +
- uuid.toString() + " (expected " +
- SERIALIZED_UUID().toString() + " or a legacy UUID).";
-
- throw UnsupportedOperationException(reason);
- }
-
- bool supportsPrecedencePredicates =
- isFeatureSupported(ADDED_PRECEDENCE_TRANSITIONS(), uuid);
- bool supportsLexerActions = isFeatureSupported(ADDED_LEXER_ACTIONS(), uuid);
-
- ATNType grammarType = (ATNType)data[p++];
- size_t maxTokenType = data[p++];
- ATN atn(grammarType, maxTokenType);
-
- //
- // STATES
- //
- std::vector<std::pair<LoopEndState*, size_t>> loopBackStateNumbers;
- std::vector<std::pair<BlockStartState*, size_t>> endStateNumbers;
- size_t nstates = data[p++];
- for (size_t i = 0; i < nstates; i++) {
- size_t stype = data[p++];
- // ignore bad type of states
- if (stype == ATNState::ATN_INVALID_TYPE) {
- atn.addState(nullptr);
- continue;
- }
-
- size_t ruleIndex = data[p++];
- if (ruleIndex == 0xFFFF) {
- ruleIndex = INVALID_INDEX;
- }
-
- ATNState* s = stateFactory(stype, ruleIndex);
- if (stype == ATNState::LOOP_END) { // special case
- int loopBackStateNumber = data[p++];
- loopBackStateNumbers.push_back({(LoopEndState*)s, loopBackStateNumber});
- } else if (is<BlockStartState*>(s)) {
- int endStateNumber = data[p++];
- endStateNumbers.push_back({(BlockStartState*)s, endStateNumber});
- }
- atn.addState(s);
- }
-
- // delay the assignment of loop back and end states until we know all the
- // state instances have been initialized
- for (auto& pair : loopBackStateNumbers) {
- pair.first->loopBackState = atn.states[pair.second];
- }
-
- for (auto& pair : endStateNumbers) {
- pair.first->endState = (BlockEndState*)atn.states[pair.second];
- }
-
- size_t numNonGreedyStates = data[p++];
- for (size_t i = 0; i < numNonGreedyStates; i++) {
- size_t stateNumber = data[p++];
- // The serialized ATN must be specifying the right states, so that the
- // cast below is correct.
- ((DecisionState*)atn.states[stateNumber])->nonGreedy = true;
- }
-
- if (supportsPrecedencePredicates) {
- size_t numPrecedenceStates = data[p++];
- for (size_t i = 0; i < numPrecedenceStates; i++) {
- size_t stateNumber = data[p++];
- ((RuleStartState*)atn.states[stateNumber])->isLeftRecursiveRule = true;
- }
- }
-
- //
- // RULES
- //
- size_t nrules = data[p++];
- for (size_t i = 0; i < nrules; i++) {
- size_t s = data[p++];
- // Here too, the serialized ATN must point to the correct class type.
- RuleStartState* startState = (RuleStartState*)atn.states[s];
- atn.ruleToStartState.push_back(startState);
- if (atn.grammarType == ATNType::LEXER) {
- size_t tokenType = data[p++];
- if (tokenType == 0xFFFF) {
- tokenType = Token::EOF;
- }
-
- atn.ruleToTokenType.push_back(tokenType);
-
- if (!isFeatureSupported(ADDED_LEXER_ACTIONS(), uuid)) {
- // this piece of unused metadata was serialized prior to the
- // addition of LexerAction
- // int actionIndexIgnored = data[p++];
- p++;
- }
- }
- }
-
- atn.ruleToStopState.resize(nrules);
- for (ATNState* state : atn.states) {
- if (!is<RuleStopState*>(state)) {
- continue;
- }
-
- RuleStopState* stopState = static_cast<RuleStopState*>(state);
- atn.ruleToStopState[state->ruleIndex] = stopState;
- atn.ruleToStartState[state->ruleIndex]->stopState = stopState;
- }
-
- //
- // MODES
- //
- size_t nmodes = data[p++];
- for (size_t i = 0; i < nmodes; i++) {
- size_t s = data[p++];
- atn.modeToStartState.push_back(
- static_cast<TokensStartState*>(atn.states[s]));
- }
-
- //
- // SETS
- //
- std::vector<misc::IntervalSet> sets;
-
- // First, deserialize sets with 16-bit arguments <= U+FFFF.
- deserializeSets(data, p, sets, readUnicodeInt);
-
- // Next, if the ATN was serialized with the Unicode SMP feature,
- // deserialize sets with 32-bit arguments <= U+10FFFF.
- if (isFeatureSupported(ADDED_UNICODE_SMP(), uuid)) {
- deserializeSets(data, p, sets, readUnicodeInt32);
- }
-
- //
- // EDGES
- //
- int nedges = data[p++];
- for (int i = 0; i < nedges; i++) {
- size_t src = data[p];
- size_t trg = data[p + 1];
- size_t ttype = data[p + 2];
- size_t arg1 = data[p + 3];
- size_t arg2 = data[p + 4];
- size_t arg3 = data[p + 5];
- Transition* trans =
- edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets);
- ATNState* srcState = atn.states[src];
- srcState->addTransition(trans);
- p += 6;
- }
-
- // edges for rule stop states can be derived, so they aren't serialized
- for (ATNState* state : atn.states) {
- for (size_t i = 0; i < state->transitions.size(); i++) {
- Transition* t = state->transitions[i];
- if (!is<RuleTransition*>(t)) {
- continue;
- }
-
- RuleTransition* ruleTransition = static_cast<RuleTransition*>(t);
- size_t outermostPrecedenceReturn = INVALID_INDEX;
- if (atn.ruleToStartState[ruleTransition->target->ruleIndex]
- ->isLeftRecursiveRule) {
- if (ruleTransition->precedence == 0) {
- outermostPrecedenceReturn = ruleTransition->target->ruleIndex;
- }
- }
-
- EpsilonTransition* returnTransition = new EpsilonTransition(
- ruleTransition->followState,
- outermostPrecedenceReturn); /* mem check: freed in ATNState d-tor */
- atn.ruleToStopState[ruleTransition->target->ruleIndex]->addTransition(
- returnTransition);
- }
- }
-
- for (ATNState* state : atn.states) {
- if (is<BlockStartState*>(state)) {
- BlockStartState* startState = static_cast<BlockStartState*>(state);
-
- // we need to know the end state to set its start state
- if (startState->endState == nullptr) {
- throw IllegalStateException();
- }
-
- // block end states can only be associated to a single block start state
- if (startState->endState->startState != nullptr) {
- throw IllegalStateException();
- }
-
- startState->endState->startState = static_cast<BlockStartState*>(state);
- }
-
- if (is<PlusLoopbackState*>(state)) {
- PlusLoopbackState* loopbackState = static_cast<PlusLoopbackState*>(state);
- for (size_t i = 0; i < loopbackState->transitions.size(); i++) {
- ATNState* target = loopbackState->transitions[i]->target;
- if (is<PlusBlockStartState*>(target)) {
- (static_cast<PlusBlockStartState*>(target))->loopBackState =
- loopbackState;
- }
- }
- } else if (is<StarLoopbackState*>(state)) {
- StarLoopbackState* loopbackState = static_cast<StarLoopbackState*>(state);
- for (size_t i = 0; i < loopbackState->transitions.size(); i++) {
- ATNState* target = loopbackState->transitions[i]->target;
- if (is<StarLoopEntryState*>(target)) {
- (static_cast<StarLoopEntryState*>(target))->loopBackState =
- loopbackState;
- }
- }
- }
- }
-
- //
- // DECISIONS
- //
- size_t ndecisions = data[p++];
- for (size_t i = 1; i <= ndecisions; i++) {
- size_t s = data[p++];
- DecisionState* decState = dynamic_cast<DecisionState*>(atn.states[s]);
- if (decState == nullptr) throw IllegalStateException();
-
- atn.decisionToState.push_back(decState);
- decState->decision = (int)i - 1;
- }
-
- //
- // LEXER ACTIONS
- //
- if (atn.grammarType == ATNType::LEXER) {
- if (supportsLexerActions) {
- atn.lexerActions.resize(data[p++]);
- for (size_t i = 0; i < atn.lexerActions.size(); i++) {
- LexerActionType actionType = (LexerActionType)data[p++];
- int data1 = data[p++];
- if (data1 == 0xFFFF) {
- data1 = -1;
- }
-
- int data2 = data[p++];
- if (data2 == 0xFFFF) {
- data2 = -1;
- }
-
- atn.lexerActions[i] = lexerActionFactory(actionType, data1, data2);
- }
- } else {
- // for compatibility with older serialized ATNs, convert the old
- // serialized action index for action transitions to the new
- // form, which is the index of a LexerCustomAction
- for (ATNState* state : atn.states) {
- for (size_t i = 0; i < state->transitions.size(); i++) {
- Transition* transition = state->transitions[i];
- if (!is<ActionTransition*>(transition)) {
- continue;
- }
-
- size_t ruleIndex =
- static_cast<ActionTransition*>(transition)->ruleIndex;
- size_t actionIndex =
- static_cast<ActionTransition*>(transition)->actionIndex;
- Ref<LexerCustomAction> lexerAction =
- std::make_shared<LexerCustomAction>(ruleIndex, actionIndex);
- state->transitions[i] = new ActionTransition(
- transition->target, ruleIndex, atn.lexerActions.size(),
- false); /* mem-check freed in ATNState d-tor */
- delete transition; // ml: no longer needed since we just replaced it.
- atn.lexerActions.push_back(lexerAction);
- }
- }
- }
- }
-
- markPrecedenceDecisions(atn);
-
- if (deserializationOptions.isVerifyATN()) {
- verifyATN(atn);
- }
-
- if (deserializationOptions.isGenerateRuleBypassTransitions() &&
- atn.grammarType == ATNType::PARSER) {
- atn.ruleToTokenType.resize(atn.ruleToStartState.size());
- for (size_t i = 0; i < atn.ruleToStartState.size(); i++) {
- atn.ruleToTokenType[i] = int(atn.maxTokenType + i + 1);
- }
-
- for (std::vector<RuleStartState*>::size_type i = 0;
- i < atn.ruleToStartState.size(); i++) {
- BasicBlockStartState* bypassStart =
- new BasicBlockStartState(); /* mem check: freed in ATN d-tor */
- bypassStart->ruleIndex = (int)i;
- atn.addState(bypassStart);
-
- BlockEndState* bypassStop =
- new BlockEndState(); /* mem check: freed in ATN d-tor */
- bypassStop->ruleIndex = (int)i;
- atn.addState(bypassStop);
-
- bypassStart->endState = bypassStop;
- atn.defineDecisionState(bypassStart);
-
- bypassStop->startState = bypassStart;
-
- ATNState* endState;
- Transition* excludeTransition = nullptr;
- if (atn.ruleToStartState[i]->isLeftRecursiveRule) {
- // wrap from the beginning of the rule to the StarLoopEntryState
- endState = nullptr;
- for (ATNState* state : atn.states) {
- if (state->ruleIndex != i) {
- continue;
- }
-
- if (!is<StarLoopEntryState*>(state)) {
- continue;
- }
-
- ATNState* maybeLoopEndState =
- state->transitions[state->transitions.size() - 1]->target;
- if (!is<LoopEndState*>(maybeLoopEndState)) {
- continue;
- }
-
- if (maybeLoopEndState->epsilonOnlyTransitions &&
- is<RuleStopState*>(maybeLoopEndState->transitions[0]->target)) {
- endState = state;
- break;
- }
- }
-
- if (endState == nullptr) {
- throw UnsupportedOperationException(
- "Couldn't identify final state of the precedence rule prefix "
- "section.");
- }
-
- excludeTransition = (static_cast<StarLoopEntryState*>(endState))
- ->loopBackState->transitions[0];
- } else {
- endState = atn.ruleToStopState[i];
- }
-
- // all non-excluded transitions that currently target end state need to
- // target blockEnd instead
- for (ATNState* state : atn.states) {
- for (Transition* transition : state->transitions) {
- if (transition == excludeTransition) {
- continue;
- }
-
- if (transition->target == endState) {
- transition->target = bypassStop;
- }
- }
- }
-
- // all transitions leaving the rule start state need to leave blockStart
- // instead
- while (atn.ruleToStartState[i]->transitions.size() > 0) {
- Transition* transition = atn.ruleToStartState[i]->removeTransition(
- atn.ruleToStartState[i]->transitions.size() - 1);
- bypassStart->addTransition(transition);
- }
-
- // link the new states
- atn.ruleToStartState[i]->addTransition(new EpsilonTransition(
- bypassStart)); /* mem check: freed in ATNState d-tor */
- bypassStop->addTransition(new EpsilonTransition(
- endState)); /* mem check: freed in ATNState d-tor */
-
- ATNState* matchState =
- new BasicState(); /* mem check: freed in ATN d-tor */
- atn.addState(matchState);
- matchState->addTransition(new AtomTransition(
- bypassStop,
- atn.ruleToTokenType[i])); /* mem check: freed in ATNState d-tor */
- bypassStart->addTransition(new EpsilonTransition(
- matchState)); /* mem check: freed in ATNState d-tor */
- }
-
- if (deserializationOptions.isVerifyATN()) {
- // reverify after modification
- verifyATN(atn);
- }
- }
-
- return atn;
-}
-
-/**
- * Analyze the {@link StarLoopEntryState} states in the specified ATN to set
- * the {@link StarLoopEntryState#isPrecedenceDecision} field to the
- * correct value.
- *
- * @param atn The ATN.
- */
-void ATNDeserializer::markPrecedenceDecisions(const ATN& atn) {
- for (ATNState* state : atn.states) {
- if (!is<StarLoopEntryState*>(state)) {
- continue;
- }
-
- /* We analyze the ATN to determine if this ATN decision state is the
- * decision for the closure block that determines whether a
- * precedence rule should continue or complete.
- */
- if (atn.ruleToStartState[state->ruleIndex]->isLeftRecursiveRule) {
- ATNState* maybeLoopEndState =
- state->transitions[state->transitions.size() - 1]->target;
- if (is<LoopEndState*>(maybeLoopEndState)) {
- if (maybeLoopEndState->epsilonOnlyTransitions &&
- is<RuleStopState*>(maybeLoopEndState->transitions[0]->target)) {
- static_cast<StarLoopEntryState*>(state)->isPrecedenceDecision = true;
- }
- }
- }
- }
-}
-
-void ATNDeserializer::verifyATN(const ATN& atn) {
- // verify assumptions
- for (ATNState* state : atn.states) {
- if (state == nullptr) {
- continue;
- }
-
- checkCondition(state->epsilonOnlyTransitions ||
- state->transitions.size() <= 1);
-
- if (is<PlusBlockStartState*>(state)) {
- checkCondition(
- (static_cast<PlusBlockStartState*>(state))->loopBackState != nullptr);
- }
-
- if (is<StarLoopEntryState*>(state)) {
- StarLoopEntryState* starLoopEntryState =
- static_cast<StarLoopEntryState*>(state);
- checkCondition(starLoopEntryState->loopBackState != nullptr);
- checkCondition(starLoopEntryState->transitions.size() == 2);
-
- if (is<StarBlockStartState*>(
- starLoopEntryState->transitions[0]->target)) {
- checkCondition(static_cast<LoopEndState*>(
- starLoopEntryState->transitions[1]->target) !=
- nullptr);
- checkCondition(!starLoopEntryState->nonGreedy);
- } else if (is<LoopEndState*>(
- starLoopEntryState->transitions[0]->target)) {
- checkCondition(is<StarBlockStartState*>(
- starLoopEntryState->transitions[1]->target));
- checkCondition(starLoopEntryState->nonGreedy);
- } else {
- throw IllegalStateException();
- }
- }
-
- if (is<StarLoopbackState*>(state)) {
- checkCondition(state->transitions.size() == 1);
- checkCondition(is<StarLoopEntryState*>(state->transitions[0]->target));
- }
-
- if (is<LoopEndState*>(state)) {
- checkCondition((static_cast<LoopEndState*>(state))->loopBackState !=
- nullptr);
- }
-
- if (is<RuleStartState*>(state)) {
- checkCondition((static_cast<RuleStartState*>(state))->stopState !=
- nullptr);
- }
-
- if (is<BlockStartState*>(state)) {
- checkCondition((static_cast<BlockStartState*>(state))->endState !=
- nullptr);
- }
-
- if (is<BlockEndState*>(state)) {
- checkCondition((static_cast<BlockEndState*>(state))->startState !=
- nullptr);
- }
-
- if (is<DecisionState*>(state)) {
- DecisionState* decisionState = static_cast<DecisionState*>(state);
- checkCondition(decisionState->transitions.size() <= 1 ||
- decisionState->decision >= 0);
- } else {
- checkCondition(state->transitions.size() <= 1 ||
- is<RuleStopState*>(state));
- }
- }
-}
-
-void ATNDeserializer::checkCondition(bool condition) {
- checkCondition(condition, "");
-}
-
-void ATNDeserializer::checkCondition(bool condition,
- const std::string& message) {
- if (!condition) {
- throw IllegalStateException(message);
- }
-}
-
-Guid ATNDeserializer::toUUID(const unsigned short* data, size_t offset) {
- return Guid((uint16_t*)data + offset, true);
-}
-
-/* mem check: all created instances are freed in the d-tor of the ATNState they
- * are added to. */
-Transition* ATNDeserializer::edgeFactory(
- const ATN& atn, size_t type, size_t /*src*/, size_t trg, size_t arg1,
- size_t arg2, size_t arg3, const std::vector<misc::IntervalSet>& sets) {
- ATNState* target = atn.states[trg];
- switch (type) {
- case Transition::EPSILON:
- return new EpsilonTransition(target);
- case Transition::RANGE:
- if (arg3 != 0) {
- return new RangeTransition(target, Token::EOF, arg2);
- } else {
- return new RangeTransition(target, arg1, arg2);
- }
- case Transition::RULE:
- return new RuleTransition(static_cast<RuleStartState*>(atn.states[arg1]),
- arg2, (int)arg3, target);
- case Transition::PREDICATE:
- return new PredicateTransition(target, arg1, arg2, arg3 != 0);
- case Transition::PRECEDENCE:
- return new PrecedencePredicateTransition(target, (int)arg1);
- case Transition::ATOM:
- if (arg3 != 0) {
- return new AtomTransition(target, Token::EOF);
- } else {
- return new AtomTransition(target, arg1);
- }
- case Transition::ACTION:
- return new ActionTransition(target, arg1, arg2, arg3 != 0);
- case Transition::SET:
- return new SetTransition(target, sets[arg1]);
- case Transition::NOT_SET:
- return new NotSetTransition(target, sets[arg1]);
- case Transition::WILDCARD:
- return new WildcardTransition(target);
- }
-
- throw IllegalArgumentException("The specified transition type is not valid.");
-}
-
-/* mem check: all created instances are freed in the d-tor of the ATN. */
-ATNState* ATNDeserializer::stateFactory(size_t type, size_t ruleIndex) {
- ATNState* s;
- switch (type) {
- case ATNState::ATN_INVALID_TYPE:
- return nullptr;
- case ATNState::BASIC:
- s = new BasicState();
- break;
- case ATNState::RULE_START:
- s = new RuleStartState();
- break;
- case ATNState::BLOCK_START:
- s = new BasicBlockStartState();
- break;
- case ATNState::PLUS_BLOCK_START:
- s = new PlusBlockStartState();
- break;
- case ATNState::STAR_BLOCK_START:
- s = new StarBlockStartState();
- break;
- case ATNState::TOKEN_START:
- s = new TokensStartState();
- break;
- case ATNState::RULE_STOP:
- s = new RuleStopState();
- break;
- case ATNState::BLOCK_END:
- s = new BlockEndState();
- break;
- case ATNState::STAR_LOOP_BACK:
- s = new StarLoopbackState();
- break;
- case ATNState::STAR_LOOP_ENTRY:
- s = new StarLoopEntryState();
- break;
- case ATNState::PLUS_LOOP_BACK:
- s = new PlusLoopbackState();
- break;
- case ATNState::LOOP_END:
- s = new LoopEndState();
- break;
- default:
- std::string message =
- "The specified state type " + std::to_string(type) + " is not valid.";
- throw IllegalArgumentException(message);
- }
-
- s->ruleIndex = ruleIndex;
- return s;
-}
-
-Ref<LexerAction> ATNDeserializer::lexerActionFactory(LexerActionType type,
- int data1, int data2) {
- switch (type) {
- case LexerActionType::CHANNEL:
- return std::make_shared<LexerChannelAction>(data1);
-
- case LexerActionType::CUSTOM:
- return std::make_shared<LexerCustomAction>(data1, data2);
-
- case LexerActionType::MODE:
- return std::make_shared<LexerModeAction>(data1);
-
- case LexerActionType::MORE:
- return LexerMoreAction::getInstance();
-
- case LexerActionType::POP_MODE:
- return LexerPopModeAction::getInstance();
-
- case LexerActionType::PUSH_MODE:
- return std::make_shared<LexerPushModeAction>(data1);
-
- case LexerActionType::SKIP:
- return LexerSkipAction::getInstance();
-
- case LexerActionType::TYPE:
- return std::make_shared<LexerTypeAction>(data1);
-
- default:
- throw IllegalArgumentException("The specified lexer action type " +
- std::to_string(static_cast<size_t>(type)) +
- " is not valid.");
- }
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.h
deleted file mode 100644
index f92d359b47..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNDeserializer.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNDeserializationOptions.h"
-#include "atn/LexerAction.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ATNDeserializer {
- public:
- static const size_t SERIALIZED_VERSION;
-
- /// This is the current serialized UUID.
- // ml: defined as function to avoid the “static initialization order fiasco”.
- static Guid SERIALIZED_UUID();
-
- ATNDeserializer();
- ATNDeserializer(const ATNDeserializationOptions& dso);
- virtual ~ATNDeserializer();
-
- static Guid toUUID(const unsigned short* data, size_t offset);
-
- virtual ATN deserialize(const std::vector<uint16_t>& input);
- virtual void verifyATN(const ATN& atn);
-
- static void checkCondition(bool condition);
- static void checkCondition(bool condition, const std::string& message);
-
- static Transition* edgeFactory(const ATN& atn, size_t type, size_t src,
- size_t trg, size_t arg1, size_t arg2,
- size_t arg3,
- const std::vector<misc::IntervalSet>& sets);
-
- static ATNState* stateFactory(size_t type, size_t ruleIndex);
-
- protected:
- /// Determines if a particular serialized representation of an ATN supports
- /// a particular feature, identified by the <seealso cref="UUID"/> used for
- /// serializing the ATN at the time the feature was first introduced.
- ///
- /// <param name="feature"> The <seealso cref="UUID"/> marking the first time
- /// the feature was supported in the serialized ATN. </param> <param
- /// name="actualUuid"> The <seealso cref="UUID"/> of the actual serialized ATN
- /// which is currently being deserialized. </param> <returns> {@code true} if
- /// the {@code actualUuid} value represents a serialized ATN at or after the
- /// feature identified by {@code feature} was introduced; otherwise, {@code
- /// false}. </returns>
- virtual bool isFeatureSupported(const Guid& feature, const Guid& actualUuid);
- void markPrecedenceDecisions(const ATN& atn);
- Ref<LexerAction> lexerActionFactory(LexerActionType type, int data1,
- int data2);
-
- private:
- /// This is the earliest supported serialized UUID.
- static Guid BASE_SERIALIZED_UUID();
-
- /// This UUID indicates an extension of <seealso cref="BASE_SERIALIZED_UUID"/>
- /// for the addition of precedence predicates.
- static Guid ADDED_PRECEDENCE_TRANSITIONS();
-
- /**
- * This UUID indicates an extension of ADDED_PRECEDENCE_TRANSITIONS
- * for the addition of lexer actions encoded as a sequence of
- * LexerAction instances.
- */
- static Guid ADDED_LEXER_ACTIONS();
-
- /**
- * This UUID indicates the serialized ATN contains two sets of
- * IntervalSets, where the second set's values are encoded as
- * 32-bit integers to support the full Unicode SMP range up to U+10FFFF.
- */
- static Guid ADDED_UNICODE_SMP();
-
- /// This list contains all of the currently supported UUIDs, ordered by when
- /// the feature first appeared in this branch.
- static std::vector<Guid>& SUPPORTED_UUIDS();
-
- ATNDeserializationOptions deserializationOptions;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp
deleted file mode 100644
index 1cda27ae6e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.cpp
+++ /dev/null
@@ -1,622 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNState.h"
-#include "atn/ATNType.h"
-#include "atn/BlockEndState.h"
-#include "misc/IntervalSet.h"
-
-#include "Token.h"
-#include "atn/ATN.h"
-#include "atn/BlockStartState.h"
-#include "atn/DecisionState.h"
-#include "atn/LoopEndState.h"
-#include "atn/RuleStartState.h"
-#include "atn/SetTransition.h"
-#include "atn/Transition.h"
-#include "misc/Interval.h"
-
-#include "atn/ATNDeserializer.h"
-#include "atn/ActionTransition.h"
-#include "atn/AtomTransition.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/PredicateTransition.h"
-#include "atn/RangeTransition.h"
-#include "atn/RuleTransition.h"
-
-#include "Exceptions.h"
-#include "atn/TokensStartState.h"
-#include "support/CPPUtils.h"
-
-#include "atn/LexerChannelAction.h"
-#include "atn/LexerCustomAction.h"
-#include "atn/LexerModeAction.h"
-#include "atn/LexerPushModeAction.h"
-#include "atn/LexerTypeAction.h"
-
-#include "Exceptions.h"
-
-#include "atn/ATNSerializer.h"
-
-using namespace antlrcpp;
-using namespace antlr4::atn;
-
-ATNSerializer::ATNSerializer(ATN* atn) { this->atn = atn; }
-
-ATNSerializer::ATNSerializer(ATN* atn,
- const std::vector<std::string>& tokenNames) {
- this->atn = atn;
- _tokenNames = tokenNames;
-}
-
-ATNSerializer::~ATNSerializer() {}
-
-std::vector<size_t> ATNSerializer::serialize() {
- std::vector<size_t> data;
- data.push_back(ATNDeserializer::SERIALIZED_VERSION);
- serializeUUID(data, ATNDeserializer::SERIALIZED_UUID());
-
- // convert grammar type to ATN const to avoid dependence on ANTLRParser
- data.push_back(static_cast<size_t>(atn->grammarType));
- data.push_back(atn->maxTokenType);
- size_t nedges = 0;
-
- std::unordered_map<misc::IntervalSet, int> setIndices;
- std::vector<misc::IntervalSet> sets;
-
- // dump states, count edges and collect sets while doing so
- std::vector<size_t> nonGreedyStates;
- std::vector<size_t> precedenceStates;
- data.push_back(atn->states.size());
- for (ATNState* s : atn->states) {
- if (s == nullptr) { // might be optimized away
- data.push_back(ATNState::ATN_INVALID_TYPE);
- continue;
- }
-
- size_t stateType = s->getStateType();
- if (is<DecisionState*>(s) && (static_cast<DecisionState*>(s))->nonGreedy) {
- nonGreedyStates.push_back(s->stateNumber);
- }
-
- if (is<RuleStartState*>(s) &&
- (static_cast<RuleStartState*>(s))->isLeftRecursiveRule) {
- precedenceStates.push_back(s->stateNumber);
- }
-
- data.push_back(stateType);
-
- if (s->ruleIndex == INVALID_INDEX) {
- data.push_back(0xFFFF);
- } else {
- data.push_back(s->ruleIndex);
- }
-
- if (s->getStateType() == ATNState::LOOP_END) {
- data.push_back(
- (static_cast<LoopEndState*>(s))->loopBackState->stateNumber);
- } else if (is<BlockStartState*>(s)) {
- data.push_back((static_cast<BlockStartState*>(s))->endState->stateNumber);
- }
-
- if (s->getStateType() != ATNState::RULE_STOP) {
- // the deserializer can trivially derive these edges, so there's no need
- // to serialize them
- nedges += s->transitions.size();
- }
-
- for (size_t i = 0; i < s->transitions.size(); i++) {
- Transition* t = s->transitions[i];
- Transition::SerializationType edgeType = t->getSerializationType();
- if (edgeType == Transition::SET || edgeType == Transition::NOT_SET) {
- SetTransition* st = static_cast<SetTransition*>(t);
- if (setIndices.find(st->set) == setIndices.end()) {
- sets.push_back(st->set);
- setIndices.insert({st->set, (int)sets.size() - 1});
- }
- }
- }
- }
-
- // non-greedy states
- data.push_back(nonGreedyStates.size());
- for (size_t i = 0; i < nonGreedyStates.size(); i++) {
- data.push_back(nonGreedyStates.at(i));
- }
-
- // precedence states
- data.push_back(precedenceStates.size());
- for (size_t i = 0; i < precedenceStates.size(); i++) {
- data.push_back(precedenceStates.at(i));
- }
-
- size_t nrules = atn->ruleToStartState.size();
- data.push_back(nrules);
- for (size_t r = 0; r < nrules; r++) {
- ATNState* ruleStartState = atn->ruleToStartState[r];
- data.push_back(ruleStartState->stateNumber);
- if (atn->grammarType == ATNType::LEXER) {
- if (atn->ruleToTokenType[r] == Token::EOF) {
- data.push_back(0xFFFF);
- } else {
- data.push_back(atn->ruleToTokenType[r]);
- }
- }
- }
-
- size_t nmodes = atn->modeToStartState.size();
- data.push_back(nmodes);
- if (nmodes > 0) {
- for (const auto& modeStartState : atn->modeToStartState) {
- data.push_back(modeStartState->stateNumber);
- }
- }
-
- size_t nsets = sets.size();
- data.push_back(nsets);
- for (auto set : sets) {
- bool containsEof = set.contains(Token::EOF);
- if (containsEof && set.getIntervals().at(0).b == -1) {
- data.push_back(set.getIntervals().size() - 1);
- } else {
- data.push_back(set.getIntervals().size());
- }
-
- data.push_back(containsEof ? 1 : 0);
- for (auto& interval : set.getIntervals()) {
- if (interval.a == -1) {
- if (interval.b == -1) {
- continue;
- } else {
- data.push_back(0);
- }
- } else {
- data.push_back(interval.a);
- }
-
- data.push_back(interval.b);
- }
- }
-
- data.push_back(nedges);
- for (ATNState* s : atn->states) {
- if (s == nullptr) {
- // might be optimized away
- continue;
- }
-
- if (s->getStateType() == ATNState::RULE_STOP) {
- continue;
- }
-
- for (size_t i = 0; i < s->transitions.size(); i++) {
- Transition* t = s->transitions[i];
-
- if (atn->states[t->target->stateNumber] == nullptr) {
- throw IllegalStateException(
- "Cannot serialize a transition to a removed state.");
- }
-
- size_t src = s->stateNumber;
- size_t trg = t->target->stateNumber;
- Transition::SerializationType edgeType = t->getSerializationType();
- size_t arg1 = 0;
- size_t arg2 = 0;
- size_t arg3 = 0;
- switch (edgeType) {
- case Transition::RULE:
- trg = (static_cast<RuleTransition*>(t))->followState->stateNumber;
- arg1 = (static_cast<RuleTransition*>(t))->target->stateNumber;
- arg2 = (static_cast<RuleTransition*>(t))->ruleIndex;
- arg3 = (static_cast<RuleTransition*>(t))->precedence;
- break;
- case Transition::PRECEDENCE: {
- PrecedencePredicateTransition* ppt =
- static_cast<PrecedencePredicateTransition*>(t);
- arg1 = ppt->precedence;
- } break;
- case Transition::PREDICATE: {
- PredicateTransition* pt = static_cast<PredicateTransition*>(t);
- arg1 = pt->ruleIndex;
- arg2 = pt->predIndex;
- arg3 = pt->isCtxDependent ? 1 : 0;
- } break;
- case Transition::RANGE:
- arg1 = (static_cast<RangeTransition*>(t))->from;
- arg2 = (static_cast<RangeTransition*>(t))->to;
- if (arg1 == Token::EOF) {
- arg1 = 0;
- arg3 = 1;
- }
-
- break;
- case Transition::ATOM:
- arg1 = (static_cast<AtomTransition*>(t))->_label;
- if (arg1 == Token::EOF) {
- arg1 = 0;
- arg3 = 1;
- }
-
- break;
- case Transition::ACTION: {
- ActionTransition* at = static_cast<ActionTransition*>(t);
- arg1 = at->ruleIndex;
- arg2 = at->actionIndex;
- if (arg2 == INVALID_INDEX) {
- arg2 = 0xFFFF;
- }
-
- arg3 = at->isCtxDependent ? 1 : 0;
- } break;
- case Transition::SET:
- arg1 = setIndices[(static_cast<SetTransition*>(t))->set];
- break;
-
- case Transition::NOT_SET:
- arg1 = setIndices[(static_cast<SetTransition*>(t))->set];
- break;
-
- default:
- break;
- }
-
- data.push_back(src);
- data.push_back(trg);
- data.push_back(edgeType);
- data.push_back(arg1);
- data.push_back(arg2);
- data.push_back(arg3);
- }
- }
-
- size_t ndecisions = atn->decisionToState.size();
- data.push_back(ndecisions);
- for (DecisionState* decStartState : atn->decisionToState) {
- data.push_back(decStartState->stateNumber);
- }
-
- // LEXER ACTIONS
- if (atn->grammarType == ATNType::LEXER) {
- data.push_back(atn->lexerActions.size());
- for (Ref<LexerAction>& action : atn->lexerActions) {
- data.push_back(static_cast<size_t>(action->getActionType()));
- switch (action->getActionType()) {
- case LexerActionType::CHANNEL: {
- int channel = std::dynamic_pointer_cast<LexerChannelAction>(action)
- ->getChannel();
- data.push_back(channel != -1 ? channel : 0xFFFF);
- data.push_back(0);
- break;
- }
-
- case LexerActionType::CUSTOM: {
- size_t ruleIndex =
- std::dynamic_pointer_cast<LexerCustomAction>(action)
- ->getRuleIndex();
- size_t actionIndex =
- std::dynamic_pointer_cast<LexerCustomAction>(action)
- ->getActionIndex();
- data.push_back(ruleIndex != INVALID_INDEX ? ruleIndex : 0xFFFF);
- data.push_back(actionIndex != INVALID_INDEX ? actionIndex : 0xFFFF);
- break;
- }
-
- case LexerActionType::MODE: {
- int mode =
- std::dynamic_pointer_cast<LexerModeAction>(action)->getMode();
- data.push_back(mode != -1 ? mode : 0xFFFF);
- data.push_back(0);
- break;
- }
-
- case LexerActionType::MORE:
- data.push_back(0);
- data.push_back(0);
- break;
-
- case LexerActionType::POP_MODE:
- data.push_back(0);
- data.push_back(0);
- break;
-
- case LexerActionType::PUSH_MODE: {
- int mode =
- std::dynamic_pointer_cast<LexerPushModeAction>(action)->getMode();
- data.push_back(mode != -1 ? mode : 0xFFFF);
- data.push_back(0);
- break;
- }
-
- case LexerActionType::SKIP:
- data.push_back(0);
- data.push_back(0);
- break;
-
- case LexerActionType::TYPE: {
- int type =
- std::dynamic_pointer_cast<LexerTypeAction>(action)->getType();
- data.push_back(type != -1 ? type : 0xFFFF);
- data.push_back(0);
- break;
- }
-
- default:
- throw IllegalArgumentException(
- "The specified lexer action type " +
- std::to_string(static_cast<size_t>(action->getActionType())) +
- " is not valid.");
- }
- }
- }
-
- // don't adjust the first value since that's the version number
- for (size_t i = 1; i < data.size(); i++) {
- if (data.at(i) > 0xFFFF) {
- throw UnsupportedOperationException(
- "Serialized ATN data element out of range.");
- }
-
- size_t value = (data.at(i) + 2) & 0xFFFF;
- data.at(i) = value;
- }
-
- return data;
-}
-
-//------------------------------------------------------------------------------------------------------------
-
-std::string ATNSerializer::decode(const std::wstring& inpdata) {
- if (inpdata.size() < 10)
- throw IllegalArgumentException("Not enough data to decode");
-
- std::vector<uint16_t> data(inpdata.size());
- data[0] = (uint16_t)inpdata[0];
-
- // Don't adjust the first value since that's the version number.
- for (size_t i = 1; i < inpdata.size(); ++i) {
- data[i] = (uint16_t)inpdata[i] - 2;
- }
-
- std::string buf;
- size_t p = 0;
- size_t version = data[p++];
- if (version != ATNDeserializer::SERIALIZED_VERSION) {
- std::string reason = "Could not deserialize ATN with version " +
- std::to_string(version) + " (expected " +
- std::to_string(ATNDeserializer::SERIALIZED_VERSION) +
- ").";
- throw UnsupportedOperationException("ATN Serializer: " + reason);
- }
-
- Guid uuid = ATNDeserializer::toUUID(data.data(), p);
- p += 8;
- if (uuid != ATNDeserializer::SERIALIZED_UUID()) {
- std::string reason = "Could not deserialize ATN with UUID " +
- uuid.toString() + " (expected " +
- ATNDeserializer::SERIALIZED_UUID().toString() + ").";
- throw UnsupportedOperationException("ATN Serializer: " + reason);
- }
-
- p++; // skip grammarType
- size_t maxType = data[p++];
- buf.append("max type ").append(std::to_string(maxType)).append("\n");
- size_t nstates = data[p++];
- for (size_t i = 0; i < nstates; i++) {
- size_t stype = data[p++];
- if (stype == ATNState::ATN_INVALID_TYPE) { // ignore bad type of states
- continue;
- }
- size_t ruleIndex = data[p++];
- if (ruleIndex == 0xFFFF) {
- ruleIndex = INVALID_INDEX;
- }
-
- std::string arg = "";
- if (stype == ATNState::LOOP_END) {
- int loopBackStateNumber = data[p++];
- arg = std::string(" ") + std::to_string(loopBackStateNumber);
- } else if (stype == ATNState::PLUS_BLOCK_START ||
- stype == ATNState::STAR_BLOCK_START ||
- stype == ATNState::BLOCK_START) {
- int endStateNumber = data[p++];
- arg = std::string(" ") + std::to_string(endStateNumber);
- }
- buf.append(std::to_string(i))
- .append(":")
- .append(ATNState::serializationNames[stype])
- .append(" ")
- .append(std::to_string(ruleIndex))
- .append(arg)
- .append("\n");
- }
- size_t numNonGreedyStates = data[p++];
- p += numNonGreedyStates; // Instead of that useless loop below.
- /*
- for (int i = 0; i < numNonGreedyStates; i++) {
- int stateNumber = data[p++];
- }
- */
-
- size_t numPrecedenceStates = data[p++];
- p += numPrecedenceStates;
- /*
- for (int i = 0; i < numPrecedenceStates; i++) {
- int stateNumber = data[p++];
- }
- */
-
- size_t nrules = data[p++];
- for (size_t i = 0; i < nrules; i++) {
- size_t s = data[p++];
- if (atn->grammarType == ATNType::LEXER) {
- size_t arg1 = data[p++];
- buf.append("rule ")
- .append(std::to_string(i))
- .append(":")
- .append(std::to_string(s))
- .append(" ")
- .append(std::to_string(arg1))
- .append("\n");
- } else {
- buf.append("rule ")
- .append(std::to_string(i))
- .append(":")
- .append(std::to_string(s))
- .append("\n");
- }
- }
- size_t nmodes = data[p++];
- for (size_t i = 0; i < nmodes; i++) {
- size_t s = data[p++];
- buf.append("mode ")
- .append(std::to_string(i))
- .append(":")
- .append(std::to_string(s))
- .append("\n");
- }
- size_t nsets = data[p++];
- for (size_t i = 0; i < nsets; i++) {
- size_t nintervals = data[p++];
- buf.append(std::to_string(i)).append(":");
- bool containsEof = data[p++] != 0;
- if (containsEof) {
- buf.append(getTokenName(Token::EOF));
- }
-
- for (size_t j = 0; j < nintervals; j++) {
- if (containsEof || j > 0) {
- buf.append(", ");
- }
-
- buf.append(getTokenName(data[p]))
- .append("..")
- .append(getTokenName(data[p + 1]));
- p += 2;
- }
- buf.append("\n");
- }
- size_t nedges = data[p++];
- for (size_t i = 0; i < nedges; i++) {
- size_t src = data[p];
- size_t trg = data[p + 1];
- size_t ttype = data[p + 2];
- size_t arg1 = data[p + 3];
- size_t arg2 = data[p + 4];
- size_t arg3 = data[p + 5];
- buf.append(std::to_string(src))
- .append("->")
- .append(std::to_string(trg))
- .append(" ")
- .append(Transition::serializationNames[ttype])
- .append(" ")
- .append(std::to_string(arg1))
- .append(",")
- .append(std::to_string(arg2))
- .append(",")
- .append(std::to_string(arg3))
- .append("\n");
- p += 6;
- }
- size_t ndecisions = data[p++];
- for (size_t i = 0; i < ndecisions; i++) {
- size_t s = data[p++];
- buf += std::to_string(i) + ":" + std::to_string(s) + "\n";
- }
-
- if (atn->grammarType == ATNType::LEXER) {
- // int lexerActionCount = data[p++];
-
- // p += lexerActionCount * 3; // Instead of useless loop below.
- /*
- for (int i = 0; i < lexerActionCount; i++) {
- LexerActionType actionType = (LexerActionType)data[p++];
- int data1 = data[p++];
- int data2 = data[p++];
- }
- */
- }
-
- return buf;
-}
-
-std::string ATNSerializer::getTokenName(size_t t) {
- if (t == Token::EOF) {
- return "EOF";
- }
-
- if (atn->grammarType == ATNType::LEXER && t <= 0x10FFFF) {
- switch (t) {
- case '\n':
- return "'\\n'";
- case '\r':
- return "'\\r'";
- case '\t':
- return "'\\t'";
- case '\b':
- return "'\\b'";
- case '\f':
- return "'\\f'";
- case '\\':
- return "'\\\\'";
- case '\'':
- return "'\\''";
- default:
- std::string s_hex = antlrcpp::toHexString((int)t);
- if (s_hex >= "0" && s_hex <= "7F" && !iscntrl((int)t)) {
- return "'" + std::to_string(t) + "'";
- }
-
- // turn on the bit above max "\u10FFFF" value so that we pad with zeros
- // then only take last 6 digits
- std::string hex =
- antlrcpp::toHexString((int)t | 0x1000000).substr(1, 6);
- std::string unicodeStr = std::string("'\\u") + hex + std::string("'");
- return unicodeStr;
- }
- }
-
- if (_tokenNames.size() > 0 && t < _tokenNames.size()) {
- return _tokenNames[t];
- }
-
- return std::to_string(t);
-}
-
-std::wstring ATNSerializer::getSerializedAsString(ATN* atn) {
- std::vector<size_t> data = getSerialized(atn);
- std::wstring result;
- for (size_t entry : data) result.push_back((wchar_t)entry);
-
- return result;
-}
-
-std::vector<size_t> ATNSerializer::getSerialized(ATN* atn) {
- return ATNSerializer(atn).serialize();
-}
-
-std::string ATNSerializer::getDecoded(ATN* atn,
- std::vector<std::string>& tokenNames) {
- std::wstring serialized = getSerializedAsString(atn);
- return ATNSerializer(atn, tokenNames).decode(serialized);
-}
-
-void ATNSerializer::serializeUUID(std::vector<size_t>& data, Guid uuid) {
- unsigned int twoBytes = 0;
- bool firstByte = true;
- for (std::vector<unsigned char>::const_reverse_iterator rit = uuid.rbegin();
- rit != uuid.rend(); ++rit) {
- if (firstByte) {
- twoBytes = *rit;
- firstByte = false;
- } else {
- twoBytes |= (*rit << 8);
- data.push_back(twoBytes);
- firstByte = true;
- }
- }
- if (!firstByte)
- throw IllegalArgumentException(
- "The UUID provided is not valid (odd number of bytes).");
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.h
deleted file mode 100644
index d2d9f02a35..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSerializer.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ATNSerializer {
- public:
- ATN* atn;
-
- ATNSerializer(ATN* atn);
- ATNSerializer(ATN* atn, const std::vector<std::string>& tokenNames);
- virtual ~ATNSerializer();
-
- /// <summary>
- /// Serialize state descriptors, edge descriptors, and decision->state map
- /// into list of ints:
- ///
- /// grammar-type, (ANTLRParser.LEXER, ...)
- /// max token type,
- /// num states,
- /// state-0-type ruleIndex, state-1-type ruleIndex, ... state-i-type
- /// ruleIndex optional-arg ...
- /// num rules,
- /// rule-1-start-state rule-1-args, rule-2-start-state rule-2-args, ...
- /// (args are token type,actionIndex in lexer else 0,0)
- /// num modes,
- /// mode-0-start-state, mode-1-start-state, ... (parser has 0 modes)
- /// num sets
- /// set-0-interval-count intervals, set-1-interval-count intervals, ...
- /// num total edges,
- /// src, trg, edge-type, edge arg1, optional edge arg2 (present always),
- /// ...
- /// num decisions,
- /// decision-0-start-state, decision-1-start-state, ...
- ///
- /// Convenient to pack into unsigned shorts so the data can be stored as a Java string.
- /// </summary>
- virtual std::vector<size_t> serialize();
-
- virtual std::string decode(const std::wstring& data);
- virtual std::string getTokenName(size_t t);
-
- /// Used by Java target to encode short/int array as chars in string.
- static std::wstring getSerializedAsString(ATN* atn);
- static std::vector<size_t> getSerialized(ATN* atn);
-
- static std::string getDecoded(ATN* atn, std::vector<std::string>& tokenNames);
-
- private:
- std::vector<std::string> _tokenNames;
-
- void serializeUUID(std::vector<size_t>& data, Guid uuid);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp
deleted file mode 100644
index 540ec95ebf..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNDeserializer.h"
-#include "atn/ATNType.h"
-#include "atn/EmptyPredictionContext.h"
-#include "dfa/DFAState.h"
-
-#include "atn/ATNSimulator.h"
-
-using namespace antlr4;
-using namespace antlr4::dfa;
-using namespace antlr4::atn;
-
-const Ref<DFAState> ATNSimulator::ERROR_STATE =
- std::make_shared<DFAState>(INT32_MAX);
-antlrcpp::SingleWriteMultipleReadLock ATNSimulator::_stateLock;
-antlrcpp::SingleWriteMultipleReadLock ATNSimulator::_edgeLock;
-
-ATNSimulator::ATNSimulator(const ATN& atn,
- PredictionContextCache& sharedContextCache)
- : atn(atn), _sharedContextCache(sharedContextCache) {}
-
-ATNSimulator::~ATNSimulator() {}
-
-void ATNSimulator::clearDFA() {
- throw UnsupportedOperationException(
- "This ATN simulator does not support clearing the DFA.");
-}
-
-PredictionContextCache& ATNSimulator::getSharedContextCache() {
- return _sharedContextCache;
-}
-
-Ref<PredictionContext> ATNSimulator::getCachedContext(
- Ref<PredictionContext> const& context) {
- // This function must only be called with an active state lock, as we are
- // going to change a shared structure.
- std::map<Ref<PredictionContext>, Ref<PredictionContext>> visited;
- return PredictionContext::getCachedContext(context, _sharedContextCache,
- visited);
-}
-
-ATN ATNSimulator::deserialize(const std::vector<uint16_t>& data) {
- ATNDeserializer deserializer;
- return deserializer.deserialize(data);
-}
-
-void ATNSimulator::checkCondition(bool condition) {
- ATNDeserializer::checkCondition(condition);
-}
-
-void ATNSimulator::checkCondition(bool condition, const std::string& message) {
- ATNDeserializer::checkCondition(condition, message);
-}
-
-Transition* ATNSimulator::edgeFactory(
- const ATN& atn, int type, int src, int trg, int arg1, int arg2, int arg3,
- const std::vector<misc::IntervalSet>& sets) {
- return ATNDeserializer::edgeFactory(atn, type, src, trg, arg1, arg2, arg3,
- sets);
-}
-
-ATNState* ATNSimulator::stateFactory(int type, int ruleIndex) {
- return ATNDeserializer::stateFactory(type, ruleIndex);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.h
deleted file mode 100644
index a43ed7f691..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNSimulator.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATN.h"
-#include "atn/PredictionContext.h"
-#include "misc/IntervalSet.h"
-#include "support/CPPUtils.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ATNSimulator {
- public:
- /// Must distinguish between missing edge and edge we know leads nowhere.
- static const Ref<dfa::DFAState> ERROR_STATE;
- const ATN& atn;
-
- ATNSimulator(const ATN& atn, PredictionContextCache& sharedContextCache);
- virtual ~ATNSimulator();
-
- virtual void reset() = 0;
-
- /**
- * Clear the DFA cache used by the current instance. Since the DFA cache may
- * be shared by multiple ATN simulators, this method may affect the
- * performance (but not accuracy) of other parsers which are being used
- * concurrently.
- *
- * @throws UnsupportedOperationException if the current instance does not
- * support clearing the DFA.
- *
- * @since 4.3
- */
- virtual void clearDFA();
- virtual PredictionContextCache& getSharedContextCache();
- virtual Ref<PredictionContext> getCachedContext(
- Ref<PredictionContext> const& context);
-
- /// @deprecated Use <seealso cref="ATNDeserializer#deserialize"/> instead.
- static ATN deserialize(const std::vector<uint16_t>& data);
-
- /// @deprecated Use <seealso cref="ATNDeserializer#checkCondition(boolean)"/>
- /// instead.
- static void checkCondition(bool condition);
-
- /// @deprecated Use <seealso cref="ATNDeserializer#checkCondition(boolean,
- /// String)"/> instead.
- static void checkCondition(bool condition, const std::string& message);
-
- /// @deprecated Use <seealso cref="ATNDeserializer#edgeFactory"/> instead.
- static Transition* edgeFactory(const ATN& atn, int type, int src, int trg,
- int arg1, int arg2, int arg3,
- const std::vector<misc::IntervalSet>& sets);
-
- /// @deprecated Use <seealso cref="ATNDeserializer#stateFactory"/> instead.
- static ATNState* stateFactory(int type, int ruleIndex);
-
- protected:
- static antlrcpp::SingleWriteMultipleReadLock
- _stateLock; // Lock for DFA states.
- static antlrcpp::SingleWriteMultipleReadLock
- _edgeLock; // Lock for the sparse edge map in DFA states.
-
- /// <summary>
- /// The context cache maps all PredictionContext objects that are equals()
- /// to a single cached copy. This cache is shared across all contexts
- /// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
- /// to use only cached nodes/graphs in addDFAState(). We don't want to
- /// fill this during closure() since there are lots of contexts that
- /// pop up but are not used ever again. It also greatly slows down closure().
- /// <p/>
- /// This cache makes a huge difference in memory and a little bit in speed.
- /// For the Java grammar on java.*, it dropped the memory requirements
- /// at the end from 25M to 16M. We don't store any of the full context
- /// graphs in the DFA because they are limited to local context only,
- /// but apparently there's a lot of repetition there as well. We optimize
- /// the config contexts before storing the config set in the DFA states
- /// by literally rebuilding them with cached subgraphs only.
- /// <p/>
- /// I tried a cache for use during closure operations, that was
- /// whacked after each adaptivePredict(). It cost a little bit
- /// more time I think and doesn't save on the overall footprint
- /// so it's not worth the complexity.
- /// </summary>
- PredictionContextCache& _sharedContextCache;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.cpp
deleted file mode 100644
index 724772b8e0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATN.h"
-#include "atn/Transition.h"
-#include "misc/IntervalSet.h"
-#include "support/CPPUtils.h"
-
-#include "atn/ATNState.h"
-
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-ATNState::ATNState() {}
-
-ATNState::~ATNState() {
- for (auto transition : transitions) {
- delete transition;
- }
-}
-
-const std::vector<std::string> ATNState::serializationNames = {
- "INVALID", "BASIC",
- "RULE_START", "BLOCK_START",
- "PLUS_BLOCK_START", "STAR_BLOCK_START",
- "TOKEN_START", "RULE_STOP",
- "BLOCK_END", "STAR_LOOP_BACK",
- "STAR_LOOP_ENTRY", "PLUS_LOOP_BACK",
- "LOOP_END"};
-
-size_t ATNState::hashCode() { return stateNumber; }
-
-bool ATNState::operator==(const ATNState& other) {
- return stateNumber == other.stateNumber;
-}
-
-bool ATNState::isNonGreedyExitState() { return false; }
-
-std::string ATNState::toString() const { return std::to_string(stateNumber); }
-
-void ATNState::addTransition(Transition* e) {
- addTransition(transitions.size(), e);
-}
-
-void ATNState::addTransition(size_t index, Transition* e) {
- for (Transition* transition : transitions)
- if (transition->target->stateNumber == e->target->stateNumber) {
- delete e;
- return;
- }
-
- if (transitions.empty()) {
- epsilonOnlyTransitions = e->isEpsilon();
- } else if (epsilonOnlyTransitions != e->isEpsilon()) {
- std::cerr << "ATN state %d has both epsilon and non-epsilon transitions.\n"
- << stateNumber;
- epsilonOnlyTransitions = false;
- }
-
- transitions.insert(transitions.begin() + index, e);
-}
-
-Transition* ATNState::removeTransition(size_t index) {
- Transition* result = transitions[index];
- transitions.erase(transitions.begin() + index);
- return result;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.h
deleted file mode 100644
index 72f90fa624..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNState.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "misc/IntervalSet.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// The following images show the relation of states and
-/// <seealso cref="ATNState#transitions"/> for various grammar constructs.
-///
-/// <ul>
-///
-/// <li>Solid edges marked with an &#0949; indicate a required
-/// <seealso cref="EpsilonTransition"/>.</li>
-///
-/// <li>Dashed edges indicate locations where any transition derived from
-/// <seealso cref="Transition"/> might appear.</li>
-///
-/// <li>Dashed nodes are placeholders for either a sequence of linked
-/// <seealso cref="BasicState"/> states or the inclusion of a block representing
-/// a nested construct in one of the forms below.</li>
-///
-/// <li>Nodes showing multiple outgoing alternatives with a {@code ...} support
-/// any number of alternatives (one or more). Nodes without the {@code ...} only
-/// support the exact number of alternatives shown in the diagram.</li>
-///
-/// </ul>
-///
-/// <h2>Basic Blocks</h2>
-///
-/// <h3>Rule</h3>
-///
-/// <embed src="images/Rule.svg" type="image/svg+xml"/>
-///
-/// <h3>Block of 1 or more alternatives</h3>
-///
-/// <embed src="images/Block.svg" type="image/svg+xml"/>
-///
-/// <h2>Greedy Loops</h2>
-///
-/// <h3>Greedy Closure: {@code (...)*}</h3>
-///
-/// <embed src="images/ClosureGreedy.svg" type="image/svg+xml"/>
-///
-/// <h3>Greedy Positive Closure: {@code (...)+}</h3>
-///
-/// <embed src="images/PositiveClosureGreedy.svg" type="image/svg+xml"/>
-///
-/// <h3>Greedy Optional: {@code (...)?}</h3>
-///
-/// <embed src="images/OptionalGreedy.svg" type="image/svg+xml"/>
-///
-/// <h2>Non-Greedy Loops</h2>
-///
-/// <h3>Non-Greedy Closure: {@code (...)*?}</h3>
-///
-/// <embed src="images/ClosureNonGreedy.svg" type="image/svg+xml"/>
-///
-/// <h3>Non-Greedy Positive Closure: {@code (...)+?}</h3>
-///
-/// <embed src="images/PositiveClosureNonGreedy.svg" type="image/svg+xml"/>
-///
-/// <h3>Non-Greedy Optional: {@code (...)??}</h3>
-///
-/// <embed src="images/OptionalNonGreedy.svg" type="image/svg+xml"/>
-/// </summary>
-class ATN;
-
-class ANTLR4CPP_PUBLIC ATNState {
- public:
- ATNState();
- ATNState(ATNState const&) = delete;
-
- virtual ~ATNState();
-
- ATNState& operator=(ATNState const&) = delete;
-
- static const size_t INITIAL_NUM_TRANSITIONS = 4;
- static const size_t INVALID_STATE_NUMBER =
- static_cast<size_t>(-1); // std::numeric_limits<size_t>::max();
-
- enum {
- ATN_INVALID_TYPE = 0,
- BASIC = 1,
- RULE_START = 2,
- BLOCK_START = 3,
- PLUS_BLOCK_START = 4,
- STAR_BLOCK_START = 5,
- TOKEN_START = 6,
- RULE_STOP = 7,
- BLOCK_END = 8,
- STAR_LOOP_BACK = 9,
- STAR_LOOP_ENTRY = 10,
- PLUS_LOOP_BACK = 11,
- LOOP_END = 12
- };
-
- static const std::vector<std::string> serializationNames;
-
- size_t stateNumber = INVALID_STATE_NUMBER;
- size_t ruleIndex = 0; // at runtime, we don't have Rule objects
- bool epsilonOnlyTransitions = false;
-
- public:
- virtual size_t hashCode();
- bool operator==(const ATNState& other);
-
- /// Track the transitions emanating from this ATN state.
- std::vector<Transition*> transitions;
-
- virtual bool isNonGreedyExitState();
- virtual std::string toString() const;
- virtual void addTransition(Transition* e);
- virtual void addTransition(size_t index, Transition* e);
- virtual Transition* removeTransition(size_t index);
- virtual size_t getStateType() = 0;
-
- private:
- /// Used to cache lookahead during parsing, not used during construction.
-
- misc::IntervalSet _nextTokenWithinRule;
- std::atomic<bool> _nextTokenUpdated{false};
-
- friend class ATN;
-};
-
-} // namespace atn
-} // namespace antlr4
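
A small usage sketch of the state/transition API declared in the header above, assuming the ANTLR4 C++ runtime headers removed in this change; the owning state frees its transitions in ~ATNState(), and addTransition() keeps epsilonOnlyTransitions consistent with the edges added.

#include "atn/BasicState.h"
#include "atn/EpsilonTransition.h"

using namespace antlr4::atn;

int main() {
  BasicState from;
  BasicState to;
  from.stateNumber = 1;
  to.stateNumber = 2;

  // The target state is not owned; the transition object is, and it is freed
  // by ~ATNState() when `from` goes out of scope.
  from.addTransition(new EpsilonTransition(&to));

  // Only epsilon edges were added, so the flag remains true.
  return from.epsilonOnlyTransitions ? 0 : 1;
}
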
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNType.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNType.h
deleted file mode 100644
index eadb969988..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ATNType.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Represents the type of recognizer an ATN applies to.
-enum class ATNType {
- LEXER = 0,
- PARSER = 1,
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp
deleted file mode 100644
index 89ab981b87..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/AbstractPredicateTransition.h"
-
-using namespace antlr4::atn;
-
-AbstractPredicateTransition::AbstractPredicateTransition(ATNState* target)
- : Transition(target) {}
-
-AbstractPredicateTransition::~AbstractPredicateTransition() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h
deleted file mode 100644
index 5cf792ab92..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AbstractPredicateTransition.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ATNState;
-
-class ANTLR4CPP_PUBLIC AbstractPredicateTransition : public Transition {
- public:
- AbstractPredicateTransition(ATNState* target);
- ~AbstractPredicateTransition();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.cpp
deleted file mode 100644
index 688323003d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ActionTransition.h"
-
-using namespace antlr4::atn;
-
-ActionTransition::ActionTransition(ATNState* target, size_t ruleIndex)
- : Transition(target),
- ruleIndex(ruleIndex),
- actionIndex(INVALID_INDEX),
- isCtxDependent(false) {}
-
-ActionTransition::ActionTransition(ATNState* target, size_t ruleIndex,
- size_t actionIndex, bool isCtxDependent)
- : Transition(target),
- ruleIndex(ruleIndex),
- actionIndex(actionIndex),
- isCtxDependent(isCtxDependent) {}
-
-Transition::SerializationType ActionTransition::getSerializationType() const {
- return ACTION;
-}
-
-bool ActionTransition::isEpsilon() const {
-  return true; // we are to be ignored by analysis except for predicates
-}
-
-bool ActionTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string ActionTransition::toString() const {
- return " ACTION " + Transition::toString() +
- " { ruleIndex: " + std::to_string(ruleIndex) +
- ", actionIndex: " + std::to_string(actionIndex) +
- ", isCtxDependent: " + std::to_string(isCtxDependent) + " }";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.h
deleted file mode 100644
index 805a2bedb7..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ActionTransition.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ActionTransition final : public Transition {
- public:
- const size_t ruleIndex;
- const size_t actionIndex;
- const bool isCtxDependent; // e.g., $i ref in action
-
- ActionTransition(ATNState* target, size_t ruleIndex);
-
- ActionTransition(ATNState* target, size_t ruleIndex, size_t actionIndex,
- bool isCtxDependent);
-
- virtual SerializationType getSerializationType() const override;
-
- virtual bool isEpsilon() const override;
-
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp
deleted file mode 100644
index 6abd181b58..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/AmbiguityInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-AmbiguityInfo::AmbiguityInfo(size_t decision, ATNConfigSet* configs,
- const antlrcpp::BitSet& ambigAlts,
- TokenStream* input, size_t startIndex,
- size_t stopIndex, bool fullCtx)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex,
- fullCtx) {
- this->ambigAlts = ambigAlts;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h
deleted file mode 100644
index 65990347ae..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AmbiguityInfo.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-#include "support/BitSet.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// This class represents profiling event information for an ambiguity.
-/// Ambiguities are decisions where a particular input resulted in an SLL
-/// conflict, followed by LL prediction also reaching a conflict state
-/// (indicating a true ambiguity in the grammar).
-///
-/// <para>
-/// This event may be reported during SLL prediction in cases where the
-/// conflicting SLL configuration set provides sufficient information to
-/// determine that the SLL conflict is truly an ambiguity. For example, if none
-/// of the ATN configurations in the conflicting SLL configuration set have
-/// traversed a global follow transition (i.e.
-/// <seealso cref="ATNConfig#reachesIntoOuterContext"/> is 0 for all
-/// configurations), then the result of SLL prediction for that input is known
-/// to be equivalent to the result of LL prediction for that input.</para>
-///
-/// <para>
-/// In some cases, the minimum represented alternative in the conflicting LL
-/// configuration set is not equal to the minimum represented alternative in the
-/// conflicting SLL configuration set. Grammars and inputs which result in this
-/// scenario are unable to use <seealso cref="PredictionMode#SLL"/>, which in
-/// turn means they cannot use the two-stage parsing strategy to improve parsing
-/// performance for that input.</para>
-/// </summary>
-/// <seealso cref= ParserATNSimulator#reportAmbiguity </seealso>
-/// <seealso cref= ANTLRErrorListener#reportAmbiguity
-///
-/// @since 4.3 </seealso>
-class ANTLR4CPP_PUBLIC AmbiguityInfo : public DecisionEventInfo {
- public:
- /// The set of alternative numbers for this decision event that lead to a
- /// valid parse.
- antlrcpp::BitSet ambigAlts;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="AmbiguityInfo"/> class
- /// with the specified detailed ambiguity information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set identifying the
- /// ambiguous alternatives for the current input </param> <param
- /// name="ambigAlts"> The set of alternatives in the decision that lead to a
- /// valid parse.
- /// The predicted alt is the min(ambigAlts) </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction
- /// </param> <param name="stopIndex"> The index at which the ambiguity was
- /// identified during prediction </param> <param name="fullCtx"> {@code true}
- /// if the ambiguity was identified during LL prediction; otherwise, {@code
- /// false} if the ambiguity was identified during SLL prediction </param>
- AmbiguityInfo(size_t decision, ATNConfigSet* configs,
- const antlrcpp::BitSet& ambigAlts, TokenStream* input,
- size_t startIndex, size_t stopIndex, bool fullCtx);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp
deleted file mode 100644
index 9c2797819b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/SingletonPredictionContext.h"
-#include "support/Arrays.h"
-
-#include "atn/ArrayPredictionContext.h"
-
-using namespace antlr4::atn;
-
-ArrayPredictionContext::ArrayPredictionContext(
- Ref<SingletonPredictionContext> const& a)
- : ArrayPredictionContext({a->parent}, {a->returnState}) {}
-
-ArrayPredictionContext::ArrayPredictionContext(
- std::vector<Ref<PredictionContext>> const& parents_,
- std::vector<size_t> const& returnStates)
- : PredictionContext(calculateHashCode(parents_, returnStates)),
- parents(parents_),
- returnStates(returnStates) {
- assert(parents.size() > 0);
- assert(returnStates.size() > 0);
-}
-
-ArrayPredictionContext::~ArrayPredictionContext() {}
-
-bool ArrayPredictionContext::isEmpty() const {
- // Since EMPTY_RETURN_STATE can only appear in the last position, we don't
- // need to verify that size == 1.
- return returnStates[0] == EMPTY_RETURN_STATE;
-}
-
-size_t ArrayPredictionContext::size() const { return returnStates.size(); }
-
-Ref<PredictionContext> ArrayPredictionContext::getParent(size_t index) const {
- return parents[index];
-}
-
-size_t ArrayPredictionContext::getReturnState(size_t index) const {
- return returnStates[index];
-}
-
-bool ArrayPredictionContext::operator==(PredictionContext const& o) const {
- if (this == &o) {
- return true;
- }
-
- const ArrayPredictionContext* other =
- dynamic_cast<const ArrayPredictionContext*>(&o);
- if (other == nullptr || hashCode() != other->hashCode()) {
- return false; // can't be same if hash is different
- }
-
- return antlrcpp::Arrays::equals(returnStates, other->returnStates) &&
- antlrcpp::Arrays::equals(parents, other->parents);
-}
-
-std::string ArrayPredictionContext::toString() const {
- if (isEmpty()) {
- return "[]";
- }
-
- std::stringstream ss;
- ss << "[";
- for (size_t i = 0; i < returnStates.size(); i++) {
- if (i > 0) {
- ss << ", ";
- }
- if (returnStates[i] == EMPTY_RETURN_STATE) {
- ss << "$";
- continue;
- }
- ss << returnStates[i];
- if (parents[i] != nullptr) {
- ss << " " << parents[i]->toString();
- } else {
- ss << "nul";
- }
- }
- ss << "]";
- return ss.str();
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h
deleted file mode 100644
index bd5560d4bf..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ArrayPredictionContext.h
+++ /dev/null
@@ -1,45 +0,0 @@
-
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/PredictionContext.h"
-
-namespace antlr4 {
-namespace atn {
-
-class SingletonPredictionContext;
-
-class ANTLR4CPP_PUBLIC ArrayPredictionContext : public PredictionContext {
- public:
- /// Parent can be empty only if full ctx mode and we make an array
- /// from EMPTY and non-empty. We merge EMPTY by using null parent and
- /// returnState == EMPTY_RETURN_STATE.
- // Also here: we use a strong reference to our parents to avoid having them
- // freed prematurely.
- // See also SinglePredictionContext.
- const std::vector<Ref<PredictionContext>> parents;
-
- /// Sorted for merge, no duplicates; if present, EMPTY_RETURN_STATE is always
- /// last.
- const std::vector<size_t> returnStates;
-
- ArrayPredictionContext(Ref<SingletonPredictionContext> const& a);
- ArrayPredictionContext(std::vector<Ref<PredictionContext>> const& parents_,
- std::vector<size_t> const& returnStates);
- virtual ~ArrayPredictionContext();
-
- virtual bool isEmpty() const override;
- virtual size_t size() const override;
- virtual Ref<PredictionContext> getParent(size_t index) const override;
- virtual size_t getReturnState(size_t index) const override;
- bool operator==(const PredictionContext& o) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.cpp
deleted file mode 100644
index 21f54c9149..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/Transition.h"
-#include "misc/IntervalSet.h"
-
-#include "atn/AtomTransition.h"
-
-using namespace antlr4::misc;
-using namespace antlr4::atn;
-
-AtomTransition::AtomTransition(ATNState* target, size_t label)
- : Transition(target), _label(label) {}
-
-Transition::SerializationType AtomTransition::getSerializationType() const {
- return ATOM;
-}
-
-IntervalSet AtomTransition::label() const {
- return IntervalSet::of((int)_label);
-}
-
-bool AtomTransition::matches(size_t symbol, size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return _label == symbol;
-}
-
-std::string AtomTransition::toString() const {
- return "ATOM " + Transition::toString() +
- " { label: " + std::to_string(_label) + " }";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.h
deleted file mode 100644
index 93d80df080..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/AtomTransition.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// TO_DO: make all transitions sets? no, should remove set edges.
-class ANTLR4CPP_PUBLIC AtomTransition final : public Transition {
- public:
- /// The token type or character value; or, signifies special label.
- const size_t _label;
-
- AtomTransition(ATNState* target, size_t label);
-
- virtual SerializationType getSerializationType() const override;
-
- virtual misc::IntervalSet label() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp
deleted file mode 100644
index 59028137b3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/BasicBlockStartState.h"
-
-using namespace antlr4::atn;
-
-size_t BasicBlockStartState::getStateType() { return BLOCK_START; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h
deleted file mode 100644
index 5af123dacb..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicBlockStartState.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-#include "atn/BlockStartState.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC BasicBlockStartState final : public BlockStartState {
- public:
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.cpp
deleted file mode 100644
index 99b3523349..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/BasicState.h"
-
-using namespace antlr4::atn;
-
-size_t BasicState::getStateType() { return BASIC; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.h
deleted file mode 100644
index dc7a2e6d91..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BasicState.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC BasicState final : public ATNState {
- public:
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.cpp
deleted file mode 100644
index 5ca0b7229a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/BlockEndState.h"
-
-using namespace antlr4::atn;
-
-BlockEndState::BlockEndState() : startState(nullptr) {}
-
-size_t BlockEndState::getStateType() { return BLOCK_END; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.h
deleted file mode 100644
index 371a2071d5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockEndState.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Terminal node of a simple {@code (a|b|c)} block.
-class ANTLR4CPP_PUBLIC BlockEndState final : public ATNState {
- public:
- BlockStartState* startState = nullptr;
-
- BlockEndState();
-
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.cpp
deleted file mode 100644
index 881c91efe5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "BlockStartState.h"
-
-antlr4::atn::BlockStartState::~BlockStartState() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.h
deleted file mode 100644
index 7a91ca0b49..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/BlockStartState.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// The start of a regular {@code (...)} block.
-class ANTLR4CPP_PUBLIC BlockStartState : public DecisionState {
- public:
- ~BlockStartState();
- BlockEndState* endState = nullptr;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp
deleted file mode 100644
index 0f600b9016..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ContextSensitivityInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-ContextSensitivityInfo::ContextSensitivityInfo(size_t decision,
- ATNConfigSet* configs,
- TokenStream* input,
- size_t startIndex,
- size_t stopIndex)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex, true) {
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h
deleted file mode 100644
index 595a2a3bbf..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ContextSensitivityInfo.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// This class represents profiling event information for a context sensitivity.
-/// Context sensitivities are decisions where a particular input resulted in an
-/// SLL conflict, but LL prediction produced a single unique alternative.
-///
-/// <para>
-/// In some cases, the unique alternative identified by LL prediction is not
-/// equal to the minimum represented alternative in the conflicting SLL
-/// configuration set. Grammars and inputs which result in this scenario are
-/// unable to use <seealso cref="PredictionMode#SLL"/>, which in turn means they
-/// cannot use the two-stage parsing strategy to improve parsing performance for
-/// that input.</para>
-/// </summary>
-/// <seealso cref= ParserATNSimulator#reportContextSensitivity </seealso>
-/// <seealso cref= ANTLRErrorListener#reportContextSensitivity
-///
-/// @since 4.3 </seealso>
-class ANTLR4CPP_PUBLIC ContextSensitivityInfo : public DecisionEventInfo {
- public:
- /// <summary>
- /// Constructs a new instance of the <seealso cref="ContextSensitivityInfo"/>
- /// class with the specified detailed context sensitivity information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set containing the unique
- /// alternative identified by full-context prediction </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction
- /// </param> <param name="stopIndex"> The index at which the context
- /// sensitivity was identified during full-context prediction </param>
- ContextSensitivityInfo(size_t decision, ATNConfigSet* configs,
- TokenStream* input, size_t startIndex,
- size_t stopIndex);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp
deleted file mode 100644
index 2637cf1b1f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/DecisionEventInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-DecisionEventInfo::DecisionEventInfo(size_t decision, ATNConfigSet* configs,
- TokenStream* input, size_t startIndex,
- size_t stopIndex, bool fullCtx)
- : decision(decision),
- configs(configs),
- input(input),
- startIndex(startIndex),
- stopIndex(stopIndex),
- fullCtx(fullCtx) {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h
deleted file mode 100644
index 393fd27665..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionEventInfo.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// This is the base class for gathering detailed information about prediction
-/// events which occur during parsing.
-///
-/// Note that we could record the parser call stack at the time this event
-/// occurred but in the presence of left recursive rules, the stack is kind of
-/// meaningless. It's better to look at the individual configurations for their
-/// individual stacks. Of course that is a <seealso cref="PredictionContext"/>
-/// object not a parse tree node and so it does not have information about the
-/// extent (start...stop) of the various subtrees. Examining the stack tops of
-/// all configurations provides the return states for the rule invocations. From
-/// there you can get the enclosing rule.
-///
-/// @since 4.3
-/// </summary>
-class ANTLR4CPP_PUBLIC DecisionEventInfo {
- public:
- /// <summary>
- /// The invoked decision number which this event is related to.
- /// </summary>
- /// <seealso cref= ATN#decisionToState </seealso>
- const size_t decision;
-
- /// <summary>
- /// The configuration set containing additional information relevant to the
- /// prediction state when the current event occurred, or {@code null} if no
- /// additional information is relevant or available.
- /// </summary>
- const ATNConfigSet* configs;
-
- /// <summary>
- /// The input token stream which is being parsed.
- /// </summary>
- const TokenStream* input;
-
- /// <summary>
- /// The token index in the input stream at which the current prediction was
- /// originally invoked.
- /// </summary>
- const size_t startIndex;
-
- /// <summary>
- /// The token index in the input stream at which the current event occurred.
- /// </summary>
- const size_t stopIndex;
-
- /// <summary>
- /// {@code true} if the current event occurred during LL prediction;
-  /// otherwise, {@code false} if the event occurred during SLL prediction.
- /// </summary>
- const bool fullCtx;
-
- DecisionEventInfo(size_t decision, ATNConfigSet* configs, TokenStream* input,
- size_t startIndex, size_t stopIndex, bool fullCtx);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp
deleted file mode 100644
index 2bf295b085..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ErrorInfo.h"
-#include "atn/LookaheadEventInfo.h"
-
-#include "atn/DecisionInfo.h"
-
-using namespace antlr4::atn;
-
-DecisionInfo::DecisionInfo(size_t decision) : decision(decision) {}
-
-std::string DecisionInfo::toString() const {
- std::stringstream ss;
-
- ss << "{decision=" << decision
- << ", contextSensitivities=" << contextSensitivities.size() << ", errors=";
- ss << errors.size() << ", ambiguities=" << ambiguities.size()
- << ", SLL_lookahead=" << SLL_TotalLook;
- ss << ", SLL_ATNTransitions=" << SLL_ATNTransitions
- << ", SLL_DFATransitions=" << SLL_DFATransitions;
- ss << ", LL_Fallback=" << LL_Fallback << ", LL_lookahead=" << LL_TotalLook
- << ", LL_ATNTransitions=" << LL_ATNTransitions << '}';
-
- return ss.str();
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.h
deleted file mode 100644
index b33810f992..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionInfo.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/AmbiguityInfo.h"
-#include "atn/ContextSensitivityInfo.h"
-#include "atn/ErrorInfo.h"
-#include "atn/PredicateEvalInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
-class LookaheadEventInfo;
-
-/// <summary>
-/// This class contains profiling gathered for a particular decision.
-///
-/// <para>
-/// Parsing performance in ANTLR 4 is heavily influenced by both static factors
-/// (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the
-/// choice of input and the state of the DFA cache at the time profiling
-/// operations are started). For best results, gather and use aggregate
-/// statistics from a large sample of inputs representing the inputs expected in
-/// production before using the results to make changes in the grammar.</para>
-///
-/// @since 4.3
-/// </summary>
-class ANTLR4CPP_PUBLIC DecisionInfo {
- public:
- /// <summary>
- /// The decision number, which is an index into <seealso
- /// cref="ATN#decisionToState"/>.
- /// </summary>
- const size_t decision;
-
- /// <summary>
- /// The total number of times <seealso
- /// cref="ParserATNSimulator#adaptivePredict"/> was invoked for this decision.
- /// </summary>
- long long invocations = 0;
-
- /// <summary>
- /// The total time spent in <seealso
- /// cref="ParserATNSimulator#adaptivePredict"/> for this decision, in
- /// nanoseconds.
- ///
- /// <para>
- /// The value of this field contains the sum of differential results obtained
- /// by <seealso cref="System#nanoTime()"/>, and is not adjusted to compensate
- /// for JIT and/or garbage collection overhead. For best accuracy, use a
- /// modern JVM implementation that provides precise results from <seealso
- /// cref="System#nanoTime()"/>, and perform profiling in a separate process
- /// which is warmed up by parsing the input prior to profiling. If desired,
- /// call <seealso cref="ATNSimulator#clearDFA"/> to reset the DFA cache to its
- /// initial state before starting the profiling measurement pass.</para>
- /// </summary>
- long long timeInPrediction = 0;
-
- /// <summary>
- /// The sum of the lookahead required for SLL prediction for this decision.
- /// Note that SLL prediction is used before LL prediction for performance
- /// reasons even when <seealso cref="PredictionMode#LL"/> or
- /// <seealso cref="PredictionMode#LL_EXACT_AMBIG_DETECTION"/> is used.
- /// </summary>
- long long SLL_TotalLook = 0;
-
- /// <summary>
- /// Gets the minimum lookahead required for any single SLL prediction to
- /// complete for this decision, by reaching a unique prediction, reaching an
- /// SLL conflict state, or encountering a syntax error.
- /// </summary>
- long long SLL_MinLook = 0;
-
- /// <summary>
- /// Gets the maximum lookahead required for any single SLL prediction to
- /// complete for this decision, by reaching a unique prediction, reaching an
- /// SLL conflict state, or encountering a syntax error.
- /// </summary>
- long long SLL_MaxLook = 0;
-
- /// Gets the <seealso cref="LookaheadEventInfo"/> associated with the event
- /// where the <seealso cref="#SLL_MaxLook"/> value was set.
- Ref<LookaheadEventInfo> SLL_MaxLookEvent;
-
- /// <summary>
- /// The sum of the lookahead required for LL prediction for this decision.
- /// Note that LL prediction is only used when SLL prediction reaches a
- /// conflict state.
- /// </summary>
- long long LL_TotalLook = 0;
-
- /// <summary>
- /// Gets the minimum lookahead required for any single LL prediction to
- /// complete for this decision. An LL prediction completes when the algorithm
- /// reaches a unique prediction, a conflict state (for
- /// <seealso cref="PredictionMode#LL"/>, an ambiguity state (for
- /// <seealso cref="PredictionMode#LL_EXACT_AMBIG_DETECTION"/>, or a syntax
- /// error.
- /// </summary>
- long long LL_MinLook = 0;
-
- /// <summary>
- /// Gets the maximum lookahead required for any single LL prediction to
- /// complete for this decision. An LL prediction completes when the algorithm
- /// reaches a unique prediction, a conflict state (for
- /// <seealso cref="PredictionMode#LL"/>, an ambiguity state (for
- /// <seealso cref="PredictionMode#LL_EXACT_AMBIG_DETECTION"/>, or a syntax
- /// error.
- /// </summary>
- long long LL_MaxLook = 0;
-
- /// <summary>
- /// Gets the <seealso cref="LookaheadEventInfo"/> associated with the event
- /// where the <seealso cref="#LL_MaxLook"/> value was set.
- /// </summary>
- Ref<LookaheadEventInfo> LL_MaxLookEvent;
-
- /// <summary>
- /// A collection of <seealso cref="ContextSensitivityInfo"/> instances
- /// describing the context sensitivities encountered during LL prediction for
- /// this decision.
- /// </summary>
- /// <seealso cref= ContextSensitivityInfo </seealso>
- std::vector<ContextSensitivityInfo> contextSensitivities;
-
- /// <summary>
- /// A collection of <seealso cref="ErrorInfo"/> instances describing the parse
- /// errors identified during calls to <seealso
- /// cref="ParserATNSimulator#adaptivePredict"/> for this decision.
- /// </summary>
- /// <seealso cref= ErrorInfo </seealso>
- std::vector<ErrorInfo> errors;
-
- /// <summary>
- /// A collection of <seealso cref="AmbiguityInfo"/> instances describing the
- /// ambiguities encountered during LL prediction for this decision.
- /// </summary>
- /// <seealso cref= AmbiguityInfo </seealso>
- std::vector<AmbiguityInfo> ambiguities;
-
- /// <summary>
- /// A collection of <seealso cref="PredicateEvalInfo"/> instances describing
- /// the results of evaluating individual predicates during prediction for this
- /// decision.
- /// </summary>
- /// <seealso cref= PredicateEvalInfo </seealso>
- std::vector<PredicateEvalInfo> predicateEvals;
-
- /// <summary>
- /// The total number of ATN transitions required during SLL prediction for
- /// this decision. An ATN transition is determined by the number of times the
- /// DFA does not contain an edge that is required for prediction, resulting
- /// in on-the-fly computation of that edge.
- ///
- /// <para>
- /// If DFA caching of SLL transitions is employed by the implementation, ATN
- /// computation may cache the computed edge for efficient lookup during
- /// future parsing of this decision. Otherwise, the SLL parsing algorithm
- /// will use ATN transitions exclusively.</para>
- /// </summary>
- /// <seealso cref= #SLL_ATNTransitions </seealso>
- /// <seealso cref= ParserATNSimulator#computeTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#computeTargetState </seealso>
- long long SLL_ATNTransitions = 0;
-
- /// <summary>
- /// The total number of DFA transitions required during SLL prediction for
- /// this decision.
- ///
- /// <para>If the ATN simulator implementation does not use DFA caching for SLL
- /// transitions, this value will be 0.</para>
- /// </summary>
- /// <seealso cref= ParserATNSimulator#getExistingTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#getExistingTargetState </seealso>
- long long SLL_DFATransitions = 0;
-
- /// <summary>
- /// Gets the total number of times SLL prediction completed in a conflict
- /// state, resulting in fallback to LL prediction.
- ///
- /// <para>Note that this value is not related to whether or not
- /// <seealso cref="PredictionMode#SLL"/> may be used successfully with a
- /// particular grammar. If the ambiguity resolution algorithm applied to the
-  /// SLL conflicts for this decision produces the same result as LL prediction
- /// for this decision, <seealso cref="PredictionMode#SLL"/> would produce the
- /// same overall parsing result as <seealso cref="PredictionMode#LL"/>.</para>
- /// </summary>
- long long LL_Fallback = 0;
-
- /// <summary>
- /// The total number of ATN transitions required during LL prediction for
- /// this decision. An ATN transition is determined by the number of times the
- /// DFA does not contain an edge that is required for prediction, resulting
- /// in on-the-fly computation of that edge.
- ///
- /// <para>
- /// If DFA caching of LL transitions is employed by the implementation, ATN
- /// computation may cache the computed edge for efficient lookup during
- /// future parsing of this decision. Otherwise, the LL parsing algorithm will
- /// use ATN transitions exclusively.</para>
- /// </summary>
- /// <seealso cref= #LL_DFATransitions </seealso>
- /// <seealso cref= ParserATNSimulator#computeTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#computeTargetState </seealso>
- long long LL_ATNTransitions = 0;
-
- /// <summary>
- /// The total number of DFA transitions required during LL prediction for
- /// this decision.
- ///
- /// <para>If the ATN simulator implementation does not use DFA caching for LL
- /// transitions, this value will be 0.</para>
- /// </summary>
- /// <seealso cref= ParserATNSimulator#getExistingTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#getExistingTargetState </seealso>
- long long LL_DFATransitions = 0;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="DecisionInfo"/> class to
- /// contain statistics for a particular decision.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- DecisionInfo(size_t decision);
-
- std::string toString() const;
-};
-
-} // namespace atn
-} // namespace antlr4
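
A hedged sketch of how the profiling fields declared above might be summarized. It reads only members shown in this header (decision, invocations, SLL_TotalLook, LL_Fallback, ambiguities) and leaves out how the DecisionInfo vector is obtained from a profiled parse.

#include <iostream>
#include <vector>

#include "atn/DecisionInfo.h"

// Print a one-line summary for each decision that fell back to LL prediction
// or reported ambiguities.
void reportDecisions(const std::vector<antlr4::atn::DecisionInfo>& decisions) {
  for (const auto& d : decisions) {
    if (d.LL_Fallback == 0 && d.ambiguities.empty()) continue;  // nothing notable
    std::cout << "decision " << d.decision
              << ": invocations=" << d.invocations
              << ", SLL lookahead=" << d.SLL_TotalLook
              << ", LL fallbacks=" << d.LL_Fallback
              << ", ambiguities=" << d.ambiguities.size() << '\n';
  }
}
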
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.cpp
deleted file mode 100644
index 924f814a9c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/DecisionState.h"
-
-using namespace antlr4::atn;
-
-void DecisionState::InitializeInstanceFields() {
- decision = -1;
- nonGreedy = false;
-}
-
-std::string DecisionState::toString() const {
- return "DECISION " + ATNState::toString();
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.h
deleted file mode 100644
index 4e3e0cbc80..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/DecisionState.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC DecisionState : public ATNState {
- public:
- int decision;
- bool nonGreedy;
-
- private:
- void InitializeInstanceFields();
-
- public:
- DecisionState() { InitializeInstanceFields(); }
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp
deleted file mode 100644
index 5eb7b4252d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/EmptyPredictionContext.h"
-
-using namespace antlr4::atn;
-
-EmptyPredictionContext::EmptyPredictionContext()
- : SingletonPredictionContext(nullptr, EMPTY_RETURN_STATE) {}
-
-bool EmptyPredictionContext::isEmpty() const { return true; }
-
-size_t EmptyPredictionContext::size() const { return 1; }
-
-Ref<PredictionContext> EmptyPredictionContext::getParent(
- size_t /*index*/) const {
- return nullptr;
-}
-
-size_t EmptyPredictionContext::getReturnState(size_t /*index*/) const {
- return returnState;
-}
-
-bool EmptyPredictionContext::operator==(const PredictionContext& o) const {
- return this == &o;
-}
-
-std::string EmptyPredictionContext::toString() const { return "$"; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h
deleted file mode 100644
index c410921ec9..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EmptyPredictionContext.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/SingletonPredictionContext.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC EmptyPredictionContext
- : public SingletonPredictionContext {
- public:
- EmptyPredictionContext();
-
- virtual bool isEmpty() const override;
- virtual size_t size() const override;
- virtual Ref<PredictionContext> getParent(size_t index) const override;
- virtual size_t getReturnState(size_t index) const override;
- virtual std::string toString() const override;
-
- virtual bool operator==(const PredictionContext& o) const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp
deleted file mode 100644
index 400f9295a8..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/EpsilonTransition.h"
-
-using namespace antlr4::atn;
-
-EpsilonTransition::EpsilonTransition(ATNState* target)
- : EpsilonTransition(target, INVALID_INDEX) {}
-
-EpsilonTransition::EpsilonTransition(ATNState* target,
- size_t outermostPrecedenceReturn)
- : Transition(target),
- _outermostPrecedenceReturn(outermostPrecedenceReturn) {}
-
-size_t EpsilonTransition::outermostPrecedenceReturn() {
- return _outermostPrecedenceReturn;
-}
-
-Transition::SerializationType EpsilonTransition::getSerializationType() const {
- return EPSILON;
-}
-
-bool EpsilonTransition::isEpsilon() const { return true; }
-
-bool EpsilonTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string EpsilonTransition::toString() const {
- return "EPSILON " + Transition::toString() + " {}";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.h
deleted file mode 100644
index bb21a2eb7c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/EpsilonTransition.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC EpsilonTransition final : public Transition {
- public:
- EpsilonTransition(ATNState* target);
- EpsilonTransition(ATNState* target, size_t outermostPrecedenceReturn);
-
- /**
-   * @return the rule index of the precedence rule this transition is
-   * returning from, where the precedence value is 0; otherwise, INVALID_INDEX.
- *
- * @see ATNConfig#isPrecedenceFilterSuppressed()
- * @see ParserATNSimulator#applyPrecedenceFilter(ATNConfigSet)
- * @since 4.4.1
- */
- size_t outermostPrecedenceReturn();
- virtual SerializationType getSerializationType() const override;
-
- virtual bool isEpsilon() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-
- private:
- const size_t _outermostPrecedenceReturn; // A rule index.
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp
deleted file mode 100644
index 6cb37c4354..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNConfigSet.h"
-
-#include "atn/ErrorInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-ErrorInfo::ErrorInfo(size_t decision, ATNConfigSet* configs, TokenStream* input,
- size_t startIndex, size_t stopIndex, bool fullCtx)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex,
- fullCtx) {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.h
deleted file mode 100644
index 0e7b80e644..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ErrorInfo.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// This class represents profiling event information for a syntax error
-/// identified during prediction. Syntax errors occur when the prediction
-/// algorithm is unable to identify an alternative which would lead to a
-/// successful parse.
-/// </summary>
-/// <seealso cref= Parser#notifyErrorListeners(Token, String,
-/// RecognitionException) </seealso> <seealso cref=
-/// ANTLRErrorListener#syntaxError
-///
-/// @since 4.3 </seealso>
-class ANTLR4CPP_PUBLIC ErrorInfo : public DecisionEventInfo {
- public:
- /// <summary>
- /// Constructs a new instance of the <seealso cref="ErrorInfo"/> class with
- /// the specified detailed syntax error information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set reached during
- /// prediction prior to reaching the <seealso cref="ATNSimulator#ERROR"/>
- /// state </param> <param name="input"> The input token stream </param> <param
- /// name="startIndex"> The start index for the current prediction </param>
- /// <param name="stopIndex"> The index at which the syntax error was
- /// identified </param> <param name="fullCtx"> {@code true} if the syntax
- /// error was identified during LL prediction; otherwise, {@code false} if the
- /// syntax error was identified during SLL prediction </param>
- ErrorInfo(size_t decision, ATNConfigSet* configs, TokenStream* input,
- size_t startIndex, size_t stopIndex, bool fullCtx);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp
deleted file mode 100644
index ce823b3f92..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.cpp
+++ /dev/null
@@ -1,173 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNConfig.h"
-#include "atn/AbstractPredicateTransition.h"
-#include "atn/EmptyPredictionContext.h"
-#include "atn/NotSetTransition.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "atn/SingletonPredictionContext.h"
-#include "atn/Transition.h"
-#include "atn/WildcardTransition.h"
-#include "misc/IntervalSet.h"
-
-#include "support/CPPUtils.h"
-
-#include "atn/LL1Analyzer.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-LL1Analyzer::LL1Analyzer(const ATN& atn) : _atn(atn) {}
-
-LL1Analyzer::~LL1Analyzer() {}
-
-std::vector<misc::IntervalSet> LL1Analyzer::getDecisionLookahead(
- ATNState* s) const {
- std::vector<misc::IntervalSet> look;
-
- if (s == nullptr) {
- return look;
- }
-
- look.resize(s->transitions.size()); // Fills all interval sets with defaults.
- for (size_t alt = 0; alt < s->transitions.size(); alt++) {
- bool seeThruPreds = false; // fail to get lookahead upon pred
-
- ATNConfig::Set lookBusy;
- antlrcpp::BitSet callRuleStack;
- _LOOK(s->transitions[alt]->target, nullptr, PredictionContext::EMPTY,
- look[alt], lookBusy, callRuleStack, seeThruPreds, false);
-
- // Wipe out lookahead for this alternative if we found nothing
- // or we had a predicate when we !seeThruPreds
- if (look[alt].size() == 0 || look[alt].contains(HIT_PRED)) {
- look[alt].clear();
- }
- }
- return look;
-}
-
-misc::IntervalSet LL1Analyzer::LOOK(ATNState* s, RuleContext* ctx) const {
- return LOOK(s, nullptr, ctx);
-}
-
-misc::IntervalSet LL1Analyzer::LOOK(ATNState* s, ATNState* stopState,
- RuleContext* ctx) const {
- misc::IntervalSet r;
- bool seeThruPreds = true; // ignore preds; get all lookahead
- Ref<PredictionContext> lookContext =
- ctx != nullptr ? PredictionContext::fromRuleContext(_atn, ctx) : nullptr;
-
- ATNConfig::Set lookBusy;
- antlrcpp::BitSet callRuleStack;
- _LOOK(s, stopState, lookContext, r, lookBusy, callRuleStack, seeThruPreds,
- true);
-
- return r;
-}
-
-void LL1Analyzer::_LOOK(ATNState* s, ATNState* stopState,
- Ref<PredictionContext> const& ctx,
- misc::IntervalSet& look, ATNConfig::Set& lookBusy,
- antlrcpp::BitSet& calledRuleStack, bool seeThruPreds,
- bool addEOF) const {
- Ref<ATNConfig> c = std::make_shared<ATNConfig>(s, 0, ctx);
-
- if (lookBusy.count(c) > 0) // Keep in mind comparison is based on members of
- // the class, not the actual instance.
- return;
-
- lookBusy.insert(c);
-
- // ml: s can never be null, hence no need to check if stopState is != null.
- if (s == stopState) {
- if (ctx == nullptr) {
- look.add(Token::EPSILON);
- return;
- } else if (ctx->isEmpty() && addEOF) {
- look.add(Token::EOF);
- return;
- }
- }
-
- if (s->getStateType() == ATNState::RULE_STOP) {
- if (ctx == nullptr) {
- look.add(Token::EPSILON);
- return;
- } else if (ctx->isEmpty() && addEOF) {
- look.add(Token::EOF);
- return;
- }
-
- if (ctx != PredictionContext::EMPTY) {
- // run thru all possible stack tops in ctx
- for (size_t i = 0; i < ctx->size(); i++) {
- ATNState* returnState = _atn.states[ctx->getReturnState(i)];
-
- bool removed = calledRuleStack.test(returnState->ruleIndex);
- auto onExit = finally([removed, &calledRuleStack, returnState] {
- if (removed) {
- calledRuleStack.set(returnState->ruleIndex);
- }
- });
-
- calledRuleStack[returnState->ruleIndex] = false;
- _LOOK(returnState, stopState, ctx->getParent(i), look, lookBusy,
- calledRuleStack, seeThruPreds, addEOF);
- }
- return;
- }
- }
-
- size_t n = s->transitions.size();
- for (size_t i = 0; i < n; i++) {
- Transition* t = s->transitions[i];
-
- if (t->getSerializationType() == Transition::RULE) {
- if (calledRuleStack[(static_cast<RuleTransition*>(t))
- ->target->ruleIndex]) {
- continue;
- }
-
- Ref<PredictionContext> newContext = SingletonPredictionContext::create(
- ctx, (static_cast<RuleTransition*>(t))->followState->stateNumber);
- auto onExit = finally([t, &calledRuleStack] {
- calledRuleStack[(static_cast<RuleTransition*>(t))->target->ruleIndex] =
- false;
- });
-
- calledRuleStack.set((static_cast<RuleTransition*>(t))->target->ruleIndex);
- _LOOK(t->target, stopState, newContext, look, lookBusy, calledRuleStack,
- seeThruPreds, addEOF);
-
- } else if (is<AbstractPredicateTransition*>(t)) {
- if (seeThruPreds) {
- _LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack,
- seeThruPreds, addEOF);
- } else {
- look.add(HIT_PRED);
- }
- } else if (t->isEpsilon()) {
- _LOOK(t->target, stopState, ctx, look, lookBusy, calledRuleStack,
- seeThruPreds, addEOF);
- } else if (t->getSerializationType() == Transition::WILDCARD) {
- look.addAll(misc::IntervalSet::of(
- Token::MIN_USER_TOKEN_TYPE, static_cast<ssize_t>(_atn.maxTokenType)));
- } else {
- misc::IntervalSet set = t->label();
- if (!set.isEmpty()) {
- if (is<NotSetTransition*>(t)) {
- set = set.complement(
- misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE,
- static_cast<ssize_t>(_atn.maxTokenType)));
- }
- look.addAll(set);
- }
- }
- }
-}
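
The _LOOK() recursion above mutates calledRuleStack and relies on antlrcpp's finally() helper to undo that mutation on every return path, including the early returns. A minimal scope-guard sketch in that spirit (illustrative names, not the actual helper from support/CPPUtils.h):

#include <utility>

// Minimal scope-guard sketch (illustrative, not antlrcpp's FinalAction from
// support/CPPUtils.h): the stored callable runs when the guard leaves scope,
// so cleanup happens on every return path of a recursive call such as _LOOK.
template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : _cleanup(std::move(f)) {}
  ScopeGuard(ScopeGuard&& other)
      : _cleanup(std::move(other._cleanup)), _engaged(other._engaged) {
    other._engaged = false;  // a moved-from guard must not fire again
  }
  ScopeGuard(const ScopeGuard&) = delete;
  ScopeGuard& operator=(const ScopeGuard&) = delete;
  ~ScopeGuard() {
    if (_engaged) _cleanup();
  }

 private:
  F _cleanup;
  bool _engaged = true;
};

template <typename F>
ScopeGuard<F> makeGuard(F f) {
  return ScopeGuard<F>(std::move(f));
}

// Usage in the style of the deleted code:
//   auto onExit = makeGuard([&] { calledRuleStack.set(ruleIndex); });
//   ... recursion that may return early; the bit is restored either way ...

Held in a local such as onExit, the guard's destructor puts the calledRuleStack entry back no matter which branch of the recursion returns first.
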
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.h
deleted file mode 100644
index a58c16671b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LL1Analyzer.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-#include "atn/ATNConfig.h"
-#include "atn/PredictionContext.h"
-#include "support/BitSet.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC LL1Analyzer {
- public:
- /// Special value added to the lookahead sets to indicate that we hit
- /// a predicate during analysis if {@code seeThruPreds==false}.
- static const size_t HIT_PRED = Token::INVALID_TYPE;
-
- const atn::ATN& _atn;
-
- LL1Analyzer(const atn::ATN& atn);
- virtual ~LL1Analyzer();
-
- /// <summary>
- /// Calculates the SLL(1) expected lookahead set for each outgoing transition
- /// of an <seealso cref="ATNState"/>. The returned array has one element for
- /// each outgoing transition in {@code s}. If the closure from transition
- /// <em>i</em> leads to a semantic predicate before matching a symbol, the
- /// element at index <em>i</em> of the result will be {@code null}.
- /// </summary>
- /// <param name="s"> the ATN state </param>
- /// <returns> the expected symbols for each outgoing transition of {@code s}.
- /// </returns>
- virtual std::vector<misc::IntervalSet> getDecisionLookahead(
- ATNState* s) const;
-
- /// <summary>
- /// Compute set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}.
- /// <p/>
- /// If {@code ctx} is {@code null} and the end of the rule containing
- /// {@code s} is reached, <seealso cref="Token#EPSILON"/> is added to the
- /// result set. If {@code ctx} is not {@code null} and the end of the
- /// outermost rule is reached, <seealso cref="Token#EOF"/> is added to the
- /// result set.
- /// </summary>
- /// <param name="s"> the ATN state </param>
- /// <param name="ctx"> the complete parser context, or {@code null} if the
- /// context should be ignored
- /// </param>
- /// <returns> The set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}. </returns>
- virtual misc::IntervalSet LOOK(ATNState* s, RuleContext* ctx) const;
-
- /// <summary>
- /// Compute set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}.
- /// <p/>
- /// If {@code ctx} is {@code null} and the end of the rule containing
- /// {@code s} is reached, <seealso cref="Token#EPSILON"/> is added to the
- /// result set. If {@code ctx} is not {@code null} and the end of the
- /// outermost rule is reached, <seealso cref="Token#EOF"/> is added to the
- /// result set.
- /// </summary>
- /// <param name="s"> the ATN state </param>
- /// <param name="stopState"> the ATN state to stop at. This can be a
- /// <seealso cref="BlockEndState"/> to detect epsilon paths through a closure.
- /// </param> <param name="ctx"> the complete parser context, or {@code null}
- /// if the context should be ignored
- /// </param>
- /// <returns> The set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}. </returns>
- virtual misc::IntervalSet LOOK(ATNState* s, ATNState* stopState,
- RuleContext* ctx) const;
-
- /// <summary>
- /// Compute set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}.
- /// <p/>
- /// If {@code ctx} is {@code null} and {@code stopState} or the end of the
- /// rule containing {@code s} is reached, <seealso cref="Token#EPSILON"/> is
- /// added to the result set. If {@code ctx} is not {@code null} and {@code
- /// addEOF} is
- /// {@code true} and {@code stopState} or the end of the outermost rule is
- /// reached, <seealso cref="Token#EOF"/> is added to the result set.
- /// </summary>
- /// <param name="s"> the ATN state. </param>
- /// <param name="stopState"> the ATN state to stop at. This can be a
- /// <seealso cref="BlockEndState"/> to detect epsilon paths through a closure.
- /// </param> <param name="ctx"> The outer context, or {@code null} if the
- /// outer context should not be used. </param> <param name="look"> The result
- /// lookahead set. </param> <param name="lookBusy"> A set used for preventing
- /// epsilon closures in the ATN from causing a stack overflow. Outside code
- /// should pass
- /// {@code new HashSet<ATNConfig>} for this argument. </param>
- /// <param name="calledRuleStack"> A set used for preventing left recursion in
- /// the ATN from causing a stack overflow. Outside code should pass
- /// {@code new BitSet()} for this argument. </param>
- /// <param name="seeThruPreds"> {@code true} to true semantic predicates as
- /// implicitly {@code true} and "see through them", otherwise {@code false}
- /// to treat semantic predicates as opaque and add <seealso cref="#HIT_PRED"/>
- /// to the result if one is encountered. </param> <param name="addEOF"> Add
- /// <seealso cref="Token#EOF"/> to the result if the end of the outermost
- /// context is reached. This parameter has no effect if {@code ctx} is {@code
- /// null}. </param>
- protected:
- virtual void _LOOK(ATNState* s, ATNState* stopState,
- Ref<PredictionContext> const& ctx, misc::IntervalSet& look,
- ATNConfig::Set& lookBusy,
- antlrcpp::BitSet& calledRuleStack, bool seeThruPreds,
- bool addEOF) const;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp
deleted file mode 100644
index 8ec9aa39a5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "SemanticContext.h"
-#include "atn/DecisionState.h"
-#include "atn/LexerActionExecutor.h"
-#include "atn/PredictionContext.h"
-#include "misc/MurmurHash.h"
-
-#include "support/CPPUtils.h"
-
-#include "atn/LexerATNConfig.h"
-
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-LexerATNConfig::LexerATNConfig(ATNState* state, int alt,
- Ref<PredictionContext> const& context)
- : ATNConfig(state, alt, context, SemanticContext::NONE),
- _passedThroughNonGreedyDecision(false) {}
-
-LexerATNConfig::LexerATNConfig(
- ATNState* state, int alt, Ref<PredictionContext> const& context,
- Ref<LexerActionExecutor> const& lexerActionExecutor)
- : ATNConfig(state, alt, context, SemanticContext::NONE),
- _lexerActionExecutor(lexerActionExecutor),
- _passedThroughNonGreedyDecision(false) {}
-
-LexerATNConfig::LexerATNConfig(Ref<LexerATNConfig> const& c, ATNState* state)
- : ATNConfig(c, state, c->context, c->semanticContext),
- _lexerActionExecutor(c->_lexerActionExecutor),
- _passedThroughNonGreedyDecision(checkNonGreedyDecision(c, state)) {}
-
-LexerATNConfig::LexerATNConfig(
- Ref<LexerATNConfig> const& c, ATNState* state,
- Ref<LexerActionExecutor> const& lexerActionExecutor)
- : ATNConfig(c, state, c->context, c->semanticContext),
- _lexerActionExecutor(lexerActionExecutor),
- _passedThroughNonGreedyDecision(checkNonGreedyDecision(c, state)) {}
-
-LexerATNConfig::LexerATNConfig(Ref<LexerATNConfig> const& c, ATNState* state,
- Ref<PredictionContext> const& context)
- : ATNConfig(c, state, context, c->semanticContext),
- _lexerActionExecutor(c->_lexerActionExecutor),
- _passedThroughNonGreedyDecision(checkNonGreedyDecision(c, state)) {}
-
-Ref<LexerActionExecutor> LexerATNConfig::getLexerActionExecutor() const {
- return _lexerActionExecutor;
-}
-
-bool LexerATNConfig::hasPassedThroughNonGreedyDecision() {
- return _passedThroughNonGreedyDecision;
-}
-
-size_t LexerATNConfig::hashCode() const {
- size_t hashCode = misc::MurmurHash::initialize(7);
- hashCode = misc::MurmurHash::update(hashCode, state->stateNumber);
- hashCode = misc::MurmurHash::update(hashCode, alt);
- hashCode = misc::MurmurHash::update(hashCode, context);
- hashCode = misc::MurmurHash::update(hashCode, semanticContext);
- hashCode = misc::MurmurHash::update(hashCode,
- _passedThroughNonGreedyDecision ? 1 : 0);
- hashCode = misc::MurmurHash::update(hashCode, _lexerActionExecutor);
- hashCode = misc::MurmurHash::finish(hashCode, 6);
- return hashCode;
-}
-
-bool LexerATNConfig::operator==(const LexerATNConfig& other) const {
- if (this == &other) return true;
-
- if (_passedThroughNonGreedyDecision != other._passedThroughNonGreedyDecision)
- return false;
-
- if (_lexerActionExecutor == nullptr)
- return other._lexerActionExecutor == nullptr;
- if (*_lexerActionExecutor != *(other._lexerActionExecutor)) {
- return false;
- }
-
- return ATNConfig::operator==(other);
-}
-
-bool LexerATNConfig::checkNonGreedyDecision(Ref<LexerATNConfig> const& source,
- ATNState* target) {
- return source->_passedThroughNonGreedyDecision ||
- (is<DecisionState*>(target) &&
- (static_cast<DecisionState*>(target))->nonGreedy);
-}
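
LexerATNConfig::hashCode() and operator== above hash and compare the same set of members, which is what lets lookBusy sets and dfa.states deduplicate structurally equal configurations instead of comparing pointers. A rough sketch of that contract with placeholder fields and a generic combiner in place of MurmurHash:

#include <cstddef>
#include <functional>
#include <unordered_set>

// Sketch of the member-wise hash/equality contract (placeholder fields,
// generic combiner instead of MurmurHash): every field that participates in
// operator== must also feed the hash, otherwise set lookups break.
struct ConfigKey {
  int stateNumber;
  int alt;
  bool passedThroughNonGreedy;

  bool operator==(const ConfigKey& o) const {
    return stateNumber == o.stateNumber && alt == o.alt &&
           passedThroughNonGreedy == o.passedThroughNonGreedy;
  }
};

struct ConfigKeyHash {
  size_t operator()(const ConfigKey& k) const {
    size_t h = std::hash<int>()(k.stateNumber);
    h = h * 31 + static_cast<size_t>(k.alt);
    h = h * 31 + (k.passedThroughNonGreedy ? 1u : 0u);
    return h;
  }
};

// A set keyed this way treats two configs with equal members as one entry,
// mirroring how the deleted closure and DFA-state sets behave.
using ConfigKeySet = std::unordered_set<ConfigKey, ConfigKeyHash>;
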
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.h
deleted file mode 100644
index 8b9679d049..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNConfig.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNConfig.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC LexerATNConfig : public ATNConfig {
- public:
- LexerATNConfig(ATNState* state, int alt,
- Ref<PredictionContext> const& context);
- LexerATNConfig(ATNState* state, int alt,
- Ref<PredictionContext> const& context,
- Ref<LexerActionExecutor> const& lexerActionExecutor);
-
- LexerATNConfig(Ref<LexerATNConfig> const& c, ATNState* state);
- LexerATNConfig(Ref<LexerATNConfig> const& c, ATNState* state,
- Ref<LexerActionExecutor> const& lexerActionExecutor);
- LexerATNConfig(Ref<LexerATNConfig> const& c, ATNState* state,
- Ref<PredictionContext> const& context);
-
- /**
- * Gets the {@link LexerActionExecutor} capable of executing the embedded
- * action(s) for the current configuration.
- */
- Ref<LexerActionExecutor> getLexerActionExecutor() const;
- bool hasPassedThroughNonGreedyDecision();
-
- virtual size_t hashCode() const override;
-
- bool operator==(const LexerATNConfig& other) const;
-
- private:
- /**
- * This is the backing field for {@link #getLexerActionExecutor}.
- */
- const Ref<LexerActionExecutor> _lexerActionExecutor;
- const bool _passedThroughNonGreedyDecision;
-
- static bool checkNonGreedyDecision(Ref<LexerATNConfig> const& source,
- ATNState* target);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp
deleted file mode 100644
index 409a8dfed7..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp
+++ /dev/null
@@ -1,683 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "IntStream.h"
-#include "Lexer.h"
-#include "LexerNoViableAltException.h"
-#include "Token.h"
-#include "atn/ActionTransition.h"
-#include "atn/OrderedATNConfigSet.h"
-#include "atn/PredicateTransition.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "atn/SingletonPredictionContext.h"
-#include "atn/TokensStartState.h"
-#include "dfa/DFA.h"
-#include "misc/Interval.h"
-
-#include "atn/EmptyPredictionContext.h"
-#include "atn/LexerATNConfig.h"
-#include "atn/LexerActionExecutor.h"
-#include "dfa/DFAState.h"
-
-#include "atn/LexerATNSimulator.h"
-
-#define DEBUG_ATN 0
-#define DEBUG_DFA 0
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-LexerATNSimulator::SimState::~SimState() {}
-
-void LexerATNSimulator::SimState::reset() {
- index = INVALID_INDEX;
- line = 0;
- charPos = INVALID_INDEX;
- dfaState = nullptr; // Don't delete. It's just a reference.
-}
-
-void LexerATNSimulator::SimState::InitializeInstanceFields() {
- index = INVALID_INDEX;
- line = 0;
- charPos = INVALID_INDEX;
-}
-
-int LexerATNSimulator::match_calls = 0;
-
-LexerATNSimulator::LexerATNSimulator(const ATN& atn,
- std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache)
- : LexerATNSimulator(nullptr, atn, decisionToDFA, sharedContextCache) {}
-
-LexerATNSimulator::LexerATNSimulator(Lexer* recog, const ATN& atn,
- std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache)
- : ATNSimulator(atn, sharedContextCache),
- _recog(recog),
- _decisionToDFA(decisionToDFA) {
- InitializeInstanceFields();
-}
-
-void LexerATNSimulator::copyState(LexerATNSimulator* simulator) {
- _charPositionInLine = simulator->_charPositionInLine;
- _line = simulator->_line;
- _mode = simulator->_mode;
- _startIndex = simulator->_startIndex;
-}
-
-size_t LexerATNSimulator::match(CharStream* input, size_t mode) {
- match_calls++;
- _mode = mode;
- ssize_t mark = input->mark();
-
- auto onExit = finally([input, mark] { input->release(mark); });
-
- _startIndex = input->index();
- _prevAccept.reset();
- const dfa::DFA& dfa = _decisionToDFA[mode];
- if (dfa.s0 == nullptr) {
- return matchATN(input);
- } else {
- return execATN(input, dfa.s0);
- }
-}
-
-void LexerATNSimulator::reset() {
- _prevAccept.reset();
- _startIndex = 0;
- _line = 1;
- _charPositionInLine = 0;
- _mode = Lexer::DEFAULT_MODE;
-}
-
-void LexerATNSimulator::clearDFA() {
- size_t size = _decisionToDFA.size();
- _decisionToDFA.clear();
- for (size_t d = 0; d < size; ++d) {
- _decisionToDFA.emplace_back(atn.getDecisionState(d), d);
- }
-}
-
-size_t LexerATNSimulator::matchATN(CharStream* input) {
- ATNState* startState = atn.modeToStartState[_mode];
-
- std::unique_ptr<ATNConfigSet> s0_closure =
- computeStartState(input, startState);
-
- bool suppressEdge = s0_closure->hasSemanticContext;
- s0_closure->hasSemanticContext = false;
-
- dfa::DFAState* next = addDFAState(s0_closure.release());
- if (!suppressEdge) {
- _decisionToDFA[_mode].s0 = next;
- }
-
- size_t predict = execATN(input, next);
-
- return predict;
-}
-
-size_t LexerATNSimulator::execATN(CharStream* input, dfa::DFAState* ds0) {
- if (ds0->isAcceptState) {
- // allow zero-length tokens
- // ml: in Java code this method uses 3 params. The first is a member var of
- // the class anyway (_prevAccept), so why pass it here?
- captureSimState(input, ds0);
- }
-
- size_t t = input->LA(1);
- dfa::DFAState* s = ds0; // s is current/from DFA state
-
- while (true) { // while more work
- // As we move src->trg, src->trg, we keep track of the previous trg to
- // avoid looking up the DFA state again, which is expensive.
- // If the previous target was already part of the DFA, we might
- // be able to avoid doing a reach operation upon t. If s!=null,
- // it means that semantic predicates didn't prevent us from
- // creating a DFA state. Once we know s!=null, we check to see if
- // the DFA state has an edge already for t. If so, we can just reuse
-    // its configuration set; there's no point in re-computing it.
- // This is kind of like doing DFA simulation within the ATN
- // simulation because DFA simulation is really just a way to avoid
- // computing reach/closure sets. Technically, once we know that
- // we have a previously added DFA state, we could jump over to
- // the DFA simulator. But, that would mean popping back and forth
- // a lot and making things more complicated algorithmically.
- // This optimization makes a lot of sense for loops within DFA.
- // A character will take us back to an existing DFA state
- // that already has lots of edges out of it. e.g., .* in comments.
- dfa::DFAState* target = getExistingTargetState(s, t);
- if (target == nullptr) {
- target = computeTargetState(input, s, t);
- }
-
- if (target == ERROR_STATE.get()) {
- break;
- }
-
- // If this is a consumable input element, make sure to consume before
- // capturing the accept state so the input index, line, and char
- // position accurately reflect the state of the interpreter at the
- // end of the token.
- if (t != Token::EOF) {
- consume(input);
- }
-
- if (target->isAcceptState) {
- captureSimState(input, target);
- if (t == Token::EOF) {
- break;
- }
- }
-
- t = input->LA(1);
- s = target; // flip; current DFA target becomes new src/from state
- }
-
- return failOrAccept(input, s->configs.get(), t);
-}
-
-dfa::DFAState* LexerATNSimulator::getExistingTargetState(dfa::DFAState* s,
- size_t t) {
- dfa::DFAState* retval = nullptr;
- _edgeLock.readLock();
- if (t <= MAX_DFA_EDGE) {
- auto iterator = s->edges.find(t - MIN_DFA_EDGE);
-#if DEBUG_ATN == 1
- if (iterator != s->edges.end()) {
- std::cout << std::string("reuse state ") << s->stateNumber
- << std::string(" edge to ") << iterator->second->stateNumber
- << std::endl;
- }
-#endif
-
- if (iterator != s->edges.end()) retval = iterator->second;
- }
- _edgeLock.readUnlock();
- return retval;
-}
-
-dfa::DFAState* LexerATNSimulator::computeTargetState(CharStream* input,
- dfa::DFAState* s,
- size_t t) {
- OrderedATNConfigSet* reach =
- new OrderedATNConfigSet(); /* mem-check: deleted on error or managed by
- new DFA state. */
-
- // if we don't find an existing DFA state
- // Fill reach starting from closure, following t transitions
- getReachableConfigSet(input, s->configs.get(), reach, t);
-
- if (reach->isEmpty()) { // we got nowhere on t from s
- if (!reach->hasSemanticContext) {
- // we got nowhere on t, don't throw out this knowledge; it'd
- // cause a failover from DFA later.
- delete reach;
- addDFAEdge(s, t, ERROR_STATE.get());
- }
-
- // stop when we can't match any more char
- return ERROR_STATE.get();
- }
-
- // Add an edge from s to target DFA found/created for reach
- return addDFAEdge(s, t, reach);
-}
-
-size_t LexerATNSimulator::failOrAccept(CharStream* input, ATNConfigSet* reach,
- size_t t) {
- if (_prevAccept.dfaState != nullptr) {
- Ref<LexerActionExecutor> lexerActionExecutor =
- _prevAccept.dfaState->lexerActionExecutor;
- accept(input, lexerActionExecutor, _startIndex, _prevAccept.index,
- _prevAccept.line, _prevAccept.charPos);
- return _prevAccept.dfaState->prediction;
- } else {
- // if no accept and EOF is first char, return EOF
- if (t == Token::EOF && input->index() == _startIndex) {
- return Token::EOF;
- }
-
- throw LexerNoViableAltException(_recog, input, _startIndex, reach);
- }
-}
-
-void LexerATNSimulator::getReachableConfigSet(CharStream* input,
- ATNConfigSet* closure_,
- ATNConfigSet* reach, size_t t) {
- // this is used to skip processing for configs which have a lower priority
- // than a config that already reached an accept state for the same rule
- size_t skipAlt = ATN::INVALID_ALT_NUMBER;
-
- for (auto c : closure_->configs) {
- bool currentAltReachedAcceptState = c->alt == skipAlt;
- if (currentAltReachedAcceptState &&
- (std::static_pointer_cast<LexerATNConfig>(c))
- ->hasPassedThroughNonGreedyDecision()) {
- continue;
- }
-
-#if DEBUG_ATN == 1
- std::cout << "testing " << getTokenName((int)t) << " at "
- << c->toString(true) << std::endl;
-#endif
-
- size_t n = c->state->transitions.size();
- for (size_t ti = 0; ti < n; ti++) { // for each transition
- Transition* trans = c->state->transitions[ti];
- ATNState* target = getReachableTarget(trans, (int)t);
- if (target != nullptr) {
- Ref<LexerActionExecutor> lexerActionExecutor =
- std::static_pointer_cast<LexerATNConfig>(c)
- ->getLexerActionExecutor();
- if (lexerActionExecutor != nullptr) {
- lexerActionExecutor = lexerActionExecutor->fixOffsetBeforeMatch(
- (int)input->index() - (int)_startIndex, lexerActionExecutor);
- }
-
- bool treatEofAsEpsilon = t == Token::EOF;
- Ref<LexerATNConfig> config = std::make_shared<LexerATNConfig>(
- std::static_pointer_cast<LexerATNConfig>(c), target,
- lexerActionExecutor);
-
- if (closure(input, config, reach, currentAltReachedAcceptState, true,
- treatEofAsEpsilon)) {
- // any remaining configs for this alt have a lower priority than
- // the one that just reached an accept state.
- skipAlt = c->alt;
- break;
- }
- }
- }
- }
-}
-
-void LexerATNSimulator::accept(
- CharStream* input, const Ref<LexerActionExecutor>& lexerActionExecutor,
- size_t /*startIndex*/, size_t index, size_t line, size_t charPos) {
-#if DEBUG_ATN == 1
- std::cout << "ACTION ";
- std::cout << toString(lexerActionExecutor) << std::endl;
-#endif
-
- // seek to after last char in token
- input->seek(index);
- _line = line;
- _charPositionInLine = (int)charPos;
-
- if (lexerActionExecutor != nullptr && _recog != nullptr) {
- lexerActionExecutor->execute(_recog, input, _startIndex);
- }
-}
-
-atn::ATNState* LexerATNSimulator::getReachableTarget(Transition* trans,
- size_t t) {
- if (trans->matches(t, Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE)) {
- return trans->target;
- }
-
- return nullptr;
-}
-
-std::unique_ptr<ATNConfigSet> LexerATNSimulator::computeStartState(
- CharStream* input, ATNState* p) {
- Ref<PredictionContext> initialContext =
- PredictionContext::EMPTY; // ml: the purpose of this assignment is
- // unclear
- std::unique_ptr<ATNConfigSet> configs(new OrderedATNConfigSet());
- for (size_t i = 0; i < p->transitions.size(); i++) {
- ATNState* target = p->transitions[i]->target;
- Ref<LexerATNConfig> c =
- std::make_shared<LexerATNConfig>(target, (int)(i + 1), initialContext);
- closure(input, c, configs.get(), false, false, false);
- }
-
- return configs;
-}
-
-bool LexerATNSimulator::closure(CharStream* input,
- const Ref<LexerATNConfig>& config,
- ATNConfigSet* configs,
- bool currentAltReachedAcceptState,
- bool speculative, bool treatEofAsEpsilon) {
-#if DEBUG_ATN == 1
- std::cout << "closure(" << config->toString(true) << ")" << std::endl;
-#endif
-
- if (is<RuleStopState*>(config->state)) {
-#if DEBUG_ATN == 1
- if (_recog != nullptr) {
- std::cout << "closure at "
- << _recog->getRuleNames()[config->state->ruleIndex]
- << " rule stop " << config << std::endl;
- } else {
- std::cout << "closure at rule stop " << config << std::endl;
- }
-#endif
-
- if (config->context == nullptr || config->context->hasEmptyPath()) {
- if (config->context == nullptr || config->context->isEmpty()) {
- configs->add(config);
- return true;
- } else {
- configs->add(std::make_shared<LexerATNConfig>(
- config, config->state, PredictionContext::EMPTY));
- currentAltReachedAcceptState = true;
- }
- }
-
- if (config->context != nullptr && !config->context->isEmpty()) {
- for (size_t i = 0; i < config->context->size(); i++) {
- if (config->context->getReturnState(i) !=
- PredictionContext::EMPTY_RETURN_STATE) {
- std::weak_ptr<PredictionContext> newContext =
- config->context->getParent(i); // "pop" return state
- ATNState* returnState =
- atn.states[config->context->getReturnState(i)];
- Ref<LexerATNConfig> c = std::make_shared<LexerATNConfig>(
- config, returnState, newContext.lock());
- currentAltReachedAcceptState =
- closure(input, c, configs, currentAltReachedAcceptState,
- speculative, treatEofAsEpsilon);
- }
- }
- }
-
- return currentAltReachedAcceptState;
- }
-
- // optimization
- if (!config->state->epsilonOnlyTransitions) {
- if (!currentAltReachedAcceptState ||
- !config->hasPassedThroughNonGreedyDecision()) {
- configs->add(config);
- }
- }
-
- ATNState* p = config->state;
- for (size_t i = 0; i < p->transitions.size(); i++) {
- Transition* t = p->transitions[i];
- Ref<LexerATNConfig> c = getEpsilonTarget(input, config, t, configs,
- speculative, treatEofAsEpsilon);
- if (c != nullptr) {
- currentAltReachedAcceptState =
- closure(input, c, configs, currentAltReachedAcceptState, speculative,
- treatEofAsEpsilon);
- }
- }
-
- return currentAltReachedAcceptState;
-}
-
-Ref<LexerATNConfig> LexerATNSimulator::getEpsilonTarget(
- CharStream* input, const Ref<LexerATNConfig>& config, Transition* t,
- ATNConfigSet* configs, bool speculative, bool treatEofAsEpsilon) {
- Ref<LexerATNConfig> c = nullptr;
- switch (t->getSerializationType()) {
- case Transition::RULE: {
- RuleTransition* ruleTransition = static_cast<RuleTransition*>(t);
- Ref<PredictionContext> newContext = SingletonPredictionContext::create(
- config->context, ruleTransition->followState->stateNumber);
- c = std::make_shared<LexerATNConfig>(config, t->target, newContext);
- break;
- }
-
- case Transition::PRECEDENCE:
- throw UnsupportedOperationException(
- "Precedence predicates are not supported in lexers.");
-
- case Transition::PREDICATE: {
- /* Track traversing semantic predicates. If we traverse,
- we cannot add a DFA state for this "reach" computation
- because the DFA would not test the predicate again in the
- future. Rather than creating collections of semantic predicates
- like v3 and testing them on prediction, v4 will test them on the
- fly all the time using the ATN not the DFA. This is slower but
- semantically it's not used that often. One of the key elements to
- this predicate mechanism is not adding DFA states that see
- predicates immediately afterwards in the ATN. For example,
-
- a : ID {p1}? | ID {p2}? ;
-
- should create the start state for rule 'a' (to save start state
- competition), but should not create target of ID state. The
- collection of ATN states the following ID references includes
- states reached by traversing predicates. Since this is when we
-      test them, we cannot cache the DFA state target of ID.
- */
- PredicateTransition* pt = static_cast<PredicateTransition*>(t);
-
-#if DEBUG_ATN == 1
- std::cout << "EVAL rule " << pt->ruleIndex << ":" << pt->predIndex
- << std::endl;
-#endif
-
- configs->hasSemanticContext = true;
- if (evaluatePredicate(input, pt->ruleIndex, pt->predIndex, speculative)) {
- c = std::make_shared<LexerATNConfig>(config, t->target);
- }
- break;
- }
-
- case Transition::ACTION:
- if (config->context == nullptr || config->context->hasEmptyPath()) {
- // execute actions anywhere in the start rule for a token.
- //
- // TO_DO: if the entry rule is invoked recursively, some
- // actions may be executed during the recursive call. The
- // problem can appear when hasEmptyPath() is true but
- // isEmpty() is false. In this case, the config needs to be
- // split into two contexts - one with just the empty path
- // and another with everything but the empty path.
- // Unfortunately, the current algorithm does not allow
- // getEpsilonTarget to return two configurations, so
- // additional modifications are needed before we can support
- // the split operation.
- Ref<LexerActionExecutor> lexerActionExecutor =
- LexerActionExecutor::append(
- config->getLexerActionExecutor(),
- atn.lexerActions[static_cast<ActionTransition*>(t)
- ->actionIndex]);
- c = std::make_shared<LexerATNConfig>(config, t->target,
- lexerActionExecutor);
- break;
- } else {
- // ignore actions in referenced rules
- c = std::make_shared<LexerATNConfig>(config, t->target);
- break;
- }
-
- case Transition::EPSILON:
- c = std::make_shared<LexerATNConfig>(config, t->target);
- break;
-
- case Transition::ATOM:
- case Transition::RANGE:
- case Transition::SET:
- if (treatEofAsEpsilon) {
- if (t->matches(Token::EOF, Lexer::MIN_CHAR_VALUE,
- Lexer::MAX_CHAR_VALUE)) {
- c = std::make_shared<LexerATNConfig>(config, t->target);
- break;
- }
- }
-
- break;
-
-    default:
-      // To silence the compiler. Other transition types are not used here.
-      break;
- }
-
- return c;
-}
-
-bool LexerATNSimulator::evaluatePredicate(CharStream* input, size_t ruleIndex,
- size_t predIndex, bool speculative) {
- // assume true if no recognizer was provided
- if (_recog == nullptr) {
- return true;
- }
-
- if (!speculative) {
- return _recog->sempred(nullptr, ruleIndex, predIndex);
- }
-
- size_t savedCharPositionInLine = _charPositionInLine;
- size_t savedLine = _line;
- size_t index = input->index();
- ssize_t marker = input->mark();
-
- auto onExit =
- finally([this, input, savedCharPositionInLine, savedLine, index, marker] {
- _charPositionInLine = savedCharPositionInLine;
- _line = savedLine;
- input->seek(index);
- input->release(marker);
- });
-
- consume(input);
- return _recog->sempred(nullptr, ruleIndex, predIndex);
-}
-
-void LexerATNSimulator::captureSimState(CharStream* input,
- dfa::DFAState* dfaState) {
- _prevAccept.index = input->index();
- _prevAccept.line = _line;
- _prevAccept.charPos = _charPositionInLine;
- _prevAccept.dfaState = dfaState;
-}
-
-dfa::DFAState* LexerATNSimulator::addDFAEdge(dfa::DFAState* from, size_t t,
- ATNConfigSet* q) {
- /* leading to this call, ATNConfigSet.hasSemanticContext is used as a
- * marker indicating dynamic predicate evaluation makes this edge
- * dependent on the specific input sequence, so the static edge in the
- * DFA should be omitted. The target DFAState is still created since
- * execATN has the ability to resynchronize with the DFA state cache
- * following the predicate evaluation step.
- *
- * TJP notes: next time through the DFA, we see a pred again and eval.
- * If that gets us to a previously created (but dangling) DFA
- * state, we can continue in pure DFA mode from there.
- */
- bool suppressEdge = q->hasSemanticContext;
- q->hasSemanticContext = false;
-
- dfa::DFAState* to = addDFAState(q);
-
- if (suppressEdge) {
- return to;
- }
-
- addDFAEdge(from, t, to);
- return to;
-}
-
-void LexerATNSimulator::addDFAEdge(dfa::DFAState* p, size_t t,
- dfa::DFAState* q) {
- if (/*t < MIN_DFA_EDGE ||*/ t > MAX_DFA_EDGE) { // MIN_DFA_EDGE is 0
- // Only track edges within the DFA bounds
- return;
- }
-
- _edgeLock.writeLock();
- p->edges[t - MIN_DFA_EDGE] = q; // connect
- _edgeLock.writeUnlock();
-}
-
-dfa::DFAState* LexerATNSimulator::addDFAState(ATNConfigSet* configs) {
- /* the lexer evaluates predicates on-the-fly; by this point configs
- * should not contain any configurations with unevaluated predicates.
- */
- assert(!configs->hasSemanticContext);
-
- dfa::DFAState* proposed = new dfa::DFAState(std::unique_ptr<ATNConfigSet>(
- configs)); /* mem-check: managed by the DFA or deleted below */
- Ref<ATNConfig> firstConfigWithRuleStopState = nullptr;
- for (auto& c : configs->configs) {
- if (is<RuleStopState*>(c->state)) {
- firstConfigWithRuleStopState = c;
- break;
- }
- }
-
- if (firstConfigWithRuleStopState != nullptr) {
- proposed->isAcceptState = true;
- proposed->lexerActionExecutor =
- std::dynamic_pointer_cast<LexerATNConfig>(firstConfigWithRuleStopState)
- ->getLexerActionExecutor();
- proposed->prediction =
- atn.ruleToTokenType[firstConfigWithRuleStopState->state->ruleIndex];
- }
-
- dfa::DFA& dfa = _decisionToDFA[_mode];
-
- _stateLock.writeLock();
- if (!dfa.states.empty()) {
- auto iterator = dfa.states.find(proposed);
- if (iterator != dfa.states.end()) {
- delete proposed;
- _stateLock.writeUnlock();
- return *iterator;
- }
- }
-
- proposed->stateNumber = (int)dfa.states.size();
- proposed->configs->setReadonly(true);
-
- dfa.states.insert(proposed);
- _stateLock.writeUnlock();
-
- return proposed;
-}
-
-dfa::DFA& LexerATNSimulator::getDFA(size_t mode) {
- return _decisionToDFA[mode];
-}
-
-std::string LexerATNSimulator::getText(CharStream* input) {
- // index is first lookahead char, don't include.
- return input->getText(misc::Interval(_startIndex, input->index() - 1));
-}
-
-size_t LexerATNSimulator::getLine() const { return _line; }
-
-void LexerATNSimulator::setLine(size_t line) { _line = line; }
-
-size_t LexerATNSimulator::getCharPositionInLine() {
- return _charPositionInLine;
-}
-
-void LexerATNSimulator::setCharPositionInLine(size_t charPositionInLine) {
- _charPositionInLine = charPositionInLine;
-}
-
-void LexerATNSimulator::consume(CharStream* input) {
- size_t curChar = input->LA(1);
- if (curChar == '\n') {
- _line++;
- _charPositionInLine = 0;
- } else {
- _charPositionInLine++;
- }
- input->consume();
-}
-
-std::string LexerATNSimulator::getTokenName(size_t t) {
- if (t == Token::EOF) {
- return "EOF";
- }
- return std::string("'") + static_cast<char>(t) + std::string("'");
-}
-
-void LexerATNSimulator::InitializeInstanceFields() {
- _startIndex = 0;
- _line = 1;
- _charPositionInLine = 0;
- _mode = antlr4::Lexer::DEFAULT_MODE;
-}
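
getExistingTargetState() and addDFAEdge() above only cache DFA transitions for symbols in [MIN_DFA_EDGE, MAX_DFA_EDGE] = [0, 127]; larger code points always go back through full ATN simulation, which is what the "forces unicode to stay in ATN" remark in the header means. A compressed sketch of that bounded edge cache, using a fixed array and a stand-in state type where the runtime uses a per-state map plus read/write locking:

#include <array>
#include <cstddef>

// Bounded-edge-cache sketch (stand-in type, no locking): only ASCII-range
// symbols get a cached DFA edge; out-of-range symbols return nullptr and the
// caller falls back to computing the target via the ATN.
struct DfaStateStub {
  static constexpr std::size_t kMinEdge = 0;
  static constexpr std::size_t kMaxEdge = 127;

  std::array<DfaStateStub*, kMaxEdge - kMinEdge + 1> edges{};

  DfaStateStub* existingTarget(std::size_t t) const {
    if (t > kMaxEdge) return nullptr;  // not cached: simulate via the ATN
    return edges[t - kMinEdge];
  }

  void addEdge(std::size_t t, DfaStateStub* target) {
    if (t > kMaxEdge) return;          // only track edges within the bounds
    edges[t - kMinEdge] = target;
  }
};
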
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h
deleted file mode 100644
index 11a29280ff..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerATNSimulator.h
+++ /dev/null
@@ -1,223 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNSimulator.h"
-#include "atn/LexerATNConfig.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// "dup" of ParserInterpreter
-class ANTLR4CPP_PUBLIC LexerATNSimulator : public ATNSimulator {
- protected:
- class SimState {
- public:
- virtual ~SimState();
-
- protected:
- size_t index;
- size_t line;
- size_t charPos;
- dfa::DFAState* dfaState;
- virtual void reset();
- friend class LexerATNSimulator;
-
- private:
- void InitializeInstanceFields();
-
- public:
- SimState() { InitializeInstanceFields(); }
- };
-
- public:
- static const size_t MIN_DFA_EDGE = 0;
- static const size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN
-
- protected:
- /// <summary>
- /// When we hit an accept state in either the DFA or the ATN, we
- /// have to notify the character stream to start buffering characters
- /// via <seealso cref="IntStream#mark"/> and record the current state. The
- /// current sim state includes the current index into the input, the current
- /// line, and current character position in that line. Note that the Lexer is
- /// tracking the starting line and characterization of the token. These
- /// variables track the "state" of the simulator when it hits an accept
- /// state.
- /// <p/>
- /// We track these variables separately for the DFA and ATN simulation
- /// because the DFA simulation often has to fail over to the ATN
- /// simulation. If the ATN simulation fails, we need the DFA to fall
- /// back to its previously accepted state, if any. If the ATN succeeds,
- /// then the ATN does the accept and the DFA simulator that invoked it
- /// can simply return the predicted token type.
- /// </summary>
- Lexer* const _recog;
-
- /// The current token's starting index into the character stream.
- /// Shared across DFA to ATN simulation in case the ATN fails and the
- /// DFA did not have a previous accept state. In this case, we use the
- /// ATN-generated exception object.
- size_t _startIndex;
-
- /// line number 1..n within the input.
- size_t _line;
-
- /// The index of the character relative to the beginning of the line 0..n-1.
- size_t _charPositionInLine;
-
- public:
- std::vector<dfa::DFA>& _decisionToDFA;
-
- protected:
- size_t _mode;
-
- /// Used during DFA/ATN exec to record the most recent accept configuration
- /// info.
- SimState _prevAccept;
-
- public:
- static int match_calls;
-
- LexerATNSimulator(const ATN& atn, std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache);
- LexerATNSimulator(Lexer* recog, const ATN& atn,
- std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache);
- virtual ~LexerATNSimulator() {}
-
- virtual void copyState(LexerATNSimulator* simulator);
- virtual size_t match(CharStream* input, size_t mode);
- virtual void reset() override;
-
- virtual void clearDFA() override;
-
- protected:
- virtual size_t matchATN(CharStream* input);
- virtual size_t execATN(CharStream* input, dfa::DFAState* ds0);
-
- /// <summary>
- /// Get an existing target state for an edge in the DFA. If the target state
- /// for the edge has not yet been computed or is otherwise not available,
- /// this method returns {@code null}.
- /// </summary>
- /// <param name="s"> The current DFA state </param>
- /// <param name="t"> The next input symbol </param>
- /// <returns> The existing target DFA state for the given input symbol
- /// {@code t}, or {@code null} if the target state for this edge is not
- /// already cached </returns>
- virtual dfa::DFAState* getExistingTargetState(dfa::DFAState* s, size_t t);
-
- /// <summary>
- /// Compute a target state for an edge in the DFA, and attempt to add the
- /// computed state and corresponding edge to the DFA.
- /// </summary>
- /// <param name="input"> The input stream </param>
- /// <param name="s"> The current DFA state </param>
- /// <param name="t"> The next input symbol
- /// </param>
- /// <returns> The computed target DFA state for the given input symbol
- /// {@code t}. If {@code t} does not lead to a valid DFA state, this method
- /// returns <seealso cref="#ERROR"/>. </returns>
- virtual dfa::DFAState* computeTargetState(CharStream* input, dfa::DFAState* s,
- size_t t);
-
- virtual size_t failOrAccept(CharStream* input, ATNConfigSet* reach, size_t t);
-
- /// <summary>
- /// Given a starting configuration set, figure out all ATN configurations
- /// we can reach upon input {@code t}. Parameter {@code reach} is a return
- /// parameter.
- /// </summary>
- void getReachableConfigSet(
- CharStream* input,
- ATNConfigSet* closure_, // closure_ as we have a closure() already
- ATNConfigSet* reach, size_t t);
-
- virtual void accept(CharStream* input,
- const Ref<LexerActionExecutor>& lexerActionExecutor,
- size_t startIndex, size_t index, size_t line,
- size_t charPos);
-
- virtual ATNState* getReachableTarget(Transition* trans, size_t t);
-
- virtual std::unique_ptr<ATNConfigSet> computeStartState(CharStream* input,
- ATNState* p);
-
- /// <summary>
- /// Since the alternatives within any lexer decision are ordered by
- /// preference, this method stops pursuing the closure as soon as an accept
- /// state is reached. After the first accept state is reached by depth-first
- /// search from {@code config}, all other (potentially reachable) states for
- /// this rule would have a lower priority.
- /// </summary>
- /// <returns> {@code true} if an accept state is reached, otherwise
- /// {@code false}. </returns>
- virtual bool closure(CharStream* input, const Ref<LexerATNConfig>& config,
- ATNConfigSet* configs, bool currentAltReachedAcceptState,
- bool speculative, bool treatEofAsEpsilon);
-
- // side-effect: can alter configs.hasSemanticContext
- virtual Ref<LexerATNConfig> getEpsilonTarget(
- CharStream* input, const Ref<LexerATNConfig>& config, Transition* t,
- ATNConfigSet* configs, bool speculative, bool treatEofAsEpsilon);
-
- /// <summary>
- /// Evaluate a predicate specified in the lexer.
- /// <p/>
- /// If {@code speculative} is {@code true}, this method was called before
- /// <seealso cref="#consume"/> for the matched character. This method should
- /// call <seealso cref="#consume"/> before evaluating the predicate to ensure
- /// position sensitive values, including <seealso cref="Lexer#getText"/>,
- /// <seealso cref="Lexer#getLine"/>, and <seealso
- /// cref="Lexer#getCharPositionInLine"/>, properly reflect the current lexer
- /// state. This method should restore {@code input} and the simulator to the
- /// original state before returning (i.e. undo the actions made by the call to
- /// <seealso cref="#consume"/>.
- /// </summary>
- /// <param name="input"> The input stream. </param>
- /// <param name="ruleIndex"> The rule containing the predicate. </param>
- /// <param name="predIndex"> The index of the predicate within the rule.
- /// </param> <param name="speculative"> {@code true} if the current index in
- /// {@code input} is one character before the predicate's location.
- /// </param>
- /// <returns> {@code true} if the specified predicate evaluates to
- /// {@code true}. </returns>
- virtual bool evaluatePredicate(CharStream* input, size_t ruleIndex,
- size_t predIndex, bool speculative);
-
- virtual void captureSimState(CharStream* input, dfa::DFAState* dfaState);
- virtual dfa::DFAState* addDFAEdge(dfa::DFAState* from, size_t t,
- ATNConfigSet* q);
- virtual void addDFAEdge(dfa::DFAState* p, size_t t, dfa::DFAState* q);
-
- /// <summary>
- /// Add a new DFA state if there isn't one with this set of
- /// configurations already. This method also detects the first
- /// configuration containing an ATN rule stop state. Later, when
- /// traversing the DFA, we will know which rule to accept.
- /// </summary>
- virtual dfa::DFAState* addDFAState(ATNConfigSet* configs);
-
- public:
- dfa::DFA& getDFA(size_t mode);
-
- /// Get the text matched so far for the current token.
- virtual std::string getText(CharStream* input);
- virtual size_t getLine() const;
- virtual void setLine(size_t line);
- virtual size_t getCharPositionInLine();
- virtual void setCharPositionInLine(size_t charPositionInLine);
- virtual void consume(CharStream* input);
- virtual std::string getTokenName(size_t t);
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace atn
-} // namespace antlr4
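
captureSimState() and failOrAccept() declared above implement maximal munch: the simulator keeps consuming past an accept state in search of a longer match and, on a dead end, rewinds the stream to the last recorded accept. A toy-DFA sketch of that control flow (illustrative types, no ATN fallback and no line/column bookkeeping):

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Maximal-munch sketch of the execATN()/failOrAccept() flow over a toy DFA
// (plain structs instead of dfa::DFAState): follow edges while they exist,
// snapshot every accept state, and rewind to the snapshot on a dead end.
struct ToyState {
  int acceptToken = -1;                     // -1 means "not an accept state"
  std::vector<std::pair<char, int>> edges;  // (symbol, target state index)
};

inline int nextState(const ToyState& s, char c) {
  for (const auto& e : s.edges) {
    if (e.first == c) return e.second;
  }
  return -1;  // no edge for this symbol: dead end
}

// Returns the matched token type, or -1 if no prefix starting at `pos` is
// accepted. On success, `pos` is left just past the matched lexeme.
inline int matchLongest(const std::vector<ToyState>& dfa,
                        const std::string& input, std::size_t& pos) {
  int state = 0;       // state 0 is the start state in this sketch
  int lastToken = -1;  // last accept seen so far
  std::size_t lastEnd = pos;
  for (std::size_t i = pos; i < input.size(); ++i) {
    state = nextState(dfa[state], input[i]);
    if (state < 0) break;  // dead end: fall back to the snapshot
    if (dfa[state].acceptToken >= 0) {
      lastToken = dfa[state].acceptToken;
      lastEnd = i + 1;  // keep scanning in case a longer match exists
    }
  }
  if (lastToken >= 0) pos = lastEnd;  // "seek" back to the accept point
  return lastToken;
}
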
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.cpp
deleted file mode 100644
index 0aa4a00ec5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "LexerAction.h"
-
-antlr4::atn::LexerAction::~LexerAction() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.h
deleted file mode 100644
index 6919ad12ad..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerAction.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Represents a single action which can be executed following the successful
-/// match of a lexer rule. Lexer actions are used for both embedded action
-/// syntax and ANTLR 4's new lexer command syntax.
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerAction {
- public:
- virtual ~LexerAction();
-
- /// <summary>
- /// Gets the serialization type of the lexer action.
- /// </summary>
- /// <returns> The serialization type of the lexer action. </returns>
- virtual LexerActionType getActionType() const = 0;
-
- /// <summary>
- /// Gets whether the lexer action is position-dependent. Position-dependent
- /// actions may have different semantics depending on the <seealso
- /// cref="CharStream"/> index at the time the action is executed.
- ///
- /// <para>Many lexer commands, including {@code type}, {@code skip}, and
- /// {@code more}, do not check the input index during their execution.
- /// Actions like this are position-independent, and may be stored more
- /// efficiently as part of the <seealso
- /// cref="LexerATNConfig#lexerActionExecutor"/>.</para>
- /// </summary>
- /// <returns> {@code true} if the lexer action semantics can be affected by
- /// the position of the input <seealso cref="CharStream"/> at the time it is
- /// executed; otherwise, {@code false}. </returns>
- virtual bool isPositionDependent() const = 0;
-
- /// <summary>
- /// Execute the lexer action in the context of the specified <seealso
- /// cref="Lexer"/>.
- ///
- /// <para>For position-dependent actions, the input stream must already be
- /// positioned correctly prior to calling this method.</para>
- /// </summary>
- /// <param name="lexer"> The lexer instance. </param>
- virtual void execute(Lexer* lexer) = 0;
-
- virtual size_t hashCode() const = 0;
- virtual bool operator==(const LexerAction& obj) const = 0;
- virtual bool operator!=(const LexerAction& obj) const {
- return !(*this == obj);
- }
-
- virtual std::string toString() const = 0;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp
deleted file mode 100644
index d656caeb71..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/LexerIndexedCustomAction.h"
-#include "misc/MurmurHash.h"
-#include "support/Arrays.h"
-#include "support/CPPUtils.h"
-
-#include "atn/LexerActionExecutor.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-LexerActionExecutor::LexerActionExecutor(
- const std::vector<Ref<LexerAction>>& lexerActions)
- : _lexerActions(lexerActions), _hashCode(generateHashCode()) {}
-
-LexerActionExecutor::~LexerActionExecutor() {}
-
-Ref<LexerActionExecutor> LexerActionExecutor::append(
- Ref<LexerActionExecutor> const& lexerActionExecutor,
- Ref<LexerAction> const& lexerAction) {
- if (lexerActionExecutor == nullptr) {
- return std::make_shared<LexerActionExecutor>(
- std::vector<Ref<LexerAction>>{lexerAction});
- }
-
- std::vector<Ref<LexerAction>> lexerActions =
- lexerActionExecutor->_lexerActions; // Make a copy.
- lexerActions.push_back(lexerAction);
- return std::make_shared<LexerActionExecutor>(lexerActions);
-}
-
-Ref<LexerActionExecutor> LexerActionExecutor::fixOffsetBeforeMatch(
- int offset, const Ref<LexerActionExecutor>& this_ref) {
- std::vector<Ref<LexerAction>> updatedLexerActions;
- for (size_t i = 0; i < _lexerActions.size(); i++) {
- if (_lexerActions[i]->isPositionDependent() &&
- !is<LexerIndexedCustomAction>(_lexerActions[i])) {
- if (updatedLexerActions.empty()) {
- updatedLexerActions = _lexerActions; // Make a copy.
- }
-
- updatedLexerActions[i] =
- std::make_shared<LexerIndexedCustomAction>(offset, _lexerActions[i]);
- }
- }
-
- if (updatedLexerActions.empty()) {
- return this_ref;
- }
-
- return std::make_shared<LexerActionExecutor>(updatedLexerActions);
-}
-
-std::vector<Ref<LexerAction>> LexerActionExecutor::getLexerActions() const {
- return _lexerActions;
-}
-
-void LexerActionExecutor::execute(Lexer* lexer, CharStream* input,
- size_t startIndex) {
- bool requiresSeek = false;
- size_t stopIndex = input->index();
-
- auto onExit = finally([requiresSeek, input, stopIndex]() {
- if (requiresSeek) {
- input->seek(stopIndex);
- }
- });
- for (auto lexerAction : _lexerActions) {
- if (is<LexerIndexedCustomAction>(lexerAction)) {
- int offset =
- (std::static_pointer_cast<LexerIndexedCustomAction>(lexerAction))
- ->getOffset();
- input->seek(startIndex + offset);
- lexerAction =
- std::static_pointer_cast<LexerIndexedCustomAction>(lexerAction)
- ->getAction();
- requiresSeek = (startIndex + offset) != stopIndex;
- } else if (lexerAction->isPositionDependent()) {
- input->seek(stopIndex);
- requiresSeek = false;
- }
-
- lexerAction->execute(lexer);
- }
-}
-
-size_t LexerActionExecutor::hashCode() const { return _hashCode; }
-
-bool LexerActionExecutor::operator==(const LexerActionExecutor& obj) const {
- if (&obj == this) {
- return true;
- }
-
- return _hashCode == obj._hashCode &&
- Arrays::equals(_lexerActions, obj._lexerActions);
-}
-
-bool LexerActionExecutor::operator!=(const LexerActionExecutor& obj) const {
- return !operator==(obj);
-}
-
-size_t LexerActionExecutor::generateHashCode() const {
- size_t hash = MurmurHash::initialize();
- for (auto lexerAction : _lexerActions) {
- hash = MurmurHash::update(hash, lexerAction);
- }
- hash = MurmurHash::finish(hash, _lexerActions.size());
-
- return hash;
-}
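
fixOffsetBeforeMatch() above is a copy-on-write transform: it clones the action vector only once it finds a position-dependent action that still needs an offset, and otherwise returns the shared executor unchanged. A stripped-down sketch of that pattern, with a placeholder Action type and a plain offset field standing in for LexerIndexedCustomAction:

#include <cstddef>
#include <memory>
#include <vector>

// Copy-on-write sketch (placeholder Action type; a plain offset field stands
// in for wrapping with LexerIndexedCustomAction): clone the vector lazily on
// the first change, otherwise return the shared input list untouched.
struct Action {
  bool positionDependent = false;
  int offset = -1;  // -1 means "no fixed offset assigned yet"
};

using ActionList = std::vector<std::shared_ptr<Action>>;

inline std::shared_ptr<ActionList> fixOffsets(
    const std::shared_ptr<ActionList>& self, int offset) {
  ActionList updated;  // stays empty until an action actually needs adjusting
  for (std::size_t i = 0; i < self->size(); ++i) {
    const auto& a = (*self)[i];
    if (a->positionDependent && a->offset < 0) {
      if (updated.empty()) updated = *self;  // lazy copy of the whole list
      auto wrapped = std::make_shared<Action>(*a);
      wrapped->offset = offset;              // pin the current offset
      updated[i] = wrapped;
    }
  }
  if (updated.empty()) return self;          // nothing changed: share as-is
  return std::make_shared<ActionList>(std::move(updated));
}
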
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h
deleted file mode 100644
index acb5c34566..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionExecutor.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CharStream.h"
-#include "atn/LexerAction.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Represents an executor for a sequence of lexer actions which were traversed
-/// during the matching operation of a lexer rule (token).
-///
-/// <para>The executor tracks position information for position-dependent lexer
-/// actions efficiently, ensuring that actions appearing only at the end of the
-/// rule do not cause bloating of the <seealso cref="DFA"/> created for the
-/// lexer.</para>
-class ANTLR4CPP_PUBLIC LexerActionExecutor {
- public:
- /// <summary>
- /// Constructs an executor for a sequence of <seealso cref="LexerAction"/>
- /// actions. </summary> <param name="lexerActions"> The lexer actions to
- /// execute. </param>
- LexerActionExecutor(const std::vector<Ref<LexerAction>>& lexerActions);
- virtual ~LexerActionExecutor();
-
- /// <summary>
- /// Creates a <seealso cref="LexerActionExecutor"/> which executes the actions
- /// for the input {@code lexerActionExecutor} followed by a specified
- /// {@code lexerAction}.
- /// </summary>
- /// <param name="lexerActionExecutor"> The executor for actions already
- /// traversed by the lexer while matching a token within a particular <seealso
- /// cref="LexerATNConfig"/>. If this is {@code null}, the method behaves as
- /// though it were an empty executor. </param>
- /// <param name="lexerAction"> The lexer action to execute after the actions
- /// specified in {@code lexerActionExecutor}.
- /// </param>
- /// <returns> A <seealso cref="LexerActionExecutor"/> for executing the
-  /// combined actions of {@code lexerActionExecutor} and {@code lexerAction}.
- /// </returns>
- static Ref<LexerActionExecutor> append(
- Ref<LexerActionExecutor> const& lexerActionExecutor,
- Ref<LexerAction> const& lexerAction);
-
- /// <summary>
- /// Creates a <seealso cref="LexerActionExecutor"/> which encodes the current
- /// offset for position-dependent lexer actions.
- ///
- /// <para>Normally, when the executor encounters lexer actions where
- /// <seealso cref="LexerAction#isPositionDependent"/> returns {@code true}, it
- /// calls <seealso cref="IntStream#seek"/> on the input <seealso
- /// cref="CharStream"/> to set the input position to the <em>end</em> of the
- /// current token. This behavior provides for efficient DFA representation of
- /// lexer actions which appear at the end of a lexer rule, even when the lexer
- /// rule matches a variable number of characters.</para>
- ///
- /// <para>Prior to traversing a match transition in the ATN, the current
- /// offset from the token start index is assigned to all position-dependent
- /// lexer actions which have not already been assigned a fixed offset. By
- /// storing the offsets relative to the token start index, the DFA
- /// representation of lexer actions which appear in the middle of tokens
- /// remains efficient due to sharing among tokens of the same length,
- /// regardless of their absolute position in the input stream.</para>
- ///
- /// <para>If the current executor already has offsets assigned to all
- /// position-dependent lexer actions, the method returns {@code this}.</para>
- /// </summary>
- /// <param name="offset"> The current offset to assign to all
- /// position-dependent lexer actions which do not already have offsets
- /// assigned.
- /// </param>
- /// <returns> A <seealso cref="LexerActionExecutor"/> which stores input
- /// stream offsets for all position-dependent lexer actions. </returns>
- virtual Ref<LexerActionExecutor> fixOffsetBeforeMatch(
- int offset, const Ref<LexerActionExecutor>& this_ref);
-
- /// <summary>
- /// Gets the lexer actions to be executed by this executor. </summary>
- /// <returns> The lexer actions to be executed by this executor. </returns>
- virtual std::vector<Ref<LexerAction>> getLexerActions() const;
-
- /// <summary>
- /// Execute the actions encapsulated by this executor within the context of a
- /// particular <seealso cref="Lexer"/>.
- ///
- /// <para>This method calls <seealso cref="IntStream#seek"/> to set the
- /// position of the
- /// {@code input} <seealso cref="CharStream"/> prior to calling
- /// <seealso cref="LexerAction#execute"/> on a position-dependent action.
- /// Before the method returns, the input position will be restored to the same
- /// position it was in when the method was invoked.</para>
- /// </summary>
- /// <param name="lexer"> The lexer instance. </param>
- /// <param name="input"> The input stream which is the source for the current
- /// token. When this method is called, the current <seealso
- /// cref="IntStream#index"/> for
- /// {@code input} should be the start of the following token, i.e. 1
- /// character past the end of the current token. </param>
- /// <param name="startIndex"> The token start index. This value may be passed
- /// to <seealso cref="IntStream#seek"/> to set the {@code input} position to
- /// the beginning of the token. </param>
- virtual void execute(Lexer* lexer, CharStream* input, size_t startIndex);
-
- virtual size_t hashCode() const;
- virtual bool operator==(const LexerActionExecutor& obj) const;
- virtual bool operator!=(const LexerActionExecutor& obj) const;
-
- private:
- const std::vector<Ref<LexerAction>> _lexerActions;
-
- /// Caches the result of <seealso cref="#hashCode"/> since the hash code is an
- /// element of the performance-critical <seealso
- /// cref="LexerATNConfig#hashCode"/> operation.
- const size_t _hashCode;
-
- size_t generateHashCode() const;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionType.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionType.h
deleted file mode 100644
index c65d861dfe..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerActionType.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Represents the serialization type of a <seealso cref="LexerAction"/>.
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-enum class LexerActionType : size_t {
- /// <summary>
- /// The type of a <seealso cref="LexerChannelAction"/> action.
- /// </summary>
- CHANNEL,
- /// <summary>
- /// The type of a <seealso cref="LexerCustomAction"/> action.
- /// </summary>
- CUSTOM,
- /// <summary>
- /// The type of a <seealso cref="LexerModeAction"/> action.
- /// </summary>
- MODE,
- /// <summary>
- /// The type of a <seealso cref="LexerMoreAction"/> action.
- /// </summary>
- MORE,
- /// <summary>
- /// The type of a <seealso cref="LexerPopModeAction"/> action.
- /// </summary>
- POP_MODE,
- /// <summary>
- /// The type of a <seealso cref="LexerPushModeAction"/> action.
- /// </summary>
- PUSH_MODE,
- /// <summary>
- /// The type of a <seealso cref="LexerSkipAction"/> action.
- /// </summary>
- SKIP,
- /// <summary>
- /// The type of a <seealso cref="LexerTypeAction"/> action.
- /// </summary>
- TYPE,
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp
deleted file mode 100644
index cee4c6e9d9..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/LexerChannelAction.h"
-
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-LexerChannelAction::LexerChannelAction(int channel) : _channel(channel) {}
-
-int LexerChannelAction::getChannel() const { return _channel; }
-
-LexerActionType LexerChannelAction::getActionType() const {
- return LexerActionType::CHANNEL;
-}
-
-bool LexerChannelAction::isPositionDependent() const { return false; }
-
-void LexerChannelAction::execute(Lexer* lexer) { lexer->setChannel(_channel); }
-
-size_t LexerChannelAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, _channel);
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerChannelAction::operator==(const LexerAction& obj) const {
- if (&obj == this) {
- return true;
- }
-
- const LexerChannelAction* action =
- dynamic_cast<const LexerChannelAction*>(&obj);
- if (action == nullptr) {
- return false;
- }
-
- return _channel == action->_channel;
-}
-
-std::string LexerChannelAction::toString() const {
- return "channel(" + std::to_string(_channel) + ")";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.h
deleted file mode 100644
index 2664dbecbc..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerChannelAction.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-using antlr4::Lexer;
-
-/// <summary>
-/// Implements the {@code channel} lexer action by calling
-/// <seealso cref="Lexer#setChannel"/> with the assigned channel.
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerChannelAction final : public LexerAction {
- public:
- /// <summary>
- /// Constructs a new {@code channel} action with the specified channel value.
- /// </summary> <param name="channel"> The channel value to pass to <seealso
- /// cref="Lexer#setChannel"/>. </param>
- LexerChannelAction(int channel);
-
- /// <summary>
- /// Gets the channel to use for the <seealso cref="Token"/> created by the
- /// lexer.
- /// </summary>
- /// <returns> The channel to use for the <seealso cref="Token"/> created by
- /// the lexer. </returns>
- int getChannel() const;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#CHANNEL"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code false}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso
- /// cref="Lexer#setChannel"/> with the value provided by <seealso
- /// cref="#getChannel"/>.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- const int _channel;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp
deleted file mode 100644
index 4d3789c673..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-#include "support/CPPUtils.h"
-
-#include "atn/LexerCustomAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-LexerCustomAction::LexerCustomAction(size_t ruleIndex, size_t actionIndex)
- : _ruleIndex(ruleIndex), _actionIndex(actionIndex) {}
-
-size_t LexerCustomAction::getRuleIndex() const { return _ruleIndex; }
-
-size_t LexerCustomAction::getActionIndex() const { return _actionIndex; }
-
-LexerActionType LexerCustomAction::getActionType() const {
- return LexerActionType::CUSTOM;
-}
-
-bool LexerCustomAction::isPositionDependent() const { return true; }
-
-void LexerCustomAction::execute(Lexer* lexer) {
- lexer->action(nullptr, _ruleIndex, _actionIndex);
-}
-
-size_t LexerCustomAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, _ruleIndex);
- hash = MurmurHash::update(hash, _actionIndex);
- return MurmurHash::finish(hash, 3);
-}
-
-bool LexerCustomAction::operator==(const LexerAction& obj) const {
- if (&obj == this) {
- return true;
- }
-
- const LexerCustomAction* action =
- dynamic_cast<const LexerCustomAction*>(&obj);
- if (action == nullptr) {
- return false;
- }
-
- return _ruleIndex == action->_ruleIndex &&
- _actionIndex == action->_actionIndex;
-}
-
-std::string LexerCustomAction::toString() const {
- return antlrcpp::toString(this);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.h
deleted file mode 100644
index 770229435a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerCustomAction.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Executes a custom lexer action by calling <seealso
-/// cref="Recognizer#action"/> with the rule and action indexes assigned to the
-/// custom action. The implementation of a custom action is added to the
-/// generated code for the lexer in an override of <seealso
-/// cref="Recognizer#action"/> when the grammar is compiled.
-///
-/// <para>This class may represent embedded actions created with the
-/// <code>{...}</code> syntax in ANTLR 4, as well as actions created for lexer
-/// commands where the command argument could not be evaluated when the grammar
-/// was compiled.</para>
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerCustomAction final : public LexerAction {
- public:
- /// <summary>
- /// Constructs a custom lexer action with the specified rule and action
- /// indexes.
- /// </summary>
- /// <param name="ruleIndex"> The rule index to use for calls to
- /// <seealso cref="Recognizer#action"/>. </param>
- /// <param name="actionIndex"> The action index to use for calls to
- /// <seealso cref="Recognizer#action"/>. </param>
- LexerCustomAction(size_t ruleIndex, size_t actionIndex);
-
- /// <summary>
- /// Gets the rule index to use for calls to <seealso
- /// cref="Recognizer#action"/>.
- /// </summary>
- /// <returns> The rule index for the custom action. </returns>
- size_t getRuleIndex() const;
-
- /// <summary>
- /// Gets the action index to use for calls to <seealso
- /// cref="Recognizer#action"/>.
- /// </summary>
- /// <returns> The action index for the custom action. </returns>
- size_t getActionIndex() const;
-
- /// <summary>
- /// {@inheritDoc}
- /// </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#CUSTOM"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// Gets whether the lexer action is position-dependent. Position-dependent
- /// actions may have different semantics depending on the <seealso
- /// cref="CharStream"/> index at the time the action is executed.
- ///
- /// <para>Custom actions are position-dependent since they may represent a
- /// user-defined embedded action which makes calls to methods like
- /// <seealso cref="Lexer#getText"/>.</para>
- /// </summary>
- /// <returns> This method returns {@code true}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>Custom actions are implemented by calling <seealso
- /// cref="Lexer#action"/> with the appropriate rule and action indexes.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- const size_t _ruleIndex;
- const size_t _actionIndex;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp
deleted file mode 100644
index b3f6e46504..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-#include "support/CPPUtils.h"
-
-#include "atn/LexerIndexedCustomAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-LexerIndexedCustomAction::LexerIndexedCustomAction(
- int offset, Ref<LexerAction> const& action)
- : _offset(offset), _action(action) {}
-
-int LexerIndexedCustomAction::getOffset() const { return _offset; }
-
-Ref<LexerAction> LexerIndexedCustomAction::getAction() const { return _action; }
-
-LexerActionType LexerIndexedCustomAction::getActionType() const {
- return _action->getActionType();
-}
-
-bool LexerIndexedCustomAction::isPositionDependent() const { return true; }
-
-void LexerIndexedCustomAction::execute(Lexer* lexer) {
- // assume the input stream position was properly set by the calling code
- _action->execute(lexer);
-}
-
-size_t LexerIndexedCustomAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, _offset);
- hash = MurmurHash::update(hash, _action);
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerIndexedCustomAction::operator==(const LexerAction& obj) const {
- if (&obj == this) {
- return true;
- }
-
- const LexerIndexedCustomAction* action =
- dynamic_cast<const LexerIndexedCustomAction*>(&obj);
- if (action == nullptr) {
- return false;
- }
-
- return _offset == action->_offset && *_action == *action->_action;
-}
-
-std::string LexerIndexedCustomAction::toString() const {
- return antlrcpp::toString(this);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h
deleted file mode 100644
index e3e7ac5f6a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerIndexedCustomAction.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RuleContext.h"
-#include "atn/LexerAction.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// This implementation of <seealso cref="LexerAction"/> is used for tracking
-/// input offsets for position-dependent actions within a <seealso
-/// cref="LexerActionExecutor"/>.
-///
-/// <para>This action is not serialized as part of the ATN, and is only required
-/// for position-dependent lexer actions which appear at a location other than
-/// the end of a rule. For more information about DFA optimizations employed for
-/// lexer actions, see <seealso cref="LexerActionExecutor#append"/> and
-/// <seealso cref="LexerActionExecutor#fixOffsetBeforeMatch"/>.</para>
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerIndexedCustomAction final : public LexerAction {
- public:
- /// <summary>
- /// Constructs a new indexed custom action by associating a character offset
- /// with a <seealso cref="LexerAction"/>.
- ///
- /// <para>Note: This class is only required for lexer actions for which
- /// <seealso cref="LexerAction#isPositionDependent"/> returns {@code
- /// true}.</para>
- /// </summary>
- /// <param name="offset"> The offset into the input <seealso
- /// cref="CharStream"/>, relative to the token start index, at which the
- /// specified lexer action should be executed. </param> <param name="action">
- /// The lexer action to execute at a particular offset in the input <seealso
- /// cref="CharStream"/>. </param>
- LexerIndexedCustomAction(int offset, Ref<LexerAction> const& action);
-
- /// <summary>
- /// Gets the location in the input <seealso cref="CharStream"/> at which the
- /// lexer action should be executed. The value is interpreted as an offset
- /// relative to the token start index.
- /// </summary>
- /// <returns> The location in the input <seealso cref="CharStream"/> at which
- /// the lexer action should be executed. </returns>
- int getOffset() const;
-
- /// <summary>
- /// Gets the lexer action to execute.
- /// </summary>
- /// <returns> A <seealso cref="LexerAction"/> object which executes the lexer
- /// action. </returns>
- Ref<LexerAction> getAction() const;
-
- /// <summary>
- /// {@inheritDoc}
- /// </summary>
- /// <returns> This method returns the result of calling <seealso
- /// cref="#getActionType"/> on the <seealso cref="LexerAction"/> returned by
- /// <seealso cref="#getAction"/>. </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code true}. </returns>
- virtual bool isPositionDependent() const override;
-
- virtual void execute(Lexer* lexer) override;
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- const int _offset;
- const Ref<LexerAction> _action;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp
deleted file mode 100644
index 11e6105972..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/LexerModeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-LexerModeAction::LexerModeAction(int mode) : _mode(mode) {}
-
-int LexerModeAction::getMode() { return _mode; }
-
-LexerActionType LexerModeAction::getActionType() const {
- return LexerActionType::MODE;
-}
-
-bool LexerModeAction::isPositionDependent() const { return false; }
-
-void LexerModeAction::execute(Lexer* lexer) { lexer->setMode(_mode); }
-
-size_t LexerModeAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, _mode);
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerModeAction::operator==(const LexerAction& obj) const {
- if (&obj == this) {
- return true;
- }
-
- const LexerModeAction* action = dynamic_cast<const LexerModeAction*>(&obj);
- if (action == nullptr) {
- return false;
- }
-
- return _mode == action->_mode;
-}
-
-std::string LexerModeAction::toString() const {
- return "mode(" + std::to_string(_mode) + ")";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.h
deleted file mode 100644
index c46db1e5c4..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerModeAction.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Implements the {@code mode} lexer action by calling <seealso
-/// cref="Lexer#mode"/> with the assigned mode.
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerModeAction final : public LexerAction {
- public:
- /// <summary>
- /// Constructs a new {@code mode} action with the specified mode value.
- /// </summary> <param name="mode"> The mode value to pass to <seealso
- /// cref="Lexer#mode"/>. </param>
- LexerModeAction(int mode);
-
- /// <summary>
- /// Get the lexer mode this action should transition the lexer to.
- /// </summary>
- /// <returns> The lexer mode for this {@code mode} command. </returns>
- int getMode();
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#MODE"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code false}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#mode"/>
- /// with the value provided by <seealso cref="#getMode"/>.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- const int _mode;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp
deleted file mode 100644
index 6d4e12c87f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/LexerMoreAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-const Ref<LexerMoreAction> LexerMoreAction::getInstance() {
- static Ref<LexerMoreAction> instance(new LexerMoreAction());
- return instance;
-}
-
-LexerMoreAction::LexerMoreAction() {}
-
-LexerActionType LexerMoreAction::getActionType() const {
- return LexerActionType::MORE;
-}
-
-bool LexerMoreAction::isPositionDependent() const { return false; }
-
-void LexerMoreAction::execute(Lexer* lexer) { lexer->more(); }
-
-size_t LexerMoreAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- return MurmurHash::finish(hash, 1);
-}
-
-bool LexerMoreAction::operator==(const LexerAction& obj) const {
- return &obj == this;
-}
-
-std::string LexerMoreAction::toString() const { return "more"; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.h
deleted file mode 100644
index e47f46a03f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerMoreAction.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Implements the {@code more} lexer action by calling <seealso
-/// cref="Lexer#more"/>.
-///
-/// <para>The {@code more} command does not have any parameters, so this action
-/// is implemented as a singleton instance exposed by <seealso
-/// cref="#INSTANCE"/>.</para>
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerMoreAction final : public LexerAction {
- public:
- /// <summary>
- /// Provides a singleton instance of this parameterless lexer action.
- /// </summary>
- static const Ref<LexerMoreAction> getInstance();
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#MORE"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code false}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso
- /// cref="Lexer#more"/>.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- /// Constructs the singleton instance of the lexer {@code more} command.
- LexerMoreAction();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp
deleted file mode 100644
index 00db8acec3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/LexerPopModeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-const Ref<LexerPopModeAction> LexerPopModeAction::getInstance() {
- static Ref<LexerPopModeAction> instance(new LexerPopModeAction());
- return instance;
-}
-
-LexerPopModeAction::LexerPopModeAction() {}
-
-LexerActionType LexerPopModeAction::getActionType() const {
- return LexerActionType::POP_MODE;
-}
-
-bool LexerPopModeAction::isPositionDependent() const { return false; }
-
-void LexerPopModeAction::execute(Lexer* lexer) { lexer->popMode(); }
-
-size_t LexerPopModeAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- return MurmurHash::finish(hash, 1);
-}
-
-bool LexerPopModeAction::operator==(const LexerAction& obj) const {
- return &obj == this;
-}
-
-std::string LexerPopModeAction::toString() const { return "popMode"; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h
deleted file mode 100644
index 9389f11930..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPopModeAction.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Implements the {@code popMode} lexer action by calling <seealso
-/// cref="Lexer#popMode"/>.
-///
-/// <para>The {@code popMode} command does not have any parameters, so this
-/// action is implemented as a singleton instance exposed by <seealso
-/// cref="#INSTANCE"/>.</para>
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerPopModeAction final : public LexerAction {
- public:
- /// <summary>
- /// Provides a singleton instance of this parameterless lexer action.
- /// </summary>
- static const Ref<LexerPopModeAction> getInstance();
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#POP_MODE"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code false}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso
- /// cref="Lexer#popMode"/>.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- /// Constructs the singleton instance of the lexer {@code popMode} command.
- LexerPopModeAction();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp
deleted file mode 100644
index 1183fecb09..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/LexerPushModeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-LexerPushModeAction::LexerPushModeAction(int mode) : _mode(mode) {}
-
-int LexerPushModeAction::getMode() const { return _mode; }
-
-LexerActionType LexerPushModeAction::getActionType() const {
- return LexerActionType::PUSH_MODE;
-}
-
-bool LexerPushModeAction::isPositionDependent() const { return false; }
-
-void LexerPushModeAction::execute(Lexer* lexer) { lexer->pushMode(_mode); }
-
-size_t LexerPushModeAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, _mode);
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerPushModeAction::operator==(const LexerAction& obj) const {
- if (&obj == this) {
- return true;
- }
-
- const LexerPushModeAction* action =
- dynamic_cast<const LexerPushModeAction*>(&obj);
- if (action == nullptr) {
- return false;
- }
-
- return _mode == action->_mode;
-}
-
-std::string LexerPushModeAction::toString() const {
- return "pushMode(" + std::to_string(_mode) + ")";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h
deleted file mode 100644
index 46ddf03d16..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerPushModeAction.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Implements the {@code pushMode} lexer action by calling
-/// <seealso cref="Lexer#pushMode"/> with the assigned mode.
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerPushModeAction final : public LexerAction {
- public:
- /// <summary>
- /// Constructs a new {@code pushMode} action with the specified mode value.
- /// </summary> <param name="mode"> The mode value to pass to <seealso
- /// cref="Lexer#pushMode"/>. </param>
- LexerPushModeAction(int mode);
-
- /// <summary>
- /// Get the lexer mode this action should transition the lexer to.
- /// </summary>
- /// <returns> The lexer mode for this {@code pushMode} command. </returns>
- int getMode() const;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#PUSH_MODE"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code false}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso
- /// cref="Lexer#pushMode"/> with the value provided by <seealso
- /// cref="#getMode"/>.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- const int _mode;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp
deleted file mode 100644
index e541e2a01a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/LexerSkipAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-const Ref<LexerSkipAction> LexerSkipAction::getInstance() {
- static Ref<LexerSkipAction> instance(new LexerSkipAction());
- return instance;
-}
-
-LexerSkipAction::LexerSkipAction() {}
-
-LexerActionType LexerSkipAction::getActionType() const {
- return LexerActionType::SKIP;
-}
-
-bool LexerSkipAction::isPositionDependent() const { return false; }
-
-void LexerSkipAction::execute(Lexer* lexer) { lexer->skip(); }
-
-size_t LexerSkipAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- return MurmurHash::finish(hash, 1);
-}
-
-bool LexerSkipAction::operator==(const LexerAction& obj) const {
- return &obj == this;
-}
-
-std::string LexerSkipAction::toString() const { return "skip"; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.h
deleted file mode 100644
index 3cfe0e6444..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerSkipAction.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// Implements the {@code skip} lexer action by calling <seealso
-/// cref="Lexer#skip"/>.
-///
-/// <para>The {@code skip} command does not have any parameters, so this action
-/// is implemented as a singleton instance exposed by <seealso
-/// cref="#INSTANCE"/>.</para>
-///
-/// @author Sam Harwell
-/// @since 4.2
-/// </summary>
-class ANTLR4CPP_PUBLIC LexerSkipAction final : public LexerAction {
- public:
- /// Provides a singleton instance of this parameterless lexer action.
- static const Ref<LexerSkipAction> getInstance();
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#SKIP"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code false}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso
- /// cref="Lexer#skip"/>.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- /// Constructs the singleton instance of the lexer {@code skip} command.
- LexerSkipAction();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp
deleted file mode 100644
index 286715c89f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Lexer.h"
-#include "misc/MurmurHash.h"
-
-#include "atn/LexerTypeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-LexerTypeAction::LexerTypeAction(int type) : _type(type) {}
-
-int LexerTypeAction::getType() const { return _type; }
-
-LexerActionType LexerTypeAction::getActionType() const {
- return LexerActionType::TYPE;
-}
-
-bool LexerTypeAction::isPositionDependent() const { return false; }
-
-void LexerTypeAction::execute(Lexer* lexer) { lexer->setType(_type); }
-
-size_t LexerTypeAction::hashCode() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, _type);
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerTypeAction::operator==(const LexerAction& obj) const {
- if (&obj == this) {
- return true;
- }
-
- const LexerTypeAction* action = dynamic_cast<const LexerTypeAction*>(&obj);
- if (action == nullptr) {
- return false;
- }
-
- return _type == action->_type;
-}
-
-std::string LexerTypeAction::toString() const {
- return "type(" + std::to_string(_type) + ")";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.h
deleted file mode 100644
index 3c018ef23f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LexerTypeAction.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Implements the {@code type} lexer action by calling <seealso
-/// cref="Lexer#setType"/> with the assigned type.
-class ANTLR4CPP_PUBLIC LexerTypeAction : public LexerAction {
- public:
- /// <summary>
- /// Constructs a new {@code type} action with the specified token type value.
- /// </summary> <param name="type"> The type to assign to the token using
- /// <seealso cref="Lexer#setType"/>. </param>
- LexerTypeAction(int type);
-
- /// <summary>
- /// Gets the type to assign to a token created by the lexer. </summary>
- /// <returns> The type to assign to a token created by the lexer. </returns>
- virtual int getType() const;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns <seealso cref="LexerActionType#TYPE"/>.
- /// </returns>
- virtual LexerActionType getActionType() const override;
-
- /// <summary>
- /// {@inheritDoc} </summary>
- /// <returns> This method returns {@code false}. </returns>
- virtual bool isPositionDependent() const override;
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso
- /// cref="Lexer#setType"/> with the value provided by <seealso
- /// cref="#getType"/>.</para>
- /// </summary>
- virtual void execute(Lexer* lexer) override;
-
- virtual size_t hashCode() const override;
- virtual bool operator==(const LexerAction& obj) const override;
- virtual std::string toString() const override;
-
- private:
- const int _type;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp
deleted file mode 100644
index 861a66c307..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/LookaheadEventInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-LookaheadEventInfo::LookaheadEventInfo(size_t decision, ATNConfigSet* configs,
- size_t predictedAlt, TokenStream* input,
- size_t startIndex, size_t stopIndex,
- bool fullCtx)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex,
- fullCtx) {
- this->predictedAlt = predictedAlt;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h
deleted file mode 100644
index f1dd24794d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LookaheadEventInfo.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// This class represents profiling event information for tracking the lookahead
-/// depth required in order to make a prediction.
-class ANTLR4CPP_PUBLIC LookaheadEventInfo : public DecisionEventInfo {
- public:
- /// The alternative chosen by adaptivePredict(), not necessarily
- /// the outermost alt shown for a rule; left-recursive rules have
- /// user-level alts that differ from the rewritten rule with a (...) block
- /// and a (..)* loop.
- size_t predictedAlt = 0;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="LookaheadEventInfo"/>
- /// class with the specified detailed lookahead information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set containing the
- /// necessary information to determine the result of a prediction, or {@code
- /// null} if the final configuration set is not available </param> <param
- /// name="input"> The input token stream </param> <param name="startIndex">
- /// The start index for the current prediction </param> <param
- /// name="stopIndex"> The index at which the prediction was finally made
- /// </param> <param name="fullCtx"> {@code true} if the current lookahead is
- /// part of an LL prediction; otherwise, {@code false} if the current
- /// lookahead is part of an SLL prediction </param>
- LookaheadEventInfo(size_t decision, ATNConfigSet* configs,
- size_t predictedAlt, TokenStream* input, size_t startIndex,
- size_t stopIndex, bool fullCtx);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.cpp
deleted file mode 100644
index 59fa8f05f1..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/LoopEndState.h"
-
-using namespace antlr4::atn;
-
-size_t LoopEndState::getStateType() { return LOOP_END; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.h
deleted file mode 100644
index d3f6a53e6a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/LoopEndState.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Mark the end of a * or + loop.
-class ANTLR4CPP_PUBLIC LoopEndState final : public ATNState {
- public:
- ATNState* loopBackState = nullptr;
-
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Makefile b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Makefile
deleted file mode 100644
index 480bd85929..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-
-CXXFLAGS += -g -std=c++0x -Wall #-Wextra
-CXXFLAGS += -I. -I../ -I../misc/ -I../tree/ -I../dfa/ \
- -I../../../../../antlrcpp/
-
-#TODO LDFLAGS += ?
-
-ALL_CXXFLAGS = $(CPPFLAGS) $(CXXFLAGS)
-ALL_LDFLAGS = $(LDFLAGS)
-
-# Escote's files
-SRCS = \
- AbstractPredicateTransition.cpp \
- ActionTransition.cpp \
- ArrayPredictionContext.cpp \
- ATNDeserializationOptions.cpp \
- ATNDeserializer.cpp \
- ATNState.cpp \
- ATNType.cpp \
- AtomTransition.cpp \
- BasicBlockStartState.cpp \
- BasicState.cpp \
- BlockEndState.cpp \
- BlockStartState.cpp \
- DecisionState.cpp \
- EmptyPredictionContext.cpp \
- EpsilonTransition.cpp \
- LexerATNConfig.cpp \
- LoopEndState.cpp
-# Escote's TODO: LL1Analyzer.cpp LexerATNSimulator.cpp ATNSimulator.cpp \
- ATNSerializer.cpp ATNConfigSet.cpp ATNConfig.cpp \
- ATN.cpp
-
-# Alejandro's files
-SRCS += \
- NotSetTransition.cpp \
- OrderedATNConfigSet.cpp \
- PlusBlockStartState.cpp \
- PlusLoopbackState.cpp \
- PredicateTransition.cpp \
- PredictionMode.cpp \
- RangeTransition.cpp \
- RuleStartState.cpp \
- RuleStopState.cpp \
- RuleTransition.cpp \
- SemanticContext.cpp \
- SetTransition.cpp \
- SingletonPredictionContext.cpp \
- StarBlockStartState.cpp \
- StarLoopbackState.cpp \
- StarLoopEntryState.cpp \
- TokensStartState.cpp \
- Transition.cpp \
- WildcardTransition.cpp
-# Alejandro's TODO: PredictionContext.cpp PredictionContextCache.cpp \
- PrecedencePredicateTransition.cpp ParserATNSimulator.cpp
-
-OBJS = $(SRCS:.cpp=.o)
-
-all: $(OBJS)
-
-%.o: %.cpp
- $(CXX) -c $(ALL_CXXFLAGS) $< -o $@
-
-clean:
- $(RM) $(OBJS)
-
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp
deleted file mode 100644
index ce334a672a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/NotSetTransition.h"
-#include "atn/ATNState.h"
-#include "misc/IntervalSet.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-NotSetTransition::NotSetTransition(ATNState* target,
- const misc::IntervalSet& set)
- : SetTransition(target, set) {}
-
-Transition::SerializationType NotSetTransition::getSerializationType() const {
- return NOT_SET;
-}
-
-bool NotSetTransition::matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol &&
- !SetTransition::matches(symbol, minVocabSymbol, maxVocabSymbol);
-}
-
-std::string NotSetTransition::toString() const {
- return "NOT_SET " + Transition::toString() + " { " +
- SetTransition::toString() + " }";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.h
deleted file mode 100644
index 6e2f3e1dc7..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/NotSetTransition.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/SetTransition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC NotSetTransition final : public SetTransition {
- public:
- NotSetTransition(ATNState* target, const misc::IntervalSet& set);
-
- virtual SerializationType getSerializationType() const override;
-
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp
deleted file mode 100644
index 19a200e978..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/OrderedATNConfigSet.h"
-
-using namespace antlr4::atn;
-
-size_t OrderedATNConfigSet::getHash(ATNConfig* c) { return c->hashCode(); }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h
deleted file mode 100644
index 86bcdd33cd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/OrderedATNConfigSet.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC OrderedATNConfigSet : public ATNConfigSet {
- protected:
- virtual size_t getHash(ATNConfig* c) override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.cpp
deleted file mode 100644
index b39bab2e10..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ProfilingATNSimulator.h"
-#include "dfa/DFA.h"
-
-#include "atn/ParseInfo.h"
-
-using namespace antlr4::atn;
-
-ParseInfo::ParseInfo(ProfilingATNSimulator* atnSimulator)
- : _atnSimulator(atnSimulator) {}
-
-ParseInfo::~ParseInfo() {}
-
-std::vector<DecisionInfo> ParseInfo::getDecisionInfo() {
- return _atnSimulator->getDecisionInfo();
-}
-
-std::vector<size_t> ParseInfo::getLLDecisions() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- std::vector<size_t> LL;
- for (size_t i = 0; i < decisions.size(); ++i) {
- long long fallBack = decisions[i].LL_Fallback;
- if (fallBack > 0) {
- LL.push_back(i);
- }
- }
- return LL;
-}
-
-long long ParseInfo::getTotalTimeInPrediction() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long t = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- t += decisions[i].timeInPrediction;
- }
- return t;
-}
-
-long long ParseInfo::getTotalSLLLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].SLL_TotalLook;
- }
- return k;
-}
-
-long long ParseInfo::getTotalLLLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); i++) {
- k += decisions[i].LL_TotalLook;
- }
- return k;
-}
-
-long long ParseInfo::getTotalSLLATNLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].SLL_ATNTransitions;
- }
- return k;
-}
-
-long long ParseInfo::getTotalLLATNLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].LL_ATNTransitions;
- }
- return k;
-}
-
-long long ParseInfo::getTotalATNLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].SLL_ATNTransitions;
- k += decisions[i].LL_ATNTransitions;
- }
- return k;
-}
-
-size_t ParseInfo::getDFASize() {
- size_t n = 0;
- std::vector<dfa::DFA>& decisionToDFA = _atnSimulator->decisionToDFA;
- for (size_t i = 0; i < decisionToDFA.size(); ++i) {
- n += getDFASize(i);
- }
- return n;
-}
-
-size_t ParseInfo::getDFASize(size_t decision) {
- dfa::DFA& decisionToDFA = _atnSimulator->decisionToDFA[decision];
- return decisionToDFA.states.size();
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.h
deleted file mode 100644
index 6ba2b4a075..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParseInfo.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ProfilingATNSimulator;
-
-/// This class provides access to specific and aggregate statistics gathered
-/// during profiling of a parser.
-class ANTLR4CPP_PUBLIC ParseInfo {
- public:
- ParseInfo(ProfilingATNSimulator* atnSimulator);
- ParseInfo(ParseInfo const&) = default;
- virtual ~ParseInfo();
-
- ParseInfo& operator=(ParseInfo const&) = default;
-
- /// <summary>
- /// Gets an array of <seealso cref="DecisionInfo"/> instances containing the
- /// profiling information gathered for each decision in the ATN.
- /// </summary>
- /// <returns> An array of <seealso cref="DecisionInfo"/> instances, indexed by
- /// decision number. </returns>
- virtual std::vector<DecisionInfo> getDecisionInfo();
-
- /// <summary>
- /// Gets the decision numbers for decisions that required one or more
- /// full-context predictions during parsing. These are decisions for which
- /// <seealso cref="DecisionInfo#LL_Fallback"/> is non-zero.
- /// </summary>
- /// <returns> A list of decision numbers which required one or more
- /// full-context predictions during parsing. </returns>
- virtual std::vector<size_t> getLLDecisions();
-
- /// <summary>
- /// Gets the total time spent during prediction across all decisions made
- /// during parsing. This value is the sum of
- /// <seealso cref="DecisionInfo#timeInPrediction"/> for all decisions.
- /// </summary>
- virtual long long getTotalTimeInPrediction();
-
- /// <summary>
- /// Gets the total number of SLL lookahead operations across all decisions
- /// made during parsing. This value is the sum of
- /// <seealso cref="DecisionInfo#SLL_TotalLook"/> for all decisions.
- /// </summary>
- virtual long long getTotalSLLLookaheadOps();
-
- /// <summary>
- /// Gets the total number of LL lookahead operations across all decisions
- /// made during parsing. This value is the sum of
- /// <seealso cref="DecisionInfo#LL_TotalLook"/> for all decisions.
- /// </summary>
- virtual long long getTotalLLLookaheadOps();
-
- /// <summary>
- /// Gets the total number of ATN lookahead operations for SLL prediction
- /// across all decisions made during parsing.
- /// </summary>
- virtual long long getTotalSLLATNLookaheadOps();
-
- /// <summary>
- /// Gets the total number of ATN lookahead operations for LL prediction
- /// across all decisions made during parsing.
- /// </summary>
- virtual long long getTotalLLATNLookaheadOps();
-
- /// <summary>
- /// Gets the total number of ATN lookahead operations for SLL and LL
- /// prediction across all decisions made during parsing.
- ///
- /// <para>
- /// This value is the sum of <seealso cref="#getTotalSLLATNLookaheadOps"/> and
- /// <seealso cref="#getTotalLLATNLookaheadOps"/>.</para>
- /// </summary>
- virtual long long getTotalATNLookaheadOps();
-
- /// <summary>
- /// Gets the total number of DFA states stored in the DFA cache for all
- /// decisions in the ATN.
- /// </summary>
- virtual size_t getDFASize();
-
- /// <summary>
- /// Gets the total number of DFA states stored in the DFA cache for a
- /// particular decision.
- /// </summary>
- virtual size_t getDFASize(size_t decision);
-
- protected:
- const ProfilingATNSimulator*
- _atnSimulator; // non-owning, we are created by this simulator.
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp
deleted file mode 100644
index aac9affe2f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.cpp
+++ /dev/null
@@ -1,1508 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CommonTokenStream.h"
-#include "NoViableAltException.h"
-#include "Parser.h"
-#include "ParserRuleContext.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/ActionTransition.h"
-#include "atn/AtomTransition.h"
-#include "atn/DecisionState.h"
-#include "atn/EmptyPredictionContext.h"
-#include "atn/EpsilonTransition.h"
-#include "atn/NotSetTransition.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/PredicateTransition.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "dfa/DFA.h"
-#include "misc/IntervalSet.h"
-
-#include "atn/BlockEndState.h"
-#include "atn/BlockStartState.h"
-#include "atn/StarLoopEntryState.h"
-
-#include "ANTLRErrorListener.h"
-#include "misc/Interval.h"
-
-#include "Vocabulary.h"
-#include "support/Arrays.h"
-
-#include "atn/ParserATNSimulator.h"
-
-#define DEBUG_ATN 0
-#define DEBUG_LIST_ATN_DECISIONS 0
-#define DEBUG_DFA 0
-#define RETRY_DEBUG 0
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-using namespace antlrcpp;
-
-const bool ParserATNSimulator::TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT =
- ParserATNSimulator::getLrLoopSetting();
-
-ParserATNSimulator::ParserATNSimulator(
- const ATN& atn, std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache)
- : ParserATNSimulator(nullptr, atn, decisionToDFA, sharedContextCache) {}
-
-ParserATNSimulator::ParserATNSimulator(
- Parser* parser, const ATN& atn, std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache)
- : ATNSimulator(atn, sharedContextCache),
- decisionToDFA(decisionToDFA),
- parser(parser) {
- InitializeInstanceFields();
-}
-
-void ParserATNSimulator::reset() {}
-
-void ParserATNSimulator::clearDFA() {
- int size = (int)decisionToDFA.size();
- decisionToDFA.clear();
- for (int d = 0; d < size; ++d) {
- decisionToDFA.push_back(dfa::DFA(atn.getDecisionState(d), d));
- }
-}
-
-size_t ParserATNSimulator::adaptivePredict(TokenStream* input, size_t decision,
- ParserRuleContext* outerContext) {
-#if DEBUG_ATN == 1 || DEBUG_LIST_ATN_DECISIONS == 1
- std::cout << "adaptivePredict decision " << decision
- << " exec LA(1)==" << getLookaheadName(input) << " line "
- << input->LT(1)->getLine() << ":"
- << input->LT(1)->getCharPositionInLine() << std::endl;
-#endif
-
- _input = input;
- _startIndex = input->index();
- _outerContext = outerContext;
- dfa::DFA& dfa = decisionToDFA[decision];
- _dfa = &dfa;
-
- ssize_t m = input->mark();
- size_t index = _startIndex;
-
- // Now we are certain to have a specific decision's DFA
- // But, do we still need an initial state?
- auto onExit = finally([this, input, index, m] {
- mergeCache.clear(); // wack cache after each prediction
- _dfa = nullptr;
- input->seek(index);
- input->release(m);
- });
-
- dfa::DFAState* s0;
- if (dfa.isPrecedenceDfa()) {
- // the start state for a precedence DFA depends on the current
- // parser precedence, and is provided by a DFA method.
- s0 = dfa.getPrecedenceStartState(parser->getPrecedence());
- } else {
- // the start state for a "regular" DFA is just s0
- s0 = dfa.s0;
- }
-
- if (s0 == nullptr) {
- bool fullCtx = false;
- std::unique_ptr<ATNConfigSet> s0_closure =
- computeStartState(dynamic_cast<ATNState*>(dfa.atnStartState),
- &ParserRuleContext::EMPTY, fullCtx);
-
- _stateLock.writeLock();
- if (dfa.isPrecedenceDfa()) {
- /* If this is a precedence DFA, we use applyPrecedenceFilter
- * to convert the computed start state to a precedence start
- * state. We then use DFA.setPrecedenceStartState to set the
- * appropriate start state for the precedence level rather
- * than simply setting DFA.s0.
- */
- dfa.s0->configs = std::move(s0_closure); // not used for prediction but
- // useful to know start configs
- // anyway
- dfa::DFAState* newState = new dfa::DFAState(applyPrecedenceFilter(
- dfa.s0->configs
- .get())); /* mem-check: managed by the DFA or deleted below */
- s0 = addDFAState(dfa, newState);
- dfa.setPrecedenceStartState(parser->getPrecedence(), s0, _edgeLock);
- if (s0 != newState) {
- delete newState; // If there was already a state with this config set
- // we don't need the new one.
- }
- } else {
- dfa::DFAState* newState = new dfa::DFAState(std::move(
- s0_closure)); /* mem-check: managed by the DFA or deleted below */
- s0 = addDFAState(dfa, newState);
-
- if (dfa.s0 != s0) {
- delete dfa.s0; // Delete existing s0 DFA state, if there's any.
- dfa.s0 = s0;
- }
- if (s0 != newState) {
- delete newState; // If there was already a state with this config set
- // we don't need the new one.
- }
- }
- _stateLock.writeUnlock();
- }
-
- // We can start with an existing DFA.
- size_t alt = execATN(
- dfa, s0, input, index,
- outerContext != nullptr ? outerContext : &ParserRuleContext::EMPTY);
-
- return alt;
-}
-
-size_t ParserATNSimulator::execATN(dfa::DFA& dfa, dfa::DFAState* s0,
- TokenStream* input, size_t startIndex,
- ParserRuleContext* outerContext) {
-#if DEBUG_ATN == 1 || DEBUG_LIST_ATN_DECISIONS == 1
- std::cout << "execATN decision " << dfa.decision
- << " exec LA(1)==" << getLookaheadName(input) << " line "
- << input->LT(1)->getLine() << ":"
- << input->LT(1)->getCharPositionInLine() << std::endl;
-#endif
-
- dfa::DFAState* previousD = s0;
-
-#if DEBUG_ATN == 1
- std::cout << "s0 = " << s0 << std::endl;
-#endif
-
- size_t t = input->LA(1);
-
- while (true) { // while more work
- dfa::DFAState* D = getExistingTargetState(previousD, t);
- if (D == nullptr) {
- D = computeTargetState(dfa, previousD, t);
- }
-
- if (D == ERROR_STATE.get()) {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for SLL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision; better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- NoViableAltException e = noViableAlt(
- input, outerContext, previousD->configs.get(), startIndex);
- input->seek(startIndex);
- size_t alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(
- previousD->configs.get(), outerContext);
- if (alt != ATN::INVALID_ALT_NUMBER) {
- return alt;
- }
-
- throw e;
- }
-
- if (D->requiresFullContext && _mode != PredictionMode::SLL) {
- // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
- BitSet conflictingAlts;
- if (D->predicates.size() != 0) {
-#if DEBUG_ATN == 1
- std::cout << "DFA state has preds in DFA sim LL failover" << std::endl;
-#endif
-
- size_t conflictIndex = input->index();
- if (conflictIndex != startIndex) {
- input->seek(startIndex);
- }
-
- conflictingAlts =
- evalSemanticContext(D->predicates, outerContext, true);
- if (conflictingAlts.count() == 1) {
-#if DEBUG_ATN == 1
- std::cout << "Full LL avoided" << std::endl;
-#endif
-
- return conflictingAlts.nextSetBit(0);
- }
-
- if (conflictIndex != startIndex) {
- // restore the index so reporting the fallback to full
- // context occurs with the index at the correct spot
- input->seek(conflictIndex);
- }
- }
-
-#if DEBUG_DFA == 1
- std::cout << "ctx sensitive state " << outerContext << " in " << D
- << std::endl;
-#endif
-
- bool fullCtx = true;
- Ref<ATNConfigSet> s0_closure =
- computeStartState(dfa.atnStartState, outerContext, fullCtx);
- reportAttemptingFullContext(dfa, conflictingAlts, D->configs.get(),
- startIndex, input->index());
- size_t alt = execATNWithFullContext(dfa, D, s0_closure.get(), input,
- startIndex, outerContext);
- return alt;
- }
-
- if (D->isAcceptState) {
- if (D->predicates.empty()) {
- return D->prediction;
- }
-
- size_t stopIndex = input->index();
- input->seek(startIndex);
- BitSet alts = evalSemanticContext(D->predicates, outerContext, true);
- switch (alts.count()) {
- case 0:
- throw noViableAlt(input, outerContext, D->configs.get(), startIndex);
-
- case 1:
- return alts.nextSetBit(0);
-
- default:
- // report ambiguity after predicate evaluation to make sure the
- // correct set of ambig alts is reported.
- reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts,
- D->configs.get());
- return alts.nextSetBit(0);
- }
- }
-
- previousD = D;
-
- if (t != Token::EOF) {
- input->consume();
- t = input->LA(1);
- }
- }
-}
-
-dfa::DFAState* ParserATNSimulator::getExistingTargetState(
- dfa::DFAState* previousD, size_t t) {
- dfa::DFAState* retval;
- _edgeLock.readLock();
- auto iterator = previousD->edges.find(t);
- retval = (iterator == previousD->edges.end()) ? nullptr : iterator->second;
- _edgeLock.readUnlock();
- return retval;
-}
-
-dfa::DFAState* ParserATNSimulator::computeTargetState(dfa::DFA& dfa,
- dfa::DFAState* previousD,
- size_t t) {
- std::unique_ptr<ATNConfigSet> reach =
- computeReachSet(previousD->configs.get(), t, false);
- if (reach == nullptr) {
- addDFAEdge(dfa, previousD, t, ERROR_STATE.get());
- return ERROR_STATE.get();
- }
-
- // create new target state; we'll add to DFA after it's complete
- dfa::DFAState* D =
- new dfa::DFAState(std::move(reach)); /* mem-check: managed by the DFA or
- deleted below, "reach" is no
- longer valid now. */
- size_t predictedAlt = getUniqueAlt(D->configs.get());
-
- if (predictedAlt != ATN::INVALID_ALT_NUMBER) {
- // NO CONFLICT, UNIQUELY PREDICTED ALT
- D->isAcceptState = true;
- D->configs->uniqueAlt = predictedAlt;
- D->prediction = predictedAlt;
- } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(
- _mode, D->configs.get())) {
- // MORE THAN ONE VIABLE ALTERNATIVE
- D->configs->conflictingAlts = getConflictingAlts(D->configs.get());
- D->requiresFullContext = true;
- // in SLL-only mode, we will stop at this state and return the minimum alt
- D->isAcceptState = true;
- D->prediction = D->configs->conflictingAlts.nextSetBit(0);
- }
-
- if (D->isAcceptState && D->configs->hasSemanticContext) {
- predicateDFAState(D, atn.getDecisionState(dfa.decision));
- if (D->predicates.size() != 0) {
- D->prediction = ATN::INVALID_ALT_NUMBER;
- }
- }
-
- // all adds to dfa are done after we've created full D state
- dfa::DFAState* state = addDFAEdge(dfa, previousD, t, D);
- if (state != D) {
- delete D; // If the new state exists already we don't need it and use the
- // existing one instead.
- }
- return state;
-}
-
-void ParserATNSimulator::predicateDFAState(dfa::DFAState* dfaState,
- DecisionState* decisionState) {
- // We need to test all predicates, even in DFA states that
- // uniquely predict alternative.
- size_t nalts = decisionState->transitions.size();
-
- // Update DFA so reach becomes accept state with (predicate,alt)
- // pairs if preds found for conflicting alts
- BitSet altsToCollectPredsFrom =
- getConflictingAltsOrUniqueAlt(dfaState->configs.get());
- std::vector<Ref<SemanticContext>> altToPred = getPredsForAmbigAlts(
- altsToCollectPredsFrom, dfaState->configs.get(), nalts);
- if (!altToPred.empty()) {
- dfaState->predicates =
- getPredicatePredictions(altsToCollectPredsFrom, altToPred);
- dfaState->prediction = ATN::INVALID_ALT_NUMBER; // make sure we use preds
- } else {
- // There are preds in configs but they might go away
- // when OR'd together like {p}? || NONE == NONE. If neither
- // alt has preds, resolve to min alt
- dfaState->prediction = altsToCollectPredsFrom.nextSetBit(0);
- }
-}
-
-size_t ParserATNSimulator::execATNWithFullContext(
- dfa::DFA& dfa, dfa::DFAState* D, ATNConfigSet* s0, TokenStream* input,
- size_t startIndex, ParserRuleContext* outerContext) {
- bool fullCtx = true;
- bool foundExactAmbig = false;
-
- std::unique_ptr<ATNConfigSet> reach;
- ATNConfigSet* previous = s0;
- input->seek(startIndex);
- size_t t = input->LA(1);
- size_t predictedAlt;
-
- while (true) {
- reach = computeReachSet(previous, t, fullCtx);
- if (reach == nullptr) {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for LL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision; better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- NoViableAltException e =
- noViableAlt(input, outerContext, previous, startIndex);
- input->seek(startIndex);
- size_t alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(
- previous, outerContext);
- if (alt != ATN::INVALID_ALT_NUMBER) {
- return alt;
- }
- throw e;
- }
- if (previous != s0) // Don't delete the start set.
- delete previous;
- previous = nullptr;
-
- std::vector<BitSet> altSubSets =
- PredictionModeClass::getConflictingAltSubsets(reach.get());
- reach->uniqueAlt = getUniqueAlt(reach.get());
- // unique prediction?
- if (reach->uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- predictedAlt = reach->uniqueAlt;
- break;
- }
- if (_mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) {
- predictedAlt =
- PredictionModeClass::resolvesToJustOneViableAlt(altSubSets);
- if (predictedAlt != ATN::INVALID_ALT_NUMBER) {
- break;
- }
- } else {
- // In exact ambiguity mode, we never try to terminate early.
- // Just keep scarfing until we know what the conflict is.
- if (PredictionModeClass::allSubsetsConflict(altSubSets) &&
- PredictionModeClass::allSubsetsEqual(altSubSets)) {
- foundExactAmbig = true;
- predictedAlt = PredictionModeClass::getSingleViableAlt(altSubSets);
- break;
- }
- // else there are multiple non-conflicting subsets or
- // we're not sure what the ambiguity is yet.
- // So, keep going.
- }
- previous = reach.release();
-
- if (t != Token::EOF) {
- input->consume();
- t = input->LA(1);
- }
- }
-
- // If the configuration set uniquely predicts an alternative,
- // without conflict, then we know that it's a full LL decision
- // not SLL.
- if (reach->uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- reportContextSensitivity(dfa, predictedAlt, reach.get(), startIndex,
- input->index());
- return predictedAlt;
- }
-
- // We do not check predicates here because we have checked them
- // on-the-fly when doing full context prediction.
-
- /*
- In non-exact ambiguity detection mode, we might actually be able to
- detect an exact ambiguity, but I'm not going to spend the cycles
- needed to check. We only emit ambiguity warnings in exact ambiguity
- mode.
-
- For example, we might know that we have conflicting configurations.
- But, that does not mean that there is no way forward without a
- conflict. It's possible to have nonconflicting alt subsets as in:
-
- LL altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
-
- from
-
- [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
- (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
-
- In this case, (17,1,[5 $]) indicates there is some next sequence that
- would resolve this without conflict to alternative 1. Any other viable
- next sequence, however, is associated with a conflict. We stop
- looking for input because no amount of further lookahead will alter
- the fact that we should predict alternative 1. We just can't say for
- sure that there is an ambiguity without looking further.
- */
- reportAmbiguity(dfa, D, startIndex, input->index(), foundExactAmbig,
- reach->getAlts(), reach.get());
-
- return predictedAlt;
-}
-
-std::unique_ptr<ATNConfigSet> ParserATNSimulator::computeReachSet(
- ATNConfigSet* closure_, size_t t, bool fullCtx) {
- std::unique_ptr<ATNConfigSet> intermediate(new ATNConfigSet(fullCtx));
-
- /* Configurations already in a rule stop state indicate reaching the end
- * of the decision rule (local context) or end of the start rule (full
- * context). Once reached, these configurations are never updated by a
- * closure operation, so they are handled separately for the performance
- * advantage of having a smaller intermediate set when calling closure.
- *
- * For full-context reach operations, separate handling is required to
- * ensure that the alternative matching the longest overall sequence is
- * chosen when multiple such configurations can match the input.
- */
- std::vector<Ref<ATNConfig>> skippedStopStates;
-
- // First figure out where we can reach on input t
- for (auto& c : closure_->configs) {
- if (is<RuleStopState*>(c->state)) {
- assert(c->context->isEmpty());
-
- if (fullCtx || t == Token::EOF) {
- skippedStopStates.push_back(c);
- }
-
- continue;
- }
-
- size_t n = c->state->transitions.size();
- for (size_t ti = 0; ti < n; ti++) { // for each transition
- Transition* trans = c->state->transitions[ti];
- ATNState* target = getReachableTarget(trans, (int)t);
- if (target != nullptr) {
- intermediate->add(std::make_shared<ATNConfig>(c, target), &mergeCache);
- }
- }
- }
-
- // Now figure out where the reach operation can take us...
- std::unique_ptr<ATNConfigSet> reach;
-
- /* This block optimizes the reach operation for intermediate sets which
- * trivially indicate a termination state for the overall
- * adaptivePredict operation.
- *
- * The conditions assume that intermediate
- * contains all configurations relevant to the reach set, but this
- * condition is not true when one or more configurations have been
- * withheld in skippedStopStates, or when the current symbol is EOF.
- */
- if (skippedStopStates.empty() && t != Token::EOF) {
- if (intermediate->size() == 1) {
- // Don't pursue the closure if there is just one state.
- // It can only have one alternative; just add to result
- // Also don't pursue the closure if there is unique alternative
- // among the configurations.
- reach = std::move(intermediate);
- } else if (getUniqueAlt(intermediate.get()) != ATN::INVALID_ALT_NUMBER) {
- // Also don't pursue the closure if there is unique alternative
- // among the configurations.
- reach = std::move(intermediate);
- }
- }
-
- /* If the reach set could not be trivially determined, perform a closure
- * operation on the intermediate set to compute its initial value.
- */
- if (reach == nullptr) {
- reach.reset(new ATNConfigSet(fullCtx));
- ATNConfig::Set closureBusy;
-
- bool treatEofAsEpsilon = t == Token::EOF;
- for (auto c : intermediate->configs) {
- closure(c, reach.get(), closureBusy, false, fullCtx, treatEofAsEpsilon);
- }
- }
-
- if (t == IntStream::EOF) {
- /* After consuming EOF no additional input is possible, so we are
- * only interested in configurations which reached the end of the
- * decision rule (local context) or end of the start rule (full
- * context). Update reach to contain only these configurations. This
- * handles both explicit EOF transitions in the grammar and implicit
- * EOF transitions following the end of the decision or start rule.
- *
- * When reach==intermediate, no closure operation was performed. In
- * this case, removeAllConfigsNotInRuleStopState needs to check for
- * reachable rule stop states as well as configurations already in
- * a rule stop state.
- *
- * This is handled before the configurations in skippedStopStates,
- * because any configurations potentially added from that list are
- * already guaranteed to meet this condition whether or not it's
- * required.
- */
- ATNConfigSet* temp = removeAllConfigsNotInRuleStopState(
- reach.get(), *reach == *intermediate);
- if (temp != reach.get())
- reach.reset(temp); // We got a new set, so use that.
- }
-
- /* If skippedStopStates is not null, then it contains at least one
- * configuration. For full-context reach operations, these
- * configurations reached the end of the start rule, in which case we
- * only add them back to reach if no configuration during the current
- * closure operation reached such a state. This ensures adaptivePredict
- * chooses an alternative matching the longest overall sequence when
- * multiple alternatives are viable.
- */
- if (skippedStopStates.size() > 0 &&
- (!fullCtx ||
- !PredictionModeClass::hasConfigInRuleStopState(reach.get()))) {
- assert(!skippedStopStates.empty());
-
- for (auto c : skippedStopStates) {
- reach->add(c, &mergeCache);
- }
- }
-
- if (reach->isEmpty()) {
- return nullptr;
- }
- return reach;
-}
-
-ATNConfigSet* ParserATNSimulator::removeAllConfigsNotInRuleStopState(
- ATNConfigSet* configs, bool lookToEndOfRule) {
- if (PredictionModeClass::allConfigsInRuleStopStates(configs)) {
- return configs;
- }
-
- ATNConfigSet* result =
- new ATNConfigSet(configs->fullCtx); /* mem-check: released by caller */
-
- for (auto& config : configs->configs) {
- if (is<RuleStopState*>(config->state)) {
- result->add(config, &mergeCache);
- continue;
- }
-
- if (lookToEndOfRule && config->state->epsilonOnlyTransitions) {
- misc::IntervalSet nextTokens = atn.nextTokens(config->state);
- if (nextTokens.contains(Token::EPSILON)) {
- ATNState* endOfRuleState =
- atn.ruleToStopState[config->state->ruleIndex];
- result->add(std::make_shared<ATNConfig>(config, endOfRuleState),
- &mergeCache);
- }
- }
- }
-
- return result;
-}
-
-std::unique_ptr<ATNConfigSet> ParserATNSimulator::computeStartState(
- ATNState* p, RuleContext* ctx, bool fullCtx) {
- // always at least the implicit call to start rule
- Ref<PredictionContext> initialContext =
- PredictionContext::fromRuleContext(atn, ctx);
- std::unique_ptr<ATNConfigSet> configs(new ATNConfigSet(fullCtx));
-
- for (size_t i = 0; i < p->transitions.size(); i++) {
- ATNState* target = p->transitions[i]->target;
- Ref<ATNConfig> c =
- std::make_shared<ATNConfig>(target, (int)i + 1, initialContext);
- ATNConfig::Set closureBusy;
- closure(c, configs.get(), closureBusy, true, fullCtx, false);
- }
-
- return configs;
-}
-
-std::unique_ptr<ATNConfigSet> ParserATNSimulator::applyPrecedenceFilter(
- ATNConfigSet* configs) {
- std::map<size_t, Ref<PredictionContext>> statesFromAlt1;
- std::unique_ptr<ATNConfigSet> configSet(new ATNConfigSet(configs->fullCtx));
- for (Ref<ATNConfig>& config : configs->configs) {
- // handle alt 1 first
- if (config->alt != 1) {
- continue;
- }
-
- Ref<SemanticContext> updatedContext =
- config->semanticContext->evalPrecedence(parser, _outerContext,
- config->semanticContext);
- if (updatedContext == nullptr) {
- // the configuration was eliminated
- continue;
- }
-
- statesFromAlt1[config->state->stateNumber] = config->context;
- if (updatedContext != config->semanticContext) {
- configSet->add(std::make_shared<ATNConfig>(config, updatedContext),
- &mergeCache);
- } else {
- configSet->add(config, &mergeCache);
- }
- }
-
- for (Ref<ATNConfig>& config : configs->configs) {
- if (config->alt == 1) {
- // already handled
- continue;
- }
-
- if (!config->isPrecedenceFilterSuppressed()) {
- /* In the future, this elimination step could be updated to also
- * filter the prediction context for alternatives predicting alt>1
- * (basically a graph subtraction algorithm).
- */
- auto iterator = statesFromAlt1.find(config->state->stateNumber);
- if (iterator != statesFromAlt1.end() &&
- *iterator->second == *config->context) {
- // eliminated
- continue;
- }
- }
-
- configSet->add(config, &mergeCache);
- }
-
- return configSet;
-}
-
-atn::ATNState* ParserATNSimulator::getReachableTarget(Transition* trans,
- size_t ttype) {
- if (trans->matches(ttype, 0, atn.maxTokenType)) {
- return trans->target;
- }
-
- return nullptr;
-}
-
-// Note that caller must memory manage the returned value from this function
-std::vector<Ref<SemanticContext>> ParserATNSimulator::getPredsForAmbigAlts(
- const BitSet& ambigAlts, ATNConfigSet* configs, size_t nalts) {
- // REACH=[1|1|[]|0:0, 1|2|[]|0:1]
- /* altToPred starts as an array of all null contexts. The entry at index i
- * corresponds to alternative i. altToPred[i] may have one of three values:
- * 1. null: no ATNConfig c is found such that c.alt==i
- * 2. SemanticContext.NONE: At least one ATNConfig c exists such that
- * c.alt==i and c.semanticContext==SemanticContext.NONE. In other words,
- * alt i has at least one un-predicated config.
- * 3. Non-NONE Semantic Context: There exists at least one, and for all
- * ATNConfig c such that c.alt==i,
- * c.semanticContext!=SemanticContext.NONE.
- *
- * From this, it is clear that NONE||anything==NONE.
- */
- std::vector<Ref<SemanticContext>> altToPred(nalts + 1);
-
- for (auto& c : configs->configs) {
- if (ambigAlts.test(c->alt)) {
- altToPred[c->alt] =
- SemanticContext::Or(altToPred[c->alt], c->semanticContext);
- }
- }
-
- size_t nPredAlts = 0;
- for (size_t i = 1; i <= nalts; i++) {
- if (altToPred[i] == nullptr) {
- altToPred[i] = SemanticContext::NONE;
- } else if (altToPred[i] != SemanticContext::NONE) {
- nPredAlts++;
- }
- }
-
- // nonambig alts are null in altToPred
- if (nPredAlts == 0) {
- altToPred.clear();
- }
-#if DEBUG_ATN == 1
- std::cout << "getPredsForAmbigAlts result " << Arrays::toString(altToPred)
- << std::endl;
-#endif
-
- return altToPred;
-}
-
-std::vector<dfa::DFAState::PredPrediction*>
-ParserATNSimulator::getPredicatePredictions(
- const antlrcpp::BitSet& ambigAlts,
- std::vector<Ref<SemanticContext>> altToPred) {
- std::vector<dfa::DFAState::PredPrediction*> pairs;
- bool containsPredicate = false;
- for (size_t i = 1; i < altToPred.size(); i++) {
- Ref<SemanticContext> pred = altToPred[i];
-
- // unpredicted is indicated by SemanticContext.NONE
- assert(pred != nullptr);
-
- if (ambigAlts.test(i)) {
- pairs.push_back(new dfa::DFAState::PredPrediction(
- pred, (int)i)); /* mem-check: managed by the DFAState it will be
- assigned to after return */
- }
- if (pred != SemanticContext::NONE) {
- containsPredicate = true;
- }
- }
-
- if (!containsPredicate) {
- pairs.clear();
- }
-
- return pairs;
-}
-
-size_t
-ParserATNSimulator::getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(
- ATNConfigSet* configs, ParserRuleContext* outerContext) {
- std::pair<ATNConfigSet*, ATNConfigSet*> sets =
- splitAccordingToSemanticValidity(configs, outerContext);
- std::unique_ptr<ATNConfigSet> semValidConfigs(sets.first);
- std::unique_ptr<ATNConfigSet> semInvalidConfigs(sets.second);
- size_t alt = getAltThatFinishedDecisionEntryRule(semValidConfigs.get());
- if (alt != ATN::INVALID_ALT_NUMBER) { // semantically/syntactically viable
- // path exists
- return alt;
- }
- // Is there a syntactically valid path with a failed pred?
- if (!semInvalidConfigs->configs.empty()) {
- alt = getAltThatFinishedDecisionEntryRule(semInvalidConfigs.get());
- if (alt != ATN::INVALID_ALT_NUMBER) { // syntactically viable path exists
- return alt;
- }
- }
- return ATN::INVALID_ALT_NUMBER;
-}
-
-size_t ParserATNSimulator::getAltThatFinishedDecisionEntryRule(
- ATNConfigSet* configs) {
- misc::IntervalSet alts;
- for (auto& c : configs->configs) {
- if (c->getOuterContextDepth() > 0 ||
- (is<RuleStopState*>(c->state) && c->context->hasEmptyPath())) {
- alts.add(c->alt);
- }
- }
- if (alts.size() == 0) {
- return ATN::INVALID_ALT_NUMBER;
- }
- return alts.getMinElement();
-}
-
-std::pair<ATNConfigSet*, ATNConfigSet*>
-ParserATNSimulator::splitAccordingToSemanticValidity(
- ATNConfigSet* configs, ParserRuleContext* outerContext) {
- // mem-check: both pointers must be freed by the caller.
- ATNConfigSet* succeeded(new ATNConfigSet(configs->fullCtx));
- ATNConfigSet* failed(new ATNConfigSet(configs->fullCtx));
- for (Ref<ATNConfig>& c : configs->configs) {
- if (c->semanticContext != SemanticContext::NONE) {
- bool predicateEvaluationResult = evalSemanticContext(
- c->semanticContext, outerContext, c->alt, configs->fullCtx);
- if (predicateEvaluationResult) {
- succeeded->add(c);
- } else {
- failed->add(c);
- }
- } else {
- succeeded->add(c);
- }
- }
- return {succeeded, failed};
-}
-
-BitSet ParserATNSimulator::evalSemanticContext(
- std::vector<dfa::DFAState::PredPrediction*> predPredictions,
- ParserRuleContext* outerContext, bool complete) {
- BitSet predictions;
- for (auto prediction : predPredictions) {
- if (prediction->pred == SemanticContext::NONE) {
- predictions.set(prediction->alt);
- if (!complete) {
- break;
- }
- continue;
- }
-
- bool fullCtx = false; // in dfa
- bool predicateEvaluationResult = evalSemanticContext(
- prediction->pred, outerContext, prediction->alt, fullCtx);
-#if DEBUG_ATN == 1 || DEBUG_DFA == 1
- std::cout << "eval pred " << prediction->toString() << " = "
- << predicateEvaluationResult << std::endl;
-#endif
-
- if (predicateEvaluationResult) {
-#if DEBUG_ATN == 1 || DEBUG_DFA == 1
- std::cout << "PREDICT " << prediction->alt << std::endl;
-#endif
-
- predictions.set(prediction->alt);
- if (!complete) {
- break;
- }
- }
- }
-
- return predictions;
-}
-
-bool ParserATNSimulator::evalSemanticContext(Ref<SemanticContext> const& pred,
- ParserRuleContext* parserCallStack,
- size_t /*alt*/, bool /*fullCtx*/) {
- return pred->eval(parser, parserCallStack);
-}
-
-void ParserATNSimulator::closure(Ref<ATNConfig> const& config,
- ATNConfigSet* configs,
- ATNConfig::Set& closureBusy,
- bool collectPredicates, bool fullCtx,
- bool treatEofAsEpsilon) {
- const int initialDepth = 0;
- closureCheckingStopState(config, configs, closureBusy, collectPredicates,
- fullCtx, initialDepth, treatEofAsEpsilon);
-
- assert(!fullCtx || !configs->dipsIntoOuterContext);
-}
-
-void ParserATNSimulator::closureCheckingStopState(Ref<ATNConfig> const& config,
- ATNConfigSet* configs,
- ATNConfig::Set& closureBusy,
- bool collectPredicates,
- bool fullCtx, int depth,
- bool treatEofAsEpsilon) {
-#if DEBUG_ATN == 1
- std::cout << "closure(" << config->toString(true) << ")" << std::endl;
-#endif
-
- if (is<RuleStopState*>(config->state)) {
- // We hit rule end. If we have context info, use it
- // run thru all possible stack tops in ctx
- if (!config->context->isEmpty()) {
- for (size_t i = 0; i < config->context->size(); i++) {
- if (config->context->getReturnState(i) ==
- PredictionContext::EMPTY_RETURN_STATE) {
- if (fullCtx) {
- configs->add(std::make_shared<ATNConfig>(config, config->state,
- PredictionContext::EMPTY),
- &mergeCache);
- continue;
- } else {
-// we have no context info, just chase follow links (if greedy)
-#if DEBUG_ATN == 1
- std::cout << "FALLING off rule "
- << getRuleName(config->state->ruleIndex) << std::endl;
-#endif
- closure_(config, configs, closureBusy, collectPredicates, fullCtx,
- depth, treatEofAsEpsilon);
- }
- continue;
- }
- ATNState* returnState = atn.states[config->context->getReturnState(i)];
- std::weak_ptr<PredictionContext> newContext =
- config->context->getParent(i); // "pop" return state
- Ref<ATNConfig> c = std::make_shared<ATNConfig>(returnState, config->alt,
- newContext.lock(),
- config->semanticContext);
- // While we have context to pop back from, we may have
- // gotten that context AFTER having fallen off a rule.
- // Make sure we track that we are now out of context.
- //
- // This assignment also propagates the
- // isPrecedenceFilterSuppressed() value to the new
- // configuration.
- c->reachesIntoOuterContext = config->reachesIntoOuterContext;
- assert(depth > INT_MIN);
-
- closureCheckingStopState(c, configs, closureBusy, collectPredicates,
- fullCtx, depth - 1, treatEofAsEpsilon);
- }
- return;
- } else if (fullCtx) {
- // reached end of start rule
- configs->add(config, &mergeCache);
- return;
- } else {
- // else if we have no context info, just chase follow links (if greedy)
- }
- }
-
- closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth,
- treatEofAsEpsilon);
-}
-
-void ParserATNSimulator::closure_(Ref<ATNConfig> const& config,
- ATNConfigSet* configs,
- ATNConfig::Set& closureBusy,
- bool collectPredicates, bool fullCtx,
- int depth, bool treatEofAsEpsilon) {
- ATNState* p = config->state;
- // optimization
- if (!p->epsilonOnlyTransitions) {
- // make sure to not return here, because EOF transitions can act as
- // both epsilon transitions and non-epsilon transitions.
- configs->add(config, &mergeCache);
- }
-
- for (size_t i = 0; i < p->transitions.size(); i++) {
- if (i == 0 && canDropLoopEntryEdgeInLeftRecursiveRule(config.get()))
- continue;
-
- Transition* t = p->transitions[i];
- bool continueCollecting = !is<ActionTransition*>(t) && collectPredicates;
- Ref<ATNConfig> c = getEpsilonTarget(config, t, continueCollecting,
- depth == 0, fullCtx, treatEofAsEpsilon);
- if (c != nullptr) {
- if (!t->isEpsilon()) {
- // avoid infinite recursion for EOF* and EOF+
- if (closureBusy.count(c) == 0) {
- closureBusy.insert(c);
- } else {
- continue;
- }
- }
-
- int newDepth = depth;
- if (is<RuleStopState*>(config->state)) {
- assert(!fullCtx);
-
- // target fell off end of rule; mark resulting c as having dipped into
- // outer context. We can't get here if incoming config was rule stop and
- // we had context. Track how far we dip into outer context; might come
- // in handy and we avoid evaluating context-dependent preds if this is >
- // 0.
-
- if (closureBusy.count(c) > 0) {
- // avoid infinite recursion for right-recursive rules
- continue;
- }
- closureBusy.insert(c);
-
- if (_dfa != nullptr && _dfa->isPrecedenceDfa()) {
- size_t outermostPrecedenceReturn =
- dynamic_cast<EpsilonTransition*>(t)->outermostPrecedenceReturn();
- if (outermostPrecedenceReturn == _dfa->atnStartState->ruleIndex) {
- c->setPrecedenceFilterSuppressed(true);
- }
- }
-
- c->reachesIntoOuterContext++;
- configs->dipsIntoOuterContext = true; // TO_DO: can remove? only care
- // when we add to set per middle
- // of this method
- assert(newDepth > INT_MIN);
-
- newDepth--;
-#if DEBUG_DFA == 1
- std::cout << "dips into outer ctx: " << c << std::endl;
-#endif
-
- } else if (is<RuleTransition*>(t)) {
- // latch when newDepth goes negative - once we step out of the entry
- // context we can't return
- if (newDepth >= 0) {
- newDepth++;
- }
- }
-
- closureCheckingStopState(c, configs, closureBusy, continueCollecting,
- fullCtx, newDepth, treatEofAsEpsilon);
- }
- }
-}
-
-bool ParserATNSimulator::canDropLoopEntryEdgeInLeftRecursiveRule(
- ATNConfig* config) const {
- if (TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT) return false;
-
- ATNState* p = config->state;
-
- // First check to see if we are in StarLoopEntryState generated during
- // left-recursion elimination. For efficiency, also check if
- // the context has an empty stack case. If so, it would mean
- // global FOLLOW so we can't perform optimization
- if (p->getStateType() != ATNState::STAR_LOOP_ENTRY ||
- !((StarLoopEntryState*)p)->isPrecedenceDecision || // Are we the special
- // loop entry/exit
- // state?
- config->context->isEmpty() || // If SLL wildcard
- config->context->hasEmptyPath()) {
- return false;
- }
-
- // Require all return states to return back to the same rule
- // that p is in.
- size_t numCtxs = config->context->size();
- for (size_t i = 0; i < numCtxs; i++) { // for each stack context
- ATNState* returnState = atn.states[config->context->getReturnState(i)];
- if (returnState->ruleIndex != p->ruleIndex) return false;
- }
-
- BlockStartState* decisionStartState =
- (BlockStartState*)p->transitions[0]->target;
- size_t blockEndStateNum = decisionStartState->endState->stateNumber;
- BlockEndState* blockEndState = (BlockEndState*)atn.states[blockEndStateNum];
-
- // Verify that the top of each stack context leads to loop entry/exit
- // state through epsilon edges and w/o leaving rule.
- for (size_t i = 0; i < numCtxs; i++) { // for each stack context
- size_t returnStateNumber = config->context->getReturnState(i);
- ATNState* returnState = atn.states[returnStateNumber];
- // All states must have single outgoing epsilon edge.
- if (returnState->transitions.size() != 1 ||
- !returnState->transitions[0]->isEpsilon()) {
- return false;
- }
-
- // Look for prefix op case like 'not expr', (' type ')' expr
- ATNState* returnStateTarget = returnState->transitions[0]->target;
- if (returnState->getStateType() == ATNState::BLOCK_END &&
- returnStateTarget == p) {
- continue;
- }
-
- // Look for 'expr op expr' or case where expr's return state is block end
- // of (...)* internal block; the block end points to loop back
- // which points to p but we don't need to check that
- if (returnState == blockEndState) {
- continue;
- }
-
- // Look for ternary expr ? expr : expr. The return state points at block
- // end, which points at loop entry state
- if (returnStateTarget == blockEndState) {
- continue;
- }
-
- // Look for complex prefix 'between expr and expr' case where 2nd expr's
- // return state points at block end state of (...)* internal block
- if (returnStateTarget->getStateType() == ATNState::BLOCK_END &&
- returnStateTarget->transitions.size() == 1 &&
- returnStateTarget->transitions[0]->isEpsilon() &&
- returnStateTarget->transitions[0]->target == p) {
- continue;
- }
-
- // Anything else ain't conforming.
- return false;
- }
-
- return true;
-}
-
-std::string ParserATNSimulator::getRuleName(size_t index) {
- if (parser != nullptr) {
- return parser->getRuleNames()[index];
- }
- return "<rule " + std::to_string(index) + ">";
-}
-
-Ref<ATNConfig> ParserATNSimulator::getEpsilonTarget(
- Ref<ATNConfig> const& config, Transition* t, bool collectPredicates,
- bool inContext, bool fullCtx, bool treatEofAsEpsilon) {
- switch (t->getSerializationType()) {
- case Transition::RULE:
- return ruleTransition(config, static_cast<RuleTransition*>(t));
-
- case Transition::PRECEDENCE:
- return precedenceTransition(
- config, static_cast<PrecedencePredicateTransition*>(t),
- collectPredicates, inContext, fullCtx);
-
- case Transition::PREDICATE:
- return predTransition(config, static_cast<PredicateTransition*>(t),
- collectPredicates, inContext, fullCtx);
-
- case Transition::ACTION:
- return actionTransition(config, static_cast<ActionTransition*>(t));
-
- case Transition::EPSILON:
- return std::make_shared<ATNConfig>(config, t->target);
-
- case Transition::ATOM:
- case Transition::RANGE:
- case Transition::SET:
- // EOF transitions act like epsilon transitions after the first EOF
- // transition is traversed
- if (treatEofAsEpsilon) {
- if (t->matches(Token::EOF, 0, 1)) {
- return std::make_shared<ATNConfig>(config, t->target);
- }
- }
-
- return nullptr;
-
- default:
- return nullptr;
- }
-}
-
-Ref<ATNConfig> ParserATNSimulator::actionTransition(
- Ref<ATNConfig> const& config, ActionTransition* t) {
-#if DEBUG_DFA == 1
- std::cout << "ACTION edge " << t->ruleIndex << ":" << t->actionIndex
- << std::endl;
-#endif
-
- return std::make_shared<ATNConfig>(config, t->target);
-}
-
-Ref<ATNConfig> ParserATNSimulator::precedenceTransition(
- Ref<ATNConfig> const& config, PrecedencePredicateTransition* pt,
- bool collectPredicates, bool inContext, bool fullCtx) {
-#if DEBUG_DFA == 1
- std::cout << "PRED (collectPredicates=" << collectPredicates << ") "
- << pt->precedence << ">=_p"
- << ", ctx dependent=true" << std::endl;
- if (parser != nullptr) {
- std::cout << "context surrounding pred is "
- << Arrays::listToString(parser->getRuleInvocationStack(), ", ")
- << std::endl;
- }
-#endif
-
- Ref<ATNConfig> c;
- if (collectPredicates && inContext) {
- Ref<SemanticContext::PrecedencePredicate> predicate = pt->getPredicate();
-
- if (fullCtx) {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- size_t currentPosition = _input->index();
- _input->seek(_startIndex);
- bool predSucceeds = evalSemanticContext(pt->getPredicate(), _outerContext,
- config->alt, fullCtx);
- _input->seek(currentPosition);
- if (predSucceeds) {
- c = std::make_shared<ATNConfig>(config, pt->target); // no pred context
- }
- } else {
- Ref<SemanticContext> newSemCtx =
- SemanticContext::And(config->semanticContext, predicate);
- c = std::make_shared<ATNConfig>(config, pt->target, newSemCtx);
- }
- } else {
- c = std::make_shared<ATNConfig>(config, pt->target);
- }
-
-#if DEBUG_DFA == 1
- std::cout << "config from pred transition=" << c << std::endl;
-#endif
-
- return c;
-}
-
-Ref<ATNConfig> ParserATNSimulator::predTransition(Ref<ATNConfig> const& config,
- PredicateTransition* pt,
- bool collectPredicates,
- bool inContext,
- bool fullCtx) {
-#if DEBUG_DFA == 1
- std::cout << "PRED (collectPredicates=" << collectPredicates << ") "
- << pt->ruleIndex << ":" << pt->predIndex
- << ", ctx dependent=" << pt->isCtxDependent << std::endl;
- if (parser != nullptr) {
- std::cout << "context surrounding pred is "
- << Arrays::listToString(parser->getRuleInvocationStack(), ", ")
- << std::endl;
- }
-#endif
-
- Ref<ATNConfig> c = nullptr;
- if (collectPredicates &&
- (!pt->isCtxDependent || (pt->isCtxDependent && inContext))) {
- Ref<SemanticContext::Predicate> predicate = pt->getPredicate();
- if (fullCtx) {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- size_t currentPosition = _input->index();
- _input->seek(_startIndex);
- bool predSucceeds = evalSemanticContext(pt->getPredicate(), _outerContext,
- config->alt, fullCtx);
- _input->seek(currentPosition);
- if (predSucceeds) {
- c = std::make_shared<ATNConfig>(config, pt->target); // no pred context
- }
- } else {
- Ref<SemanticContext> newSemCtx =
- SemanticContext::And(config->semanticContext, predicate);
- c = std::make_shared<ATNConfig>(config, pt->target, newSemCtx);
- }
- } else {
- c = std::make_shared<ATNConfig>(config, pt->target);
- }
-
-#if DEBUG_DFA == 1
- std::cout << "config from pred transition=" << c << std::endl;
-#endif
-
- return c;
-}
-
-Ref<ATNConfig> ParserATNSimulator::ruleTransition(Ref<ATNConfig> const& config,
- RuleTransition* t) {
-#if DEBUG_DFA == 1
- std::cout << "CALL rule " << getRuleName(t->target->ruleIndex)
- << ", ctx=" << config->context << std::endl;
-#endif
-
- atn::ATNState* returnState = t->followState;
- Ref<PredictionContext> newContext = SingletonPredictionContext::create(
- config->context, returnState->stateNumber);
- return std::make_shared<ATNConfig>(config, t->target, newContext);
-}
-
-BitSet ParserATNSimulator::getConflictingAlts(ATNConfigSet* configs) {
- std::vector<BitSet> altsets =
- PredictionModeClass::getConflictingAltSubsets(configs);
- return PredictionModeClass::getAlts(altsets);
-}
-
-BitSet ParserATNSimulator::getConflictingAltsOrUniqueAlt(
- ATNConfigSet* configs) {
- BitSet conflictingAlts;
- if (configs->uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- conflictingAlts.set(configs->uniqueAlt);
- } else {
- conflictingAlts = configs->conflictingAlts;
- }
- return conflictingAlts;
-}
-
-std::string ParserATNSimulator::getTokenName(size_t t) {
- if (t == Token::EOF) {
- return "EOF";
- }
-
- const dfa::Vocabulary& vocabulary = parser != nullptr
- ? parser->getVocabulary()
- : dfa::Vocabulary::EMPTY_VOCABULARY;
- std::string displayName = vocabulary.getDisplayName(t);
- if (displayName == std::to_string(t)) {
- return displayName;
- }
-
- return displayName + "<" + std::to_string(t) + ">";
-}
-
-std::string ParserATNSimulator::getLookaheadName(TokenStream* input) {
- return getTokenName(input->LA(1));
-}
-
-void ParserATNSimulator::dumpDeadEndConfigs(NoViableAltException& nvae) {
- std::cerr << "dead end configs: ";
- for (auto c : nvae.getDeadEndConfigs()->configs) {
- std::string trans = "no edges";
- if (c->state->transitions.size() > 0) {
- Transition* t = c->state->transitions[0];
- if (is<AtomTransition*>(t)) {
- AtomTransition* at = static_cast<AtomTransition*>(t);
- trans = "Atom " + getTokenName(at->_label);
- } else if (is<SetTransition*>(t)) {
- SetTransition* st = static_cast<SetTransition*>(t);
- bool is_not = is<NotSetTransition*>(st);
- trans = (is_not ? "~" : "");
- trans += "Set ";
- trans += st->set.toString();
- }
- }
- std::cerr << c->toString(true) + ":" + trans;
- }
-}
-
-NoViableAltException ParserATNSimulator::noViableAlt(
- TokenStream* input, ParserRuleContext* outerContext, ATNConfigSet* configs,
- size_t startIndex) {
- return NoViableAltException(parser, input, input->get(startIndex),
- input->LT(1), configs, outerContext);
-}
-
-size_t ParserATNSimulator::getUniqueAlt(ATNConfigSet* configs) {
- size_t alt = ATN::INVALID_ALT_NUMBER;
- for (auto& c : configs->configs) {
- if (alt == ATN::INVALID_ALT_NUMBER) {
- alt = c->alt; // found first alt
- } else if (c->alt != alt) {
- return ATN::INVALID_ALT_NUMBER;
- }
- }
- return alt;
-}
-
-dfa::DFAState* ParserATNSimulator::addDFAEdge(dfa::DFA& dfa,
- dfa::DFAState* from, ssize_t t,
- dfa::DFAState* to) {
-#if DEBUG_DFA == 1
- std::cout << "EDGE " << from << " -> " << to << " upon " << getTokenName(t)
- << std::endl;
-#endif
-
- if (to == nullptr) {
- return nullptr;
- }
-
- _stateLock.writeLock();
- to = addDFAState(dfa, to); // use existing state if possible, not the incoming one
- _stateLock.writeUnlock();
- if (from == nullptr || t > (int)atn.maxTokenType) {
- return to;
- }
-
- {
- _edgeLock.writeLock();
- from->edges[t] = to; // connect
- _edgeLock.writeUnlock();
- }
-
-#if DEBUG_DFA == 1
- std::string dfaText;
- if (parser != nullptr) {
- dfaText = dfa.toString(parser->getVocabulary());
- } else {
- dfaText = dfa.toString(dfa::Vocabulary::EMPTY_VOCABULARY);
- }
- std::cout << "DFA=\n" << dfaText << std::endl;
-#endif
-
- return to;
-}
-
-dfa::DFAState* ParserATNSimulator::addDFAState(dfa::DFA& dfa,
- dfa::DFAState* D) {
- if (D == ERROR_STATE.get()) {
- return D;
- }
-
- auto existing = dfa.states.find(D);
- if (existing != dfa.states.end()) {
- return *existing;
- }
-
- D->stateNumber = (int)dfa.states.size();
- if (!D->configs->isReadonly()) {
- D->configs->optimizeConfigs(this);
- D->configs->setReadonly(true);
- }
-
- dfa.states.insert(D);
-
-#if DEBUG_DFA == 1
- std::cout << "adding new DFA state: " << D << std::endl;
-#endif
-
- return D;
-}
-
-void ParserATNSimulator::reportAttemptingFullContext(
- dfa::DFA& dfa, const antlrcpp::BitSet& conflictingAlts,
- ATNConfigSet* configs, size_t startIndex, size_t stopIndex) {
-#if DEBUG_DFA == 1 || RETRY_DEBUG == 1
- misc::Interval interval = misc::Interval((int)startIndex, (int)stopIndex);
- std::cout << "reportAttemptingFullContext decision=" << dfa.decision << ":"
- << configs
- << ", input=" << parser->getTokenStream()->getText(interval)
- << std::endl;
-#endif
-
- if (parser != nullptr) {
- parser->getErrorListenerDispatch().reportAttemptingFullContext(
- parser, dfa, startIndex, stopIndex, conflictingAlts, configs);
- }
-}
-
-void ParserATNSimulator::reportContextSensitivity(dfa::DFA& dfa,
- size_t prediction,
- ATNConfigSet* configs,
- size_t startIndex,
- size_t stopIndex) {
-#if DEBUG_DFA == 1 || RETRY_DEBUG == 1
- misc::Interval interval = misc::Interval(startIndex, stopIndex);
- std::cout << "reportContextSensitivity decision=" << dfa.decision << ":"
- << configs
- << ", input=" << parser->getTokenStream()->getText(interval)
- << std::endl;
-#endif
-
- if (parser != nullptr) {
- parser->getErrorListenerDispatch().reportContextSensitivity(
- parser, dfa, startIndex, stopIndex, prediction, configs);
- }
-}
-
-void ParserATNSimulator::reportAmbiguity(dfa::DFA& dfa, dfa::DFAState* /*D*/,
- size_t startIndex, size_t stopIndex,
- bool exact,
- const antlrcpp::BitSet& ambigAlts,
- ATNConfigSet* configs) {
-#if DEBUG_DFA == 1 || RETRY_DEBUG == 1
- misc::Interval interval = misc::Interval((int)startIndex, (int)stopIndex);
- std::cout << "reportAmbiguity " << ambigAlts << ":" << configs
- << ", input=" << parser->getTokenStream()->getText(interval)
- << std::endl;
-#endif
-
- if (parser != nullptr) {
- parser->getErrorListenerDispatch().reportAmbiguity(
- parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
- }
-}
-
-void ParserATNSimulator::setPredictionMode(PredictionMode newMode) {
- _mode = newMode;
-}
-
-atn::PredictionMode ParserATNSimulator::getPredictionMode() { return _mode; }
-
-Parser* ParserATNSimulator::getParser() { return parser; }
-
-bool ParserATNSimulator::getLrLoopSetting() {
- char* var = std::getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT");
- if (var == nullptr) return false;
- std::string value(var);
- return value == "true" || value == "1";
-}
-
-void ParserATNSimulator::InitializeInstanceFields() {
- _mode = PredictionMode::LL;
- _startIndex = 0;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h
deleted file mode 100644
index 3c043ea324..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ParserATNSimulator.h
+++ /dev/null
@@ -1,942 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "PredictionMode.h"
-#include "SemanticContext.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNSimulator.h"
-#include "atn/PredictionContext.h"
-#include "dfa/DFAState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/**
- * The embodiment of the adaptive LL(*), ALL(*), parsing strategy.
- *
- * <p>
- * The basic complexity of the adaptive strategy makes it harder to understand.
- * We begin with ATN simulation to build paths in a DFA. Subsequent prediction
- * requests go through the DFA first. If they reach a state without an edge for
- * the current symbol, the algorithm fails over to the ATN simulation to
- * complete the DFA path for the current input (until it finds a conflict state
- * or uniquely predicting state).</p>
- *
- * <p>
- * All of that is done without using the outer context because we want to create
- * a DFA that is not dependent upon the rule invocation stack when we do a
- * prediction. One DFA works in all contexts. We avoid using context not
- * necessarily because it's slower, although it can be, but because of the DFA
- * caching problem. The closure routine only considers the rule invocation stack
- * created during prediction beginning in the decision rule. For example, if
- * prediction occurs without invoking another rule's ATN, there are no context
- * stacks in the configurations. When lack of context leads to a conflict, we
- * don't know if it's an ambiguity or a weakness in the strong LL(*) parsing
- * strategy (versus full LL(*)).</p>
- *
- * <p>
- * When SLL yields a configuration set with conflict, we rewind the input and
- * retry the ATN simulation, this time using full outer context without adding
- * to the DFA. Configuration context stacks will be the full invocation stacks
- * from the start rule. If we get a conflict using full context, then we can
- * definitively say we have a true ambiguity for that input sequence. If we
- * don't get a conflict, it implies that the decision is sensitive to the outer
- * context. (It is not context-sensitive in the sense of context-sensitive
- * grammars.)</p>
- *
- * <p>
- * The next time we reach this DFA state with an SLL conflict, through DFA
- * simulation, we will again retry the ATN simulation using full context mode.
- * This is slow because we can't save the results and have to "interpret" the
- * ATN each time we get that input.</p>
- *
- * <p>
- * <strong>CACHING FULL CONTEXT PREDICTIONS</strong></p>
- *
- * <p>
- * We could cache results from full context to predicted alternative easily and
- * that saves a lot of time but doesn't work in presence of predicates. The set
- * of visible predicates from the ATN start state changes depending on the
- * context, because closure can fall off the end of a rule. I tried to cache
- * tuples (stack context, semantic context, predicted alt) but it was slower
- * than interpreting and much more complicated. Also required a huge amount of
- * memory. The goal is not to create the world's fastest parser anyway. I'd like
- * to keep this algorithm simple. By launching multiple threads, we can improve
- * the speed of parsing across a large number of files.</p>
- *
- * <p>
- * There is no strict ordering between the amount of input used by SLL vs LL,
- * which makes it really hard to build a cache for full context. Let's say that
- * we have input A B C that leads to an SLL conflict with full context X. That
- * implies that using X we might only use A B but we could also use A B C D to
- * resolve conflict. Input A B C D could predict alternative 1 in one position
- * in the input and A B C E could predict alternative 2 in another position in
- * input. The conflicting SLL configurations could still be non-unique in the
- * full context prediction, which would lead us to requiring more input than the
- * original A B C. To make a prediction cache work, we have to track
- * the exact input used during the previous prediction. That amounts to a
- * cache that maps X to a specific DFA for that context.</p>
- *
- * <p>
- * Something should be done for left-recursive expression predictions. They are
- * likely LL(1) + pred eval. Easier to do the whole "SLL unless error, then
- * retry with full LL" thing Sam does.
- *
- * <p>
- * <strong>AVOIDING FULL CONTEXT PREDICTION</strong></p>
- *
- * <p>
- * We avoid doing full context retry when the outer context is empty, we did not
- * dip into the outer context by falling off the end of the decision state rule,
- * or when we force SLL mode.</p>
- *
- * <p>
- * As an example of the not-dip-into-outer-context case, consider super
- * constructor calls versus function calls. One grammar might look like
- * this:</p>
- *
- * <pre>
- * ctorBody
- * : '{' superCall? stat* '}'
- * ;
- * </pre>
- *
- * <p>
- * Or, you might see something like</p>
- *
- * <pre>
- * stat
- * : superCall ';'
- * | expression ';'
- * | ...
- * ;
- * </pre>
- *
- * <p>
- * In both cases I believe that no closure operations will dip into the outer
- * context. In the first case ctorBody in the worst case will stop at the '}'.
- * In the 2nd case it should stop at the ';'. Both cases should stay within the
- * entry rule and not dip into the outer context.</p>
- *
- * <p>
- * <strong>PREDICATES</strong></p>
- *
- * <p>
 - * Predicates, if present, are always evaluated in both SLL and LL. SLL and
 - * LL simulation deal with predicates differently, however. SLL collects predicates as
 - * it performs closure operations, like ANTLR v3 did. It delays predicate
 - * evaluation until it reaches an accept state. This allows us to cache the SLL
- * ATN simulation whereas, if we had evaluated predicates on-the-fly during
- * closure, the DFA state configuration sets would be different and we couldn't
- * build up a suitable DFA.</p>
- *
- * <p>
- * When building a DFA accept state during ATN simulation, we evaluate any
- * predicates and return the sole semantically valid alternative. If there is
- * more than 1 alternative, we report an ambiguity. If there are 0 alternatives,
- * we throw an exception. Alternatives without predicates act like they have
- * true predicates. The simple way to think about it is to strip away all
- * alternatives with false predicates and choose the minimum alternative that
- * remains.</p>
- *
- * <p>
- * When we start in the DFA and reach an accept state that's predicated, we test
- * those and return the minimum semantically viable alternative. If no
- * alternatives are viable, we throw an exception.</p>
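A minimal sketch of this "strip false predicates, take the minimum remaining alternative" rule (the PredAltPair type and minViableAlt helper are hypothetical illustrations, not the runtime's API; a null predicate stands for an unpredicated, always-true alternative):

#include <cstddef>
#include <limits>
#include <vector>

// Hypothetical predicate/alternative pair: a predicate evaluated against the
// current parser stack, paired with the alternative it guards.
struct PredAltPair {
  bool (*pred)();  // nullptr => unpredicated, treated as always true
  size_t alt;
};

// Return the minimum semantically viable alternative, or 0 if none survives
// (the simulator described above throws an exception in that case).
size_t minViableAlt(const std::vector<PredAltPair>& pairs) {
  size_t best = std::numeric_limits<size_t>::max();
  for (const auto& p : pairs) {
    bool viable = (p.pred == nullptr) || p.pred();  // strip false predicates
    if (viable && p.alt < best) best = p.alt;       // keep the minimum alt
  }
  return best == std::numeric_limits<size_t>::max() ? 0 : best;
}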
- *
- * <p>
 - * During full LL ATN simulation, closure always evaluates predicates
 - * on-the-fly. This is crucial to reducing the configuration set size during
- * closure. It hits a landmine when parsing with the Java grammar, for example,
- * without this on-the-fly evaluation.</p>
- *
- * <p>
- * <strong>SHARING DFA</strong></p>
- *
- * <p>
- * All instances of the same parser share the same decision DFAs through a
- * static field. Each instance gets its own ATN simulator but they share the
- * same {@link #decisionToDFA} field. They also share a
- * {@link PredictionContextCache} object that makes sure that all
- * {@link PredictionContext} objects are shared among the DFA states. This makes
- * a big size difference.</p>
- *
- * <p>
- * <strong>THREAD SAFETY</strong></p>
- *
- * <p>
- * The {@link ParserATNSimulator} locks on the {@link #decisionToDFA} field when
- * it adds a new DFA object to that array. {@link #addDFAEdge}
- * locks on the DFA for the current decision when setting the
- * {@link DFAState#edges} field. {@link #addDFAState} locks on
- * the DFA for the current decision when looking up a DFA state to see if it
- * already exists. We must make sure that all requests to add DFA states that
- * are equivalent result in the same shared DFA object. This is because lots of
- * threads will be trying to update the DFA at once. The
- * {@link #addDFAState} method also locks inside the DFA lock
- * but this time on the shared context cache when it rebuilds the
- * configurations' {@link PredictionContext} objects using cached
- * subgraphs/nodes. No other locking occurs, even during DFA simulation. This is
- * safe as long as we can guarantee that all threads referencing
- * {@code s.edge[t]} get the same physical target {@link DFAState}, or
- * {@code null}. Once into the DFA, the DFA simulation does not reference the
- * {@link DFA#states} map. It follows the {@link DFAState#edges} field to new
- * targets. The DFA simulator will either find {@link DFAState#edges} to be
- * {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or
- * {@code dfa.edges[t]} to be non-null. The
 - * {@link #addDFAEdge} method could be racing to set the field
 - * but in either case the DFA simulator works; if it sees {@code null}, it
 - * requests ATN simulation. It could also race trying to get {@code dfa.edges[t]},
 - * but either way it will work because it's not doing a test-and-set operation.</p>
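A rough illustration of that locking discipline (the DFASketch/DFAStateSketch types and the mutex member are assumptions made for this sketch; the real runtime organizes its locks differently): reads of an edge take no lock, and only edge insertion is guarded, which is safe because racing writers store equivalent shared targets.

#include <cstddef>
#include <mutex>
#include <vector>

struct DFAStateSketch {
  std::vector<DFAStateSketch*> edges;  // indexed by input symbol
};

struct DFASketch {
  std::mutex lock;  // hypothetical: guards edge insertion for this decision
};

// Reading an edge needs no lock: a thread either sees nullptr (and falls back
// to ATN simulation) or sees the same shared target state as everyone else.
DFAStateSketch* readEdge(DFAStateSketch* s, size_t t) {
  return t < s->edges.size() ? s->edges[t] : nullptr;
}

// Writing an edge is guarded. There is no test-and-set: if two threads race,
// both write an equivalent shared target, so the last write is harmless.
void writeEdge(DFASketch& dfa, DFAStateSketch* from, size_t t,
               DFAStateSketch* to) {
  std::lock_guard<std::mutex> guard(dfa.lock);
  if (from->edges.size() <= t) from->edges.resize(t + 1, nullptr);
  from->edges[t] = to;
}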
- *
- * <p>
 - * <strong>Starting with SLL then failing over to combined SLL/LL (Two-Stage
- * Parsing)</strong></p>
- *
- * <p>
- * Sam pointed out that if SLL does not give a syntax error, then there is no
- * point in doing full LL, which is slower. We only have to try LL if we get a
- * syntax error. For maximum speed, Sam starts the parser set to pure SLL
- * mode with the {@link BailErrorStrategy}:</p>
- *
- * <pre>
- * parser.{@link Parser#getInterpreter() getInterpreter()}.{@link
- * #setPredictionMode setPredictionMode}{@code (}{@link
- * PredictionMode#SLL}{@code )}; parser.{@link Parser#setErrorHandler
- * setErrorHandler}(new {@link BailErrorStrategy}());
- * </pre>
- *
- * <p>
- * If it does not get a syntax error, then we're done. If it does get a syntax
- * error, we need to retry with the combined SLL/LL strategy.</p>
- *
- * <p>
- * The reason this works is as follows. If there are no SLL conflicts, then the
- * grammar is SLL (at least for that input set). If there is an SLL conflict,
- * the full LL analysis must yield a set of viable alternatives which is a
- * subset of the alternatives reported by SLL. If the LL set is a singleton,
- * then the grammar is LL but not SLL. If the LL set is the same size as the SLL
- * set, the decision is SLL. If the LL set has size &gt; 1, then that decision
- * is truly ambiguous on the current input. If the LL set is smaller, then the
- * SLL conflict resolution might choose an alternative that the full LL would
- * rule out as a possibility based upon better context information. If that's
- * the case, then the SLL parse will definitely get an error because the full LL
- * analysis says it's not viable. If SLL conflict resolution chooses an
 - * alternative within the LL set, then both SLL and LL would choose the same
- * alternative because they both choose the minimum of multiple conflicting
- * alternatives.</p>
- *
- * <p>
- * Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and
- * a smaller LL set called <em>s</em>. If <em>s</em> is {@code {2, 3}}, then SLL
- * parsing will get an error because SLL will pursue alternative 1. If
- * <em>s</em> is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will
- * choose the same alternative because alternative one is the minimum of either
- * set. If <em>s</em> is {@code {2}} or {@code {3}} then SLL will get a syntax
- * error. If <em>s</em> is {@code {1}} then SLL will succeed.</p>
- *
- * <p>
- * Of course, if the input is invalid, then we will get an error for sure in
- * both SLL and LL parsing. Erroneous input will therefore require 2 passes over
- * the input.</p>
- */
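A minimal sketch of this two-stage strategy against the public runtime API (MyParser and its startRule entry rule are assumed, grammar-specific names; the other calls are the ones shown or referenced in the comment above):

#include "antlr4-runtime.h"

#include <memory>

using namespace antlr4;

// First pass: pure SLL with bail-out. Only on a syntax error do we rewind and
// pay for the slower combined SLL/LL pass, so valid input stays fast.
tree::ParseTree* twoStageParse(MyParser& parser, TokenStream& tokens) {
  parser.getInterpreter<atn::ParserATNSimulator>()->setPredictionMode(
      atn::PredictionMode::SLL);
  parser.setErrorHandler(std::make_shared<BailErrorStrategy>());
  try {
    return parser.startRule();
  } catch (ParseCancellationException&) {
    // SLL failed somewhere; rewind the tokens and retry with full LL.
    tokens.seek(0);
    parser.reset();
    parser.setErrorHandler(std::make_shared<DefaultErrorStrategy>());
    parser.getInterpreter<atn::ParserATNSimulator>()->setPredictionMode(
        atn::PredictionMode::LL);
    return parser.startRule();
  }
}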
-class ANTLR4CPP_PUBLIC ParserATNSimulator : public ATNSimulator {
- public:
- /// Testing only!
- ParserATNSimulator(const ATN& atn, std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache);
-
- ParserATNSimulator(Parser* parser, const ATN& atn,
- std::vector<dfa::DFA>& decisionToDFA,
- PredictionContextCache& sharedContextCache);
-
- virtual void reset() override;
- virtual void clearDFA() override;
- virtual size_t adaptivePredict(TokenStream* input, size_t decision,
- ParserRuleContext* outerContext);
-
- static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT;
-
- std::vector<dfa::DFA>& decisionToDFA;
-
- /** Implements first-edge (loop entry) elimination as an optimization
- * during closure operations. See antlr/antlr4#1398.
- *
- * The optimization is to avoid adding the loop entry config when
- * the exit path can only lead back to the same
- * StarLoopEntryState after popping context at the rule end state
- * (traversing only epsilon edges, so we're still in closure, in
- * this same rule).
- *
- * We need to detect any state that can reach loop entry on
- * epsilon w/o exiting rule. We don't have to look at FOLLOW
- * links, just ensure that all stack tops for config refer to key
- * states in LR rule.
- *
- * To verify we are in the right situation we must first check
- * closure is at a StarLoopEntryState generated during LR removal.
- * Then we check that each stack top of context is a return state
- * from one of these cases:
- *
-   * 1. 'not' expr, '(' type ')' expr. The return state points at the loop
-   *    entry state.
-   * 2. expr op expr. The return state is the block end of the internal
-   *    block of (...)*.
-   * 3. 'between' expr 'and' expr. The return state of the 2nd expr
-   *    reference. That state points at the block end of the internal
-   *    block of (...)*.
-   * 4. expr '?' expr ':' expr. The return state points at the block end,
-   *    which points at the loop entry state.
- *
- * If any is true for each stack top, then closure does not add a
- * config to the current config set for edge[0], the loop entry branch.
- *
- * Conditions fail if any context for the current config is:
- *
- * a. empty (we'd fall out of expr to do a global FOLLOW which could
- * even be to some weird spot in expr) or,
- * b. lies outside of expr or,
- * c. lies within expr but at a state not the BlockEndState
- * generated during LR removal
- *
- * Do we need to evaluate predicates ever in closure for this case?
- *
- * No. Predicates, including precedence predicates, are only
- * evaluated when computing a DFA start state. I.e., only before
- * the lookahead (but not parser) consumes a token.
- *
- * There are no epsilon edges allowed in LR rule alt blocks or in
- * the "primary" part (ID here). If closure is in
- * StarLoopEntryState any lookahead operation will have consumed a
- * token as there are no epsilon-paths that lead to
-   * StarLoopEntryState. Therefore, we do not have to evaluate
-   * predicates if we are in the generated StarLoopEntryState of an LR
-   * rule. Note that when making a prediction starting at that
- * decision point, decision d=2, compute-start-state performs
- * closure starting at edges[0], edges[1] emanating from
- * StarLoopEntryState. That means it is not performing closure on
- * StarLoopEntryState during compute-start-state.
- *
- * How do we know this always gives same prediction answer?
- *
- * Without predicates, loop entry and exit paths are ambiguous
-   * upon remaining input +b (in, say, a+b). Either path leads to
- * valid parses. Closure can lead to consuming + immediately or by
- * falling out of this call to expr back into expr and loop back
- * again to StarLoopEntryState to match +b. In this special case,
- * we choose the more efficient path, which is to take the bypass
- * path.
- *
- * The lookahead language has not changed because closure chooses
- * one path over the other. Both paths lead to consuming the same
- * remaining input during a lookahead operation. If the next token
- * is an operator, lookahead will enter the choice block with
- * operators. If it is not, lookahead will exit expr. Same as if
- * closure had chosen to enter the choice block immediately.
- *
- * Closure is examining one config (some loopentrystate, some alt,
- * context) which means it is considering exactly one alt. Closure
- * always copies the same alt to any derived configs.
- *
- * How do we know this optimization doesn't mess up precedence in
- * our parse trees?
- *
- * Looking through expr from left edge of stat only has to confirm
- * that an input, say, a+b+c; begins with any valid interpretation
- * of an expression. The precedence actually doesn't matter when
- * making a decision in stat seeing through expr. It is only when
- * parsing rule expr that we must use the precedence to get the
- * right interpretation and, hence, parse tree.
- */
- bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig* config) const;
- virtual std::string getRuleName(size_t index);
-
- virtual Ref<ATNConfig> precedenceTransition(Ref<ATNConfig> const& config,
- PrecedencePredicateTransition* pt,
- bool collectPredicates,
- bool inContext, bool fullCtx);
-
- void setPredictionMode(PredictionMode newMode);
- PredictionMode getPredictionMode();
-
- Parser* getParser();
-
- virtual std::string getTokenName(size_t t);
-
- virtual std::string getLookaheadName(TokenStream* input);
-
- /// <summary>
- /// Used for debugging in adaptivePredict around execATN but I cut
- /// it out for clarity now that alg. works well. We can leave this
- /// "dead" code for a bit.
- /// </summary>
- virtual void dumpDeadEndConfigs(NoViableAltException& nvae);
-
- protected:
- Parser* const parser;
-
- /// <summary>
- /// Each prediction operation uses a cache for merge of prediction contexts.
- /// Don't keep around as it wastes huge amounts of memory. The merge cache
-  /// Don't keep it around, as it wastes huge amounts of memory. The merge cache
-  /// isn't synchronized, but we're OK since two threads shouldn't reuse the same
- /// This maps graphs a and b to merged result c. (a,b)->c. We can avoid
- /// the merge if we ever see a and b again. Note that (b,a)->c should
- /// also be examined during cache lookup.
- /// </summary>
- PredictionContextMergeCache mergeCache;
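A small sketch of the symmetric lookup called for above, keyed on hypothetical context ids rather than the runtime's actual graph types:

#include <cstddef>
#include <map>
#include <memory>
#include <utility>

// Hypothetical merged-context cache: (a, b) -> c. Because merging is
// commutative, a lookup should also try the reversed pair before giving up.
struct MergeCacheSketch {
  std::map<std::pair<size_t, size_t>, std::shared_ptr<void>> entries;

  std::shared_ptr<void> get(size_t a, size_t b) const {
    auto it = entries.find({a, b});
    if (it != entries.end()) return it->second;
    it = entries.find({b, a});  // (b, a) -> c is just as good
    return it != entries.end() ? it->second : nullptr;
  }

  void put(size_t a, size_t b, std::shared_ptr<void> merged) {
    entries[{a, b}] = std::move(merged);
  }
};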
-
- // LAME globals to avoid parameters!!!!! I need these down deep in
- // predTransition
- TokenStream* _input;
- size_t _startIndex;
- ParserRuleContext* _outerContext;
- dfa::DFA* _dfa; // Reference into the decisionToDFA vector.
-
- /// <summary>
- /// Performs ATN simulation to compute a predicted alternative based
- /// upon the remaining input, but also updates the DFA cache to avoid
- /// having to traverse the ATN again for the same input sequence.
- ///
- /// There are some key conditions we're looking for after computing a new
- /// set of ATN configs (proposed DFA state):
- /// if the set is empty, there is no viable alternative for current symbol
- /// does the state uniquely predict an alternative?
- /// does the state have a conflict that would prevent us from
- /// putting it on the work list?
- ///
- /// We also have some key operations to do:
- /// add an edge from previous DFA state to potentially new DFA state, D,
- /// upon current symbol but only if adding to work list, which means
- /// in all cases except no viable alternative (and possibly non-greedy
- /// decisions?)
- /// collecting predicates and adding semantic context to DFA accept states
- /// adding rule context to context-sensitive DFA accept states
- /// consuming an input symbol
- /// reporting a conflict
- /// reporting an ambiguity
- /// reporting a context sensitivity
- /// reporting insufficient predicates
- ///
- /// cover these cases:
- /// dead end
- /// single alt
- /// single alt + preds
- /// conflict
- /// conflict + preds
- /// </summary>
- virtual size_t execATN(dfa::DFA& dfa, dfa::DFAState* s0, TokenStream* input,
- size_t startIndex, ParserRuleContext* outerContext);
-
- /// <summary>
- /// Get an existing target state for an edge in the DFA. If the target state
- /// for the edge has not yet been computed or is otherwise not available,
- /// this method returns {@code null}.
- /// </summary>
- /// <param name="previousD"> The current DFA state </param>
- /// <param name="t"> The next input symbol </param>
- /// <returns> The existing target DFA state for the given input symbol
- /// {@code t}, or {@code null} if the target state for this edge is not
- /// already cached </returns>
- virtual dfa::DFAState* getExistingTargetState(dfa::DFAState* previousD,
- size_t t);
-
- /// <summary>
- /// Compute a target state for an edge in the DFA, and attempt to add the
- /// computed state and corresponding edge to the DFA.
- /// </summary>
- /// <param name="dfa"> The DFA </param>
- /// <param name="previousD"> The current DFA state </param>
- /// <param name="t"> The next input symbol
- /// </param>
- /// <returns> The computed target DFA state for the given input symbol
- /// {@code t}. If {@code t} does not lead to a valid DFA state, this method
- /// returns <seealso cref="#ERROR"/>. </returns>
- virtual dfa::DFAState* computeTargetState(dfa::DFA& dfa,
- dfa::DFAState* previousD, size_t t);
-
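The cache-then-compute step these two methods describe can be sketched like this (StateSketch and the helpers are hypothetical stand-ins, not the runtime's DFA types):

#include <cstddef>
#include <vector>

struct StateSketch {
  std::vector<StateSketch*> edges;  // indexed by input symbol
  bool isAccept = false;
  size_t alt = 0;  // predicted alternative at accept states
};

// Stand-in for ATN simulation; returns nullptr when no target state exists.
StateSketch* computeTarget(StateSketch*, size_t) { return nullptr; }

StateSketch* existingTarget(StateSketch* s, size_t t) {
  return t < s->edges.size() ? s->edges[t] : nullptr;
}

// Walk the DFA: reuse a cached edge when present, otherwise compute it.
size_t predictSketch(StateSketch* start, const std::vector<size_t>& input) {
  StateSketch* s = start;
  for (size_t t : input) {
    StateSketch* target = existingTarget(s, t);
    if (target == nullptr) target = computeTarget(s, t);  // cache miss
    if (target == nullptr) return 0;  // no viable alternative for this symbol
    if (target->isAccept) return target->alt;
    s = target;
  }
  return 0;
}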
- virtual void predicateDFAState(dfa::DFAState* dfaState,
- DecisionState* decisionState);
-
- // comes back with reach.uniqueAlt set to a valid alt
- virtual size_t execATNWithFullContext(
- dfa::DFA& dfa, dfa::DFAState* D, ATNConfigSet* s0, TokenStream* input,
- size_t startIndex,
- ParserRuleContext* outerContext); // how far we got before failing over
-
- virtual std::unique_ptr<ATNConfigSet> computeReachSet(ATNConfigSet* closure,
- size_t t, bool fullCtx);
-
- /// <summary>
- /// Return a configuration set containing only the configurations from
- /// {@code configs} which are in a <seealso cref="RuleStopState"/>. If all
- /// configurations in {@code configs} are already in a rule stop state, this
- /// method simply returns {@code configs}.
- /// <p/>
- /// When {@code lookToEndOfRule} is true, this method uses
- /// <seealso cref="ATN#nextTokens"/> for each configuration in {@code configs}
- /// which is not already in a rule stop state to see if a rule stop state is
- /// reachable from the configuration via epsilon-only transitions.
- /// </summary>
- /// <param name="configs"> the configuration set to update </param>
- /// <param name="lookToEndOfRule"> when true, this method checks for rule stop
- /// states reachable by epsilon-only transitions from each configuration in
- /// {@code configs}.
- /// </param>
- /// <returns> {@code configs} if all configurations in {@code configs} are in
- /// a rule stop state, otherwise return a new configuration set containing
- /// only the configurations from {@code configs} which are in a rule stop
- /// state </returns>
- virtual ATNConfigSet* removeAllConfigsNotInRuleStopState(
- ATNConfigSet* configs, bool lookToEndOfRule);
-
- virtual std::unique_ptr<ATNConfigSet> computeStartState(ATNState* p,
- RuleContext* ctx,
- bool fullCtx);
-
- /* parrt internal source braindump that doesn't mess up
- * external API spec.
-
- applyPrecedenceFilter is an optimization to avoid highly
- nonlinear prediction of expressions and other left recursive
-     rules. The precedence predicates such as {3>=prec}? are highly
- context-sensitive in that they can only be properly evaluated
- in the context of the proper prec argument. Without pruning,
- these predicates are normal predicates evaluated when we reach
- conflict state (or unique prediction). As we cannot evaluate
- these predicates out of context, the resulting conflict leads
- to full LL evaluation and nonlinear prediction which shows up
- very clearly with fairly large expressions.
-
- Example grammar:
-
- e : e '*' e
- | e '+' e
- | INT
- ;
-
- We convert that to the following:
-
- e[int prec]
- : INT
- ( {3>=prec}? '*' e[4]
- | {2>=prec}? '+' e[3]
- )*
- ;
-
- The (..)* loop has a decision for the inner block as well as
- an enter or exit decision, which is what concerns us here. At
- the 1st + of input 1+2+3, the loop entry sees both predicates
- and the loop exit also sees both predicates by falling off the
- edge of e. This is because we have no stack information with
- SLL and find the follow of e, which will hit the return states
- inside the loop after e[4] and e[3], which brings it back to
- the enter or exit decision. In this case, we know that we
- cannot evaluate those predicates because we have fallen off
- the edge of the stack and will in general not know which prec
- parameter is the right one to use in the predicate.
-
- Because we have special information, that these are precedence
- predicates, we can resolve them without failing over to full
- LL despite their context sensitive nature. We make an
- assumption that prec[-1] <= prec[0], meaning that the current
- precedence level is greater than or equal to the precedence
- level of recursive invocations above us in the stack. For
- example, if predicate {3>=prec}? is true of the current prec,
- then one option is to enter the loop to match it now. The
- other option is to exit the loop and the left recursive rule
- to match the current operator in rule invocation further up
- the stack. But, we know that all of those prec are lower or
- the same value and so we can decide to enter the loop instead
- of matching it later. That means we can strip out the other
- configuration for the exit branch.
-
- So imagine we have (14,1,$,{2>=prec}?) and then
- (14,2,$-dipsIntoOuterContext,{2>=prec}?). The optimization
- allows us to collapse these two configurations. We know that
- if {2>=prec}? is true for the current prec parameter, it will
- also be true for any prec from an invoking e call, indicated
- by dipsIntoOuterContext. As the predicates are both true, we
- have the option to evaluate them early in the decision start
- state. We do this by stripping both predicates and choosing to
- enter the loop as it is consistent with the notion of operator
- precedence. It's also how the full LL conflict resolution
- would work.
-
- The solution requires a different DFA start state for each
- precedence level.
-
- The basic filter mechanism is to remove configurations of the
- form (p, 2, pi) if (p, 1, pi) exists for the same p and pi. In
- other words, for the same ATN state and predicate context,
- remove any configuration associated with an exit branch if
- there is a configuration associated with the enter branch.
-
- It's also the case that the filter evaluates precedence
- predicates and resolves conflicts according to precedence
- levels. For example, for input 1+2+3 at the first +, we see
- prediction filtering
-
- [(11,1,[$],{3>=prec}?), (14,1,[$],{2>=prec}?), (5,2,[$],up=1),
- (11,2,[$],up=1),
- (14,2,[$],up=1)],hasSemanticContext=true,dipsIntoOuterContext
-
- to
-
- [(11,1,[$]), (14,1,[$]), (5,2,[$],up=1)],dipsIntoOuterContext
-
-     This filters because {3>=prec}? evaluates to true and collapses
-     (11,1,[$],{3>=prec}?) and (11,2,[$],up=1), since early conflict
-     resolution based upon the rules of operator precedence fits with
-     our usual "match the first alt upon conflict" policy.
-
-     We noticed a problem where a recursive call resets precedence
-     to 0. Sam's fix: each config has a flag indicating whether it has
-     returned from an expr[0] call. Then we just don't filter any
-     config with that flag set. The flag is carried along in
-     closure(). To avoid adding a field, we set the bit just under the
-     sign bit of dipsIntoOuterContext (SUPPRESS_PRECEDENCE_FILTER).
-     With the change, you filter "unless (p, 2, pi) was reached
-     after leaving the rule stop state of the LR rule containing
-     state p, corresponding to a rule invocation with precedence
-     level 0".
- */
-
- /**
- * This method transforms the start state computed by
- * {@link #computeStartState} to the special start state used by a
- * precedence DFA for a particular precedence value. The transformation
- * process applies the following changes to the start state's configuration
- * set.
- *
- * <ol>
- * <li>Evaluate the precedence predicates for each configuration using
- * {@link SemanticContext#evalPrecedence}.</li>
- * <li>When {@link ATNConfig#isPrecedenceFilterSuppressed} is {@code false},
- * remove all configurations which predict an alternative greater than 1,
- * for which another configuration that predicts alternative 1 is in the
- * same ATN state with the same prediction context. This transformation is
- * valid for the following reasons:
- * <ul>
- * <li>The closure block cannot contain any epsilon transitions which bypass
- * the body of the closure, so all states reachable via alternative 1 are
- * part of the precedence alternatives of the transformed left-recursive
- * rule.</li>
- * <li>The "primary" portion of a left recursive rule cannot contain an
- * epsilon transition, so the only way an alternative other than 1 can exist
- * in a state that is also reachable via alternative 1 is by nesting calls
- * to the left-recursive rule, with the outer calls not being at the
- * preferred precedence level. The
- * {@link ATNConfig#isPrecedenceFilterSuppressed} property marks ATN
- * configurations which do not meet this condition, and therefore are not
- * eligible for elimination during the filtering process.</li>
- * </ul>
- * </li>
- * </ol>
- *
- * <p>
- * The prediction context must be considered by this filter to address
- * situations like the following.
- * </p>
- * <code>
- * <pre>
- * grammar TA;
- * prog: statement* EOF;
- * statement: letterA | statement letterA 'b' ;
- * letterA: 'a';
- * </pre>
- * </code>
- * <p>
-   * In the above grammar, the ATN state immediately before the token
- * reference {@code 'a'} in {@code letterA} is reachable from the left edge
- * of both the primary and closure blocks of the left-recursive rule
- * {@code statement}. The prediction context associated with each of these
- * configurations distinguishes between them, and prevents the alternative
-   * which stepped out to {@code prog} (and then back in to {@code statement})
-   * from being eliminated by the filter.
- * </p>
- *
- * @param configs The configuration set computed by
- * {@link #computeStartState} as the start state for the DFA.
- * @return The transformed configuration set representing the start state
- * for a precedence DFA at a particular precedence level (determined by
- * calling {@link Parser#getPrecedence}).
- */
- std::unique_ptr<ATNConfigSet> applyPrecedenceFilter(ATNConfigSet* configs);
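A sketch of the basic filter rule described above (drop a configuration (p, 2, pi) when (p, 1, pi) exists with the same state and context, unless the suppression flag says it must survive), expressed over a hypothetical flattened configuration type; the precedence-predicate evaluation the real filter also performs is omitted here:

#include <cstddef>
#include <set>
#include <utility>
#include <vector>

struct ConfigSketch {
  size_t state;      // ATN state p
  size_t alt;        // predicted alternative
  size_t contextId;  // stand-in for the prediction context pi
  bool precedenceFilterSuppressed = false;
};

std::vector<ConfigSketch> precedenceFilterSketch(
    const std::vector<ConfigSketch>& configs) {
  // Record every (state, context) pair covered by an alternative-1 config.
  std::set<std::pair<size_t, size_t>> coveredByAltOne;
  for (const auto& c : configs)
    if (c.alt == 1) coveredByAltOne.insert({c.state, c.contextId});

  // Drop alt>1 configs that an alt-1 config already covers, unless suppressed.
  std::vector<ConfigSketch> kept;
  for (const auto& c : configs) {
    if (c.alt > 1 && !c.precedenceFilterSuppressed &&
        coveredByAltOne.count({c.state, c.contextId})) {
      continue;  // collapsed onto the corresponding alt-1 (enter-branch) config
    }
    kept.push_back(c);
  }
  return kept;
}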
-
- virtual ATNState* getReachableTarget(Transition* trans, size_t ttype);
-
- virtual std::vector<Ref<SemanticContext>> getPredsForAmbigAlts(
- const antlrcpp::BitSet& ambigAlts, ATNConfigSet* configs, size_t nalts);
-
- virtual std::vector<dfa::DFAState::PredPrediction*> getPredicatePredictions(
- const antlrcpp::BitSet& ambigAlts,
- std::vector<Ref<SemanticContext>> altToPred);
-
- /**
- * This method is used to improve the localization of error messages by
- * choosing an alternative rather than throwing a
- * {@link NoViableAltException} in particular prediction scenarios where the
- * {@link #ERROR} state was reached during ATN simulation.
- *
- * <p>
- * The default implementation of this method uses the following
- * algorithm to identify an ATN configuration which successfully parsed the
- * decision entry rule. Choosing such an alternative ensures that the
- * {@link ParserRuleContext} returned by the calling rule will be complete
- * and valid, and the syntax error will be reported later at a more
- * localized location.</p>
- *
- * <ul>
- * <li>If a syntactically valid path or paths reach the end of the decision
- * rule and they are semantically valid if predicated, return the min
-   * associated alt.</li> <li>Else, if one or more semantically invalid but
-   * syntactically valid paths exist, return the minimum associated alt.
- * </li>
- * <li>Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.</li>
- * </ul>
- *
- * <p>
- * In some scenarios, the algorithm described above could predict an
- * alternative which will result in a {@link FailedPredicateException} in
- * the parser. Specifically, this could occur if the <em>only</em>
- * configuration capable of successfully parsing to the end of the decision
- * rule is blocked by a semantic predicate. By choosing this alternative
- * within
- * {@link #adaptivePredict} instead of throwing a
- * {@link NoViableAltException}, the resulting
- * {@link FailedPredicateException} in the parser will identify the specific
- * predicate which is preventing the parser from successfully parsing the
- * decision rule, which helps developers identify and correct logic errors
- * in semantic predicates.
- * </p>
- *
- * @param configs The ATN configurations which were valid immediately before
- * the {@link #ERROR} state was reached
-   * @param outerContext The \gamma_0 initial parser context from the
-   * paper, or the parser stack at the instant before prediction commences.
- *
- * @return The value to return from {@link #adaptivePredict}, or
- * {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not
- * identified and {@link #adaptivePredict} should report an error instead.
- */
- size_t getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(
- ATNConfigSet* configs, ParserRuleContext* outerContext);
-
- virtual size_t getAltThatFinishedDecisionEntryRule(ATNConfigSet* configs);
-
- /** Walk the list of configurations and split them according to
- * those that have preds evaluating to true/false. If no pred, assume
- * true pred and include in succeeded set. Returns Pair of sets.
- *
- * Create a new set so as not to alter the incoming parameter.
- *
-   * Assumption: the input stream has been restored to the starting point of
-   * prediction, which is where predicates need to be evaluated.
- */
- std::pair<ATNConfigSet*, ATNConfigSet*> splitAccordingToSemanticValidity(
- ATNConfigSet* configs, ParserRuleContext* outerContext);
-
- /// <summary>
- /// Look through a list of predicate/alt pairs, returning alts for the
- /// pairs that win. A {@code NONE} predicate indicates an alt containing an
- /// unpredicated config which behaves as "always true." If !complete
- /// then we stop at the first predicate that evaluates to true. This
- /// includes pairs with null predicates.
- /// </summary>
- virtual antlrcpp::BitSet evalSemanticContext(
- std::vector<dfa::DFAState::PredPrediction*> predPredictions,
- ParserRuleContext* outerContext, bool complete);
-
- /**
- * Evaluate a semantic context within a specific parser context.
- *
- * <p>
- * This method might not be called for every semantic context evaluated
- * during the prediction process. In particular, we currently do not
- * evaluate the following but it may change in the future:</p>
- *
- * <ul>
- * <li>Precedence predicates (represented by
- * {@link SemanticContext.PrecedencePredicate}) are not currently evaluated
- * through this method.</li>
- * <li>Operator predicates (represented by {@link SemanticContext.AND} and
- * {@link SemanticContext.OR}) are evaluated as a single semantic
- * context, rather than evaluating the operands individually.
- * Implementations which require evaluation results from individual
- * predicates should override this method to explicitly handle evaluation of
- * the operands within operator predicates.</li>
- * </ul>
- *
- * @param pred The semantic context to evaluate
- * @param parserCallStack The parser context in which to evaluate the
- * semantic context
- * @param alt The alternative which is guarded by {@code pred}
- * @param fullCtx {@code true} if the evaluation is occurring during LL
- * prediction; otherwise, {@code false} if the evaluation is occurring
- * during SLL prediction
- *
- * @since 4.3
- */
- virtual bool evalSemanticContext(Ref<SemanticContext> const& pred,
- ParserRuleContext* parserCallStack,
- size_t alt, bool fullCtx);
-
- /* TO_DO: If we are doing predicates, there is no point in pursuing
- closure operations if we reach a DFA state that uniquely predicts
- alternative. We will not be caching that DFA state and it is a
- waste to pursue the closure. Might have to advance when we do
-     ambig detection though :(
- */
- virtual void closure(Ref<ATNConfig> const& config, ATNConfigSet* configs,
- ATNConfig::Set& closureBusy, bool collectPredicates,
- bool fullCtx, bool treatEofAsEpsilon);
-
- virtual void closureCheckingStopState(Ref<ATNConfig> const& config,
- ATNConfigSet* configs,
- ATNConfig::Set& closureBusy,
- bool collectPredicates, bool fullCtx,
- int depth, bool treatEofAsEpsilon);
-
- /// Do the actual work of walking epsilon edges.
- virtual void closure_(Ref<ATNConfig> const& config, ATNConfigSet* configs,
- ATNConfig::Set& closureBusy, bool collectPredicates,
- bool fullCtx, int depth, bool treatEofAsEpsilon);
-
- virtual Ref<ATNConfig> getEpsilonTarget(Ref<ATNConfig> const& config,
- Transition* t, bool collectPredicates,
- bool inContext, bool fullCtx,
- bool treatEofAsEpsilon);
- virtual Ref<ATNConfig> actionTransition(Ref<ATNConfig> const& config,
- ActionTransition* t);
-
- virtual Ref<ATNConfig> predTransition(Ref<ATNConfig> const& config,
- PredicateTransition* pt,
- bool collectPredicates, bool inContext,
- bool fullCtx);
-
- virtual Ref<ATNConfig> ruleTransition(Ref<ATNConfig> const& config,
- RuleTransition* t);
-
- /**
- * Gets a {@link BitSet} containing the alternatives in {@code configs}
- * which are part of one or more conflicting alternative subsets.
- *
- * @param configs The {@link ATNConfigSet} to analyze.
- * @return The alternatives in {@code configs} which are part of one or more
- * conflicting alternative subsets. If {@code configs} does not contain any
- * conflicting subsets, this method returns an empty {@link BitSet}.
- */
- virtual antlrcpp::BitSet getConflictingAlts(ATNConfigSet* configs);
-
- /// <summary>
- /// Sam pointed out a problem with the previous definition, v3, of
- /// ambiguous states. If we have another state associated with conflicting
- /// alternatives, we should keep going. For example, the following grammar
- ///
- /// s : (ID | ID ID?) ';' ;
- ///
- /// When the ATN simulation reaches the state before ';', it has a DFA
- /// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
- /// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
-  /// because alternative two has another way to continue, via [6|2|[]].
-  /// The key is that we have a single state that has configs only associated
- /// with a single alternative, 2, and crucially the state transitions
- /// among the configurations are all non-epsilon transitions. That means
- /// we don't consider any conflicts that include alternative 2. So, we
- /// ignore the conflict between alts 1 and 2. We ignore a set of
- /// conflicting alts when there is an intersection with an alternative
- /// associated with a single alt state in the state->config-list map.
- ///
- /// It's also the case that we might have two conflicting configurations but
- /// also a 3rd nonconflicting configuration for a different alternative:
- /// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
- ///
- /// a : A | A | A B ;
- ///
- /// After matching input A, we reach the stop state for rule A, state 1.
- /// State 8 is the state right before B. Clearly alternatives 1 and 2
- /// conflict and no amount of further lookahead will separate the two.
- /// However, alternative 3 will be able to continue and so we do not
- /// stop working on this state. In the previous example, we're concerned
- /// with states associated with the conflicting alternatives. Here alt
- /// 3 is not associated with the conflicting configs, but since we can
- /// continue looking for input reasonably, I don't declare the state done. We
- /// ignore a set of conflicting alts when we have an alternative
- /// that we still need to pursue.
- /// </summary>
-
- virtual antlrcpp::BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet* configs);
-
- virtual NoViableAltException noViableAlt(TokenStream* input,
- ParserRuleContext* outerContext,
- ATNConfigSet* configs,
- size_t startIndex);
-
- static size_t getUniqueAlt(ATNConfigSet* configs);
-
- /// <summary>
- /// Add an edge to the DFA, if possible. This method calls
- /// <seealso cref="#addDFAState"/> to ensure the {@code to} state is present
- /// in the DFA. If {@code from} is {@code null}, or if {@code t} is outside
- /// the range of edges that can be represented in the DFA tables, this method
- /// returns without adding the edge to the DFA.
- /// <p/>
- /// If {@code to} is {@code null}, this method returns {@code null}.
- /// Otherwise, this method returns the <seealso cref="DFAState"/> returned by
- /// calling <seealso cref="#addDFAState"/> for the {@code to} state.
- /// </summary>
- /// <param name="dfa"> The DFA </param>
- /// <param name="from"> The source state for the edge </param>
- /// <param name="t"> The input symbol </param>
- /// <param name="to"> The target state for the edge
- /// </param>
- /// <returns> If {@code to} is {@code null}, this method returns {@code null};
- /// otherwise this method returns the result of calling <seealso
- /// cref="#addDFAState"/> on {@code to} </returns>
- virtual dfa::DFAState* addDFAEdge(dfa::DFA& dfa, dfa::DFAState* from,
- ssize_t t, dfa::DFAState* to);
-
- /// <summary>
- /// Add state {@code D} to the DFA if it is not already present, and return
- /// the actual instance stored in the DFA. If a state equivalent to {@code D}
- /// is already in the DFA, the existing state is returned. Otherwise this
- /// method returns {@code D} after adding it to the DFA.
- /// <p/>
- /// If {@code D} is <seealso cref="#ERROR"/>, this method returns <seealso
- /// cref="#ERROR"/> and does not change the DFA.
- /// </summary>
- /// <param name="dfa"> The dfa </param>
- /// <param name="D"> The DFA state to add </param>
- /// <returns> The state stored in the DFA. This will be either the existing
- /// state if {@code D} is already in the DFA, or {@code D} itself if the
- /// state was not already present. </returns>
- virtual dfa::DFAState* addDFAState(dfa::DFA& dfa, dfa::DFAState* D);
-
- virtual void reportAttemptingFullContext(
- dfa::DFA& dfa, const antlrcpp::BitSet& conflictingAlts,
- ATNConfigSet* configs, size_t startIndex, size_t stopIndex);
-
- virtual void reportContextSensitivity(dfa::DFA& dfa, size_t prediction,
- ATNConfigSet* configs,
- size_t startIndex, size_t stopIndex);
-
- /// If context sensitive parsing, we know it's ambiguity not conflict.
- virtual void reportAmbiguity(
- dfa::DFA& dfa,
- dfa::DFAState* D, // the DFA state from execATN() that had SLL conflicts
- size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- ATNConfigSet* configs); // configs that LL not SLL considered conflicting
-
- private:
- // SLL, LL, or LL + exact ambig detection?
- PredictionMode _mode;
-
- static bool getLrLoopSetting();
- void InitializeInstanceFields();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp
deleted file mode 100644
index 1a53b12963..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PlusBlockStartState.h"
-
-using namespace antlr4::atn;
-
-size_t PlusBlockStartState::getStateType() { return PLUS_BLOCK_START; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h
deleted file mode 100644
index 22a7013ae7..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusBlockStartState.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/BlockStartState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Start of {@code (A|B|...)+} loop. Technically a decision state, but
-/// we don't use it for code generation; somebody might need it, so I'm defining
-/// it for completeness. In reality, the <seealso cref="PlusLoopbackState"/>
-/// node is the real decision-making node for {@code A+}.
-class ANTLR4CPP_PUBLIC PlusBlockStartState final : public BlockStartState {
- public:
- PlusLoopbackState* loopBackState = nullptr;
-
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp
deleted file mode 100644
index bceea768fe..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PlusLoopbackState.h"
-
-using namespace antlr4::atn;
-
-size_t PlusLoopbackState::getStateType() { return PLUS_LOOP_BACK; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h
deleted file mode 100644
index 5d19a010de..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PlusLoopbackState.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// Decision state for {@code A+} and {@code (A|B)+}. It has two transitions:
-/// one to the loop back to start of the block and one to exit.
-class ANTLR4CPP_PUBLIC PlusLoopbackState final : public DecisionState {
- public:
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp
deleted file mode 100644
index 8c29c1dd6c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PrecedencePredicateTransition.h"
-
-using namespace antlr4::atn;
-
-PrecedencePredicateTransition::PrecedencePredicateTransition(ATNState* target,
- int precedence)
- : AbstractPredicateTransition(target), precedence(precedence) {}
-
-Transition::SerializationType
-PrecedencePredicateTransition::getSerializationType() const {
- return PRECEDENCE;
-}
-
-bool PrecedencePredicateTransition::isEpsilon() const { return true; }
-
-bool PrecedencePredicateTransition::matches(size_t /*symbol*/,
- size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-Ref<SemanticContext::PrecedencePredicate>
-PrecedencePredicateTransition::getPredicate() const {
- return std::make_shared<SemanticContext::PrecedencePredicate>(precedence);
-}
-
-std::string PrecedencePredicateTransition::toString() const {
- return "PRECEDENCE " + Transition::toString() +
- " { precedence: " + std::to_string(precedence) + " }";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h
deleted file mode 100644
index d1d93bb08c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PrecedencePredicateTransition.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "SemanticContext.h"
-#include "atn/AbstractPredicateTransition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC PrecedencePredicateTransition final
- : public AbstractPredicateTransition {
- public:
- const int precedence;
-
- PrecedencePredicateTransition(ATNState* target, int precedence);
-
- virtual SerializationType getSerializationType() const override;
- virtual bool isEpsilon() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
- Ref<SemanticContext::PrecedencePredicate> getPredicate() const;
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp
deleted file mode 100644
index 3aba5b7fbe..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "SemanticContext.h"
-
-#include "atn/PredicateEvalInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-PredicateEvalInfo::PredicateEvalInfo(size_t decision, TokenStream* input,
- size_t startIndex, size_t stopIndex,
- Ref<SemanticContext> const& semctx,
- bool evalResult, size_t predictedAlt,
- bool fullCtx)
- : DecisionEventInfo(decision, nullptr, input, startIndex, stopIndex,
- fullCtx),
- semctx(semctx),
- predictedAlt(predictedAlt),
- evalResult(evalResult) {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h
deleted file mode 100644
index 8d4fc2645e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateEvalInfo.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// This class represents profiling event information for semantic predicate
-/// evaluations which occur during prediction.
-/// </summary>
-/// <seealso cref= ParserATNSimulator#evalSemanticContext
-///
-/// @since 4.3 </seealso>
-class ANTLR4CPP_PUBLIC PredicateEvalInfo : public DecisionEventInfo {
- public:
- /// The semantic context which was evaluated.
- const Ref<SemanticContext> semctx;
-
- /// <summary>
- /// The alternative number for the decision which is guarded by the semantic
- /// context <seealso cref="#semctx"/>. Note that other ATN
- /// configurations may predict the same alternative which are guarded by
- /// other semantic contexts and/or <seealso cref="SemanticContext#NONE"/>.
- /// </summary>
- const size_t predictedAlt;
-
- /// The result of evaluating the semantic context <seealso cref="#semctx"/>.
- const bool evalResult;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="PredicateEvalInfo"/> class
- /// with the specified detailed predicate evaluation information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction
- /// </param> <param name="stopIndex"> The index at which the predicate
- /// evaluation was triggered. Note that the input stream may be reset to other
- /// positions for the actual evaluation of individual predicates. </param>
- /// <param name="semctx"> The semantic context which was evaluated </param>
- /// <param name="evalResult"> The results of evaluating the semantic context
- /// </param> <param name="predictedAlt"> The alternative number for the
- /// decision which is guarded by the semantic context {@code semctx}. See
- /// <seealso cref="#predictedAlt"/> for more information. </param> <param
- /// name="fullCtx"> {@code true} if the semantic context was evaluated during
- /// LL prediction; otherwise, {@code false} if the semantic context was
- /// evaluated during SLL prediction
- /// </param>
- /// <seealso cref= ParserATNSimulator#evalSemanticContext(SemanticContext,
- /// ParserRuleContext, int, boolean) </seealso> <seealso cref=
- /// SemanticContext#eval(Recognizer, RuleContext) </seealso>
- PredicateEvalInfo(size_t decision, TokenStream* input, size_t startIndex,
- size_t stopIndex, Ref<SemanticContext> const& semctx,
- bool evalResult, size_t predictedAlt, bool fullCtx);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp
deleted file mode 100644
index 97a0395aa8..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PredicateTransition.h"
-
-using namespace antlr4::atn;
-
-PredicateTransition::PredicateTransition(ATNState* target, size_t ruleIndex,
- size_t predIndex, bool isCtxDependent)
- : AbstractPredicateTransition(target),
- ruleIndex(ruleIndex),
- predIndex(predIndex),
- isCtxDependent(isCtxDependent) {}
-
-Transition::SerializationType PredicateTransition::getSerializationType()
- const {
- return PREDICATE;
-}
-
-bool PredicateTransition::isEpsilon() const { return true; }
-
-bool PredicateTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-Ref<SemanticContext::Predicate> PredicateTransition::getPredicate() const {
- return std::make_shared<SemanticContext::Predicate>(ruleIndex, predIndex,
- isCtxDependent);
-}
-
-std::string PredicateTransition::toString() const {
- return "PREDICATE " + Transition::toString() +
- " { ruleIndex: " + std::to_string(ruleIndex) +
- ", predIndex: " + std::to_string(predIndex) +
- ", isCtxDependent: " + std::to_string(isCtxDependent) + " }";
-
- // Generate and add a predicate context here?
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.h
deleted file mode 100644
index f2f009f556..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredicateTransition.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "SemanticContext.h"
-#include "atn/AbstractPredicateTransition.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// TO_DO: this is old comment:
-/// A tree of semantic predicates from the grammar AST if label==SEMPRED.
-/// In the ATN, labels will always be exactly one predicate, but the DFA
-/// may have to combine a bunch of them as it collects predicates from
-/// multiple ATN configurations into a single DFA state.
-class ANTLR4CPP_PUBLIC PredicateTransition final
- : public AbstractPredicateTransition {
- public:
- const size_t ruleIndex;
- const size_t predIndex;
- const bool isCtxDependent; // e.g., $i ref in pred
-
- PredicateTransition(ATNState* target, size_t ruleIndex, size_t predIndex,
- bool isCtxDependent);
-
- virtual SerializationType getSerializationType() const override;
-
- virtual bool isEpsilon() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- Ref<SemanticContext::Predicate> getPredicate() const;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.cpp
deleted file mode 100644
index 622a569cbe..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.cpp
+++ /dev/null
@@ -1,694 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ParserRuleContext.h"
-#include "RuleContext.h"
-#include "atn/ArrayPredictionContext.h"
-#include "atn/EmptyPredictionContext.h"
-#include "atn/RuleTransition.h"
-#include "misc/MurmurHash.h"
-#include "support/Arrays.h"
-#include "support/CPPUtils.h"
-
-#include "atn/PredictionContext.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-using namespace antlr4::atn;
-
-using namespace antlrcpp;
-
-size_t PredictionContext::globalNodeCount = 0;
-const Ref<PredictionContext> PredictionContext::EMPTY =
- std::make_shared<EmptyPredictionContext>();
-
-//----------------- PredictionContext
-//----------------------------------------------------------------------------------
-
-PredictionContext::PredictionContext(size_t cachedHashCode)
- : id(globalNodeCount++), cachedHashCode(cachedHashCode) {}
-
-PredictionContext::~PredictionContext() {}
-
-Ref<PredictionContext> PredictionContext::fromRuleContext(
- const ATN& atn, RuleContext* outerContext) {
- if (outerContext == nullptr) {
- return PredictionContext::EMPTY;
- }
-
- // if we are in RuleContext of start rule, s, then PredictionContext
- // is EMPTY. Nobody called us. (if we are empty, return empty)
- if (outerContext->parent == nullptr ||
- outerContext == &ParserRuleContext::EMPTY) {
- return PredictionContext::EMPTY;
- }
-
- // If we have a parent, convert it to a PredictionContext graph
- Ref<PredictionContext> parent = PredictionContext::fromRuleContext(
- atn, dynamic_cast<RuleContext*>(outerContext->parent));
-
- ATNState* state = atn.states.at(outerContext->invokingState);
- RuleTransition* transition = (RuleTransition*)state->transitions[0];
- return SingletonPredictionContext::create(
- parent, transition->followState->stateNumber);
-}
-
-bool PredictionContext::isEmpty() const { return this == EMPTY.get(); }
-
-bool PredictionContext::hasEmptyPath() const {
- // since EMPTY_RETURN_STATE can only appear in the last position, we check
- // last one
- return getReturnState(size() - 1) == EMPTY_RETURN_STATE;
-}
-
-size_t PredictionContext::hashCode() const { return cachedHashCode; }
-
-size_t PredictionContext::calculateEmptyHashCode() {
- size_t hash = MurmurHash::initialize(INITIAL_HASH);
- hash = MurmurHash::finish(hash, 0);
- return hash;
-}
-
-size_t PredictionContext::calculateHashCode(Ref<PredictionContext> parent,
- size_t returnState) {
- size_t hash = MurmurHash::initialize(INITIAL_HASH);
- hash = MurmurHash::update(hash, parent);
- hash = MurmurHash::update(hash, returnState);
- hash = MurmurHash::finish(hash, 2);
- return hash;
-}
-
-size_t PredictionContext::calculateHashCode(
- const std::vector<Ref<PredictionContext>>& parents,
- const std::vector<size_t>& returnStates) {
- size_t hash = MurmurHash::initialize(INITIAL_HASH);
-
- for (auto parent : parents) {
- hash = MurmurHash::update(hash, parent);
- }
-
- for (auto returnState : returnStates) {
- hash = MurmurHash::update(hash, returnState);
- }
-
- return MurmurHash::finish(hash, parents.size() + returnStates.size());
-}
-
-Ref<PredictionContext> PredictionContext::merge(
- const Ref<PredictionContext>& a, const Ref<PredictionContext>& b,
- bool rootIsWildcard, PredictionContextMergeCache* mergeCache) {
- assert(a && b);
-
- // share same graph if both same
- if (a == b || *a == *b) {
- return a;
- }
-
- if (is<SingletonPredictionContext>(a) && is<SingletonPredictionContext>(b)) {
- return mergeSingletons(
- std::dynamic_pointer_cast<SingletonPredictionContext>(a),
- std::dynamic_pointer_cast<SingletonPredictionContext>(b),
- rootIsWildcard, mergeCache);
- }
-
- // At least one of a or b is array.
- // If one is $ and rootIsWildcard, return $ as * wildcard.
- if (rootIsWildcard) {
- if (is<EmptyPredictionContext>(a)) {
- return a;
- }
- if (is<EmptyPredictionContext>(b)) {
- return b;
- }
- }
-
- // convert singleton so both are arrays to normalize
- Ref<ArrayPredictionContext> left;
- if (is<SingletonPredictionContext>(a)) {
- left = std::make_shared<ArrayPredictionContext>(
- std::dynamic_pointer_cast<SingletonPredictionContext>(a));
- } else {
- left = std::dynamic_pointer_cast<ArrayPredictionContext>(a);
- }
- Ref<ArrayPredictionContext> right;
- if (is<SingletonPredictionContext>(b)) {
- right = std::make_shared<ArrayPredictionContext>(
- std::dynamic_pointer_cast<SingletonPredictionContext>(b));
- } else {
- right = std::dynamic_pointer_cast<ArrayPredictionContext>(b);
- }
- return mergeArrays(left, right, rootIsWildcard, mergeCache);
-}
-
-Ref<PredictionContext> PredictionContext::mergeSingletons(
- const Ref<SingletonPredictionContext>& a,
- const Ref<SingletonPredictionContext>& b, bool rootIsWildcard,
- PredictionContextMergeCache* mergeCache) {
- if (mergeCache != nullptr) { // Can be null if not given to the ATNState from
- // which this call originates.
- auto existing = mergeCache->get(a, b);
- if (existing) {
- return existing;
- }
- existing = mergeCache->get(b, a);
- if (existing) {
- return existing;
- }
- }
-
- Ref<PredictionContext> rootMerge = mergeRoot(a, b, rootIsWildcard);
- if (rootMerge) {
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, rootMerge);
- }
- return rootMerge;
- }
-
- Ref<PredictionContext> parentA = a->parent;
- Ref<PredictionContext> parentB = b->parent;
- if (a->returnState == b->returnState) { // a == b
- Ref<PredictionContext> parent =
- merge(parentA, parentB, rootIsWildcard, mergeCache);
-
- // If parent is same as existing a or b parent or reduced to a parent,
- // return it.
- if (parent == parentA) { // ax + bx = ax, if a=b
- return a;
- }
- if (parent == parentB) { // ax + bx = bx, if a=b
- return b;
- }
-
- // else: ax + ay = a'[x,y]
- // merge parents x and y, giving array node with x,y then remainders
- // of those graphs. dup a, a' points at merged array
- // new joined parent so create new singleton pointing to it, a'
- Ref<PredictionContext> a_ =
- SingletonPredictionContext::create(parent, a->returnState);
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, a_);
- }
- return a_;
- } else {
- // a != b payloads differ
- // see if we can collapse parents due to $+x parents if local ctx
- Ref<PredictionContext> singleParent;
- if (a == b || (*parentA == *parentB)) { // ax + bx = [a,b]x
- singleParent = parentA;
- }
- if (singleParent) { // parents are same, sort payloads and use same parent
- std::vector<size_t> payloads = {a->returnState, b->returnState};
- if (a->returnState > b->returnState) {
- payloads[0] = b->returnState;
- payloads[1] = a->returnState;
- }
- std::vector<Ref<PredictionContext>> parents = {singleParent,
- singleParent};
- Ref<PredictionContext> a_ =
- std::make_shared<ArrayPredictionContext>(parents, payloads);
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, a_);
- }
- return a_;
- }
-
- // parents differ and can't merge them. Just pack together
- // into array; can't merge.
- // ax + by = [ax,by]
- Ref<PredictionContext> a_;
- if (a->returnState > b->returnState) { // sort by payload
- std::vector<size_t> payloads = {b->returnState, a->returnState};
- std::vector<Ref<PredictionContext>> parents = {b->parent, a->parent};
- a_ = std::make_shared<ArrayPredictionContext>(parents, payloads);
- } else {
- std::vector<size_t> payloads = {a->returnState, b->returnState};
- std::vector<Ref<PredictionContext>> parents = {a->parent, b->parent};
- a_ = std::make_shared<ArrayPredictionContext>(parents, payloads);
- }
-
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, a_);
- }
- return a_;
- }
-}
-
-Ref<PredictionContext> PredictionContext::mergeRoot(
- const Ref<SingletonPredictionContext>& a,
- const Ref<SingletonPredictionContext>& b, bool rootIsWildcard) {
- if (rootIsWildcard) {
- if (a == EMPTY) { // * + b = *
- return EMPTY;
- }
- if (b == EMPTY) { // a + * = *
- return EMPTY;
- }
- } else {
- if (a == EMPTY && b == EMPTY) { // $ + $ = $
- return EMPTY;
- }
- if (a == EMPTY) { // $ + x = [$,x]
- std::vector<size_t> payloads = {b->returnState, EMPTY_RETURN_STATE};
- std::vector<Ref<PredictionContext>> parents = {b->parent, nullptr};
- Ref<PredictionContext> joined =
- std::make_shared<ArrayPredictionContext>(parents, payloads);
- return joined;
- }
- if (b == EMPTY) { // x + $ = [$,x] ($ is always first if present)
- std::vector<size_t> payloads = {a->returnState, EMPTY_RETURN_STATE};
- std::vector<Ref<PredictionContext>> parents = {a->parent, nullptr};
- Ref<PredictionContext> joined =
- std::make_shared<ArrayPredictionContext>(parents, payloads);
- return joined;
- }
- }
- return nullptr;
-}
-
-Ref<PredictionContext> PredictionContext::mergeArrays(
- const Ref<ArrayPredictionContext>& a, const Ref<ArrayPredictionContext>& b,
- bool rootIsWildcard, PredictionContextMergeCache* mergeCache) {
- if (mergeCache != nullptr) {
- auto existing = mergeCache->get(a, b);
- if (existing) {
- return existing;
- }
- existing = mergeCache->get(b, a);
- if (existing) {
- return existing;
- }
- }
-
- // merge sorted payloads a + b => M
- size_t i = 0; // walks a
- size_t j = 0; // walks b
- size_t k = 0; // walks target M array
-
- std::vector<size_t> mergedReturnStates(a->returnStates.size() +
- b->returnStates.size());
- std::vector<Ref<PredictionContext>> mergedParents(a->returnStates.size() +
- b->returnStates.size());
-
- // walk and merge to yield mergedParents, mergedReturnStates
- while (i < a->returnStates.size() && j < b->returnStates.size()) {
- Ref<PredictionContext> a_parent = a->parents[i];
- Ref<PredictionContext> b_parent = b->parents[j];
- if (a->returnStates[i] == b->returnStates[j]) {
- // same payload (stack tops are equal), must yield merged singleton
- size_t payload = a->returnStates[i];
- // $+$ = $
- bool both$ = payload == EMPTY_RETURN_STATE && a_parent && b_parent;
- bool ax_ax =
- (a_parent && b_parent) && *a_parent == *b_parent; // ax+ax -> ax
- if (both$ || ax_ax) {
- mergedParents[k] = a_parent; // choose left
- mergedReturnStates[k] = payload;
- } else { // ax+ay -> a'[x,y]
- Ref<PredictionContext> mergedParent =
- merge(a_parent, b_parent, rootIsWildcard, mergeCache);
- mergedParents[k] = mergedParent;
- mergedReturnStates[k] = payload;
- }
- i++; // hop over left one as usual
- j++; // but also skip one in right side since we merge
- } else if (a->returnStates[i] < b->returnStates[j]) { // copy a[i] to M
- mergedParents[k] = a_parent;
- mergedReturnStates[k] = a->returnStates[i];
- i++;
- } else { // b > a, copy b[j] to M
- mergedParents[k] = b_parent;
- mergedReturnStates[k] = b->returnStates[j];
- j++;
- }
- k++;
- }
-
- // copy over any payloads remaining in either array
- if (i < a->returnStates.size()) {
- for (std::vector<int>::size_type p = i; p < a->returnStates.size(); p++) {
- mergedParents[k] = a->parents[p];
- mergedReturnStates[k] = a->returnStates[p];
- k++;
- }
- } else {
- for (std::vector<int>::size_type p = j; p < b->returnStates.size(); p++) {
- mergedParents[k] = b->parents[p];
- mergedReturnStates[k] = b->returnStates[p];
- k++;
- }
- }
-
- // trim merged if we combined a few that had same stack tops
- if (k < mergedParents.size()) { // write index < last position; trim
- if (k == 1) { // for just one merged element, return singleton top
- Ref<PredictionContext> a_ = SingletonPredictionContext::create(
- mergedParents[0], mergedReturnStates[0]);
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, a_);
- }
- return a_;
- }
- mergedParents.resize(k);
- mergedReturnStates.resize(k);
- }
-
- Ref<ArrayPredictionContext> M = std::make_shared<ArrayPredictionContext>(
- mergedParents, mergedReturnStates);
-
- // if we created same array as a or b, return that instead
- // TO_DO: track whether this is possible above during merge sort for speed
- if (*M == *a) {
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, a);
- }
- return a;
- }
- if (*M == *b) {
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, b);
- }
- return b;
- }
-
- // ml: this part differs from Java code. We have to recreate the context as
- // the parents array is copied on creation.
- if (combineCommonParents(mergedParents)) {
- mergedReturnStates.resize(mergedParents.size());
- M = std::make_shared<ArrayPredictionContext>(mergedParents,
- mergedReturnStates);
- }
-
- if (mergeCache != nullptr) {
- mergeCache->put(a, b, M);
- }
- return M;
-}
-
-bool PredictionContext::combineCommonParents(
- std::vector<Ref<PredictionContext>>& parents) {
- std::set<Ref<PredictionContext>> uniqueParents;
- for (size_t p = 0; p < parents.size(); ++p) {
- Ref<PredictionContext> parent = parents[p];
- if (uniqueParents.find(parent) == uniqueParents.end()) { // don't replace
- uniqueParents.insert(parent);
- }
- }
-
- for (size_t p = 0; p < parents.size(); ++p) {
- parents[p] = *uniqueParents.find(parents[p]);
- }
-
- return true;
-}
-
-std::string PredictionContext::toDOTString(
- const Ref<PredictionContext>& context) {
- if (context == nullptr) {
- return "";
- }
-
- std::stringstream ss;
- ss << "digraph G {\n"
- << "rankdir=LR;\n";
-
- std::vector<Ref<PredictionContext>> nodes = getAllContextNodes(context);
- std::sort(nodes.begin(), nodes.end(),
- [](const Ref<PredictionContext>& o1,
- const Ref<PredictionContext>& o2) { return o1->id - o2->id; });
-
- for (auto current : nodes) {
- if (is<SingletonPredictionContext>(current)) {
- std::string s = std::to_string(current->id);
- ss << " s" << s;
- std::string returnState = std::to_string(current->getReturnState(0));
- if (is<EmptyPredictionContext>(current)) {
- returnState = "$";
- }
- ss << " [label=\"" << returnState << "\"];\n";
- continue;
- }
- Ref<ArrayPredictionContext> arr =
- std::static_pointer_cast<ArrayPredictionContext>(current);
- ss << " s" << arr->id << " [shape=box, label=\""
- << "[";
- bool first = true;
- for (auto inv : arr->returnStates) {
- if (!first) {
- ss << ", ";
- }
- if (inv == EMPTY_RETURN_STATE) {
- ss << "$";
- } else {
- ss << inv;
- }
- first = false;
- }
- ss << "]";
- ss << "\"];\n";
- }
-
- for (auto current : nodes) {
- if (current == EMPTY) {
- continue;
- }
- for (size_t i = 0; i < current->size(); i++) {
- if (!current->getParent(i)) {
- continue;
- }
- ss << " s" << current->id << "->"
- << "s" << current->getParent(i)->id;
- if (current->size() > 1) {
- ss << " [label=\"parent[" << i << "]\"];\n";
- } else {
- ss << ";\n";
- }
- }
- }
-
- ss << "}\n";
- return ss.str();
-}
-
-// The "visited" map is just a temporary structure to control the retrieval
-// process (which is recursive).
-Ref<PredictionContext> PredictionContext::getCachedContext(
- const Ref<PredictionContext>& context, PredictionContextCache& contextCache,
- std::map<Ref<PredictionContext>, Ref<PredictionContext>>& visited) {
- if (context->isEmpty()) {
- return context;
- }
-
- {
- auto iterator = visited.find(context);
- if (iterator != visited.end())
- return iterator->second; // Not necessarily the same as context.
- }
-
- auto iterator = contextCache.find(context);
- if (iterator != contextCache.end()) {
- visited[context] = *iterator;
-
- return *iterator;
- }
-
- bool changed = false;
-
- std::vector<Ref<PredictionContext>> parents(context->size());
- for (size_t i = 0; i < parents.size(); i++) {
- Ref<PredictionContext> parent =
- getCachedContext(context->getParent(i), contextCache, visited);
- if (changed || parent != context->getParent(i)) {
- if (!changed) {
- parents.clear();
- for (size_t j = 0; j < context->size(); j++) {
- parents.push_back(context->getParent(j));
- }
-
- changed = true;
- }
-
- parents[i] = parent;
- }
- }
-
- if (!changed) {
- contextCache.insert(context);
- visited[context] = context;
-
- return context;
- }
-
- Ref<PredictionContext> updated;
- if (parents.empty()) {
- updated = EMPTY;
- } else if (parents.size() == 1) {
- updated = SingletonPredictionContext::create(parents[0],
- context->getReturnState(0));
- contextCache.insert(updated);
- } else {
- updated = std::make_shared<ArrayPredictionContext>(
- parents, std::dynamic_pointer_cast<ArrayPredictionContext>(context)
- ->returnStates);
- contextCache.insert(updated);
- }
-
- visited[updated] = updated;
- visited[context] = updated;
-
- return updated;
-}
-
-std::vector<Ref<PredictionContext>> PredictionContext::getAllContextNodes(
- const Ref<PredictionContext>& context) {
- std::vector<Ref<PredictionContext>> nodes;
- std::set<PredictionContext*> visited;
- getAllContextNodes_(context, nodes, visited);
- return nodes;
-}
-
-void PredictionContext::getAllContextNodes_(
- const Ref<PredictionContext>& context,
- std::vector<Ref<PredictionContext>>& nodes,
- std::set<PredictionContext*>& visited) {
- if (visited.find(context.get()) != visited.end()) {
- return; // Already done.
- }
-
- visited.insert(context.get());
- nodes.push_back(context);
-
- for (size_t i = 0; i < context->size(); i++) {
- getAllContextNodes_(context->getParent(i), nodes, visited);
- }
-}
-
-std::string PredictionContext::toString() const {
- return antlrcpp::toString(this);
-}
-
-std::string PredictionContext::toString(Recognizer* /*recog*/) const {
- return toString();
-}
-
-std::vector<std::string> PredictionContext::toStrings(Recognizer* recognizer,
- int currentState) {
- return toStrings(recognizer, EMPTY, currentState);
-}
-
-std::vector<std::string> PredictionContext::toStrings(
- Recognizer* recognizer, const Ref<PredictionContext>& stop,
- int currentState) {
- std::vector<std::string> result;
-
- for (size_t perm = 0;; perm++) {
- size_t offset = 0;
- bool last = true;
- PredictionContext* p = this;
- size_t stateNumber = currentState;
-
- std::stringstream ss;
- ss << "[";
- bool outerContinue = false;
- while (!p->isEmpty() && p != stop.get()) {
- size_t index = 0;
- if (p->size() > 0) {
- size_t bits = 1;
- while ((1ULL << bits) < p->size()) {
- bits++;
- }
-
- size_t mask = (1 << bits) - 1;
- index = (perm >> offset) & mask;
- last &= index >= p->size() - 1;
- if (index >= p->size()) {
- outerContinue = true;
- break;
- }
- offset += bits;
- }
-
- if (recognizer != nullptr) {
- if (ss.tellp() > 1) {
- // first char is '['; if there is more than that, this isn't the first rule
- ss << ' ';
- }
-
- const ATN& atn = recognizer->getATN();
- ATNState* s = atn.states[stateNumber];
- std::string ruleName = recognizer->getRuleNames()[s->ruleIndex];
- ss << ruleName;
- } else if (p->getReturnState(index) != EMPTY_RETURN_STATE) {
- if (!p->isEmpty()) {
- if (ss.tellp() > 1) {
- // first char is '['; if there is more than that, this isn't the first rule
- ss << ' ';
- }
-
- ss << p->getReturnState(index);
- }
- }
- stateNumber = p->getReturnState(index);
- p = p->getParent(index).get();
- }
-
- if (outerContinue) continue;
-
- ss << "]";
- result.push_back(ss.str());
-
- if (last) {
- break;
- }
- }
-
- return result;
-}
-
-//----------------- PredictionContextMergeCache
-//------------------------------------------------------------------------
-
-Ref<PredictionContext> PredictionContextMergeCache::put(
- Ref<PredictionContext> const& key1, Ref<PredictionContext> const& key2,
- Ref<PredictionContext> const& value) {
- Ref<PredictionContext> previous;
-
- auto iterator = _data.find(key1);
- if (iterator == _data.end())
- _data[key1][key2] = value;
- else {
- auto iterator2 = iterator->second.find(key2);
- if (iterator2 != iterator->second.end()) previous = iterator2->second;
- iterator->second[key2] = value;
- }
-
- return previous;
-}
-
-Ref<PredictionContext> PredictionContextMergeCache::get(
- Ref<PredictionContext> const& key1, Ref<PredictionContext> const& key2) {
- auto iterator = _data.find(key1);
- if (iterator == _data.end()) return nullptr;
-
- auto iterator2 = iterator->second.find(key2);
- if (iterator2 == iterator->second.end()) return nullptr;
-
- return iterator2->second;
-}
-
-void PredictionContextMergeCache::clear() { _data.clear(); }
-
-std::string PredictionContextMergeCache::toString() const {
- std::string result;
- for (auto pair : _data)
- for (auto pair2 : pair.second) result += pair2.second->toString() + "\n";
-
- return result;
-}
-
-size_t PredictionContextMergeCache::count() const {
- size_t result = 0;
- for (auto entry : _data) result += entry.second.size();
- return result;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.h
deleted file mode 100644
index 0c985b8829..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionContext.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Recognizer.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-struct PredictionContextHasher;
-struct PredictionContextComparer;
-class PredictionContextMergeCache;
-
-typedef std::unordered_set<Ref<PredictionContext>, PredictionContextHasher,
- PredictionContextComparer>
- PredictionContextCache;
-
-class ANTLR4CPP_PUBLIC PredictionContext {
- public:
- /// Represents $ in local context prediction, which means wildcard.
- /// *+x = *.
- static const Ref<PredictionContext> EMPTY;
-
- /// Represents $ in an array in full context mode, when $
- /// doesn't mean wildcard: $ + x = [$,x]. Here,
- /// $ = EMPTY_RETURN_STATE.
- // ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is
- // already used in places where
- // -1 is converted to unsigned, so we use a different value here. Any
- // value does the job provided it doesn't conflict with real return
- // states.
- static const size_t EMPTY_RETURN_STATE =
- static_cast<size_t>(-10); // std::numeric_limits<size_t>::max() - 9;
-
- private:
- static const size_t INITIAL_HASH = 1;
-
- public:
- static size_t globalNodeCount;
- const size_t id;
-
- /// <summary>
- /// Stores the computed hash code of this <seealso cref="PredictionContext"/>.
- /// The hash code is computed in parts to match the following reference
- /// algorithm.
- ///
- /// <pre>
- /// private int referenceHashCode() {
- /// int hash = <seealso cref="MurmurHash#initialize"/>(<seealso
- /// cref="#INITIAL_HASH"/>);
- ///
- /// for (int i = 0; i < <seealso cref="#size()"/>; i++) {
- /// hash = <seealso cref="MurmurHash#update"/>(hash, <seealso
- /// cref="#getParent"/>(i));
- /// }
- ///
- /// for (int i = 0; i < <seealso cref="#size()"/>; i++) {
- /// hash = <seealso cref="MurmurHash#update"/>(hash, <seealso
- /// cref="#getReturnState"/>(i));
- /// }
- ///
- /// hash = <seealso cref="MurmurHash#finish"/>(hash, 2 * <seealso
- /// cref="#size()"/>); return hash;
- /// }
- /// </pre>
- /// </summary>
- const size_t cachedHashCode;
-
- protected:
- PredictionContext(size_t cachedHashCode);
- ~PredictionContext();
-
- public:
- /// Convert a RuleContext tree to a PredictionContext graph.
- /// Return EMPTY if outerContext is empty.
- static Ref<PredictionContext> fromRuleContext(const ATN& atn,
- RuleContext* outerContext);
-
- virtual size_t size() const = 0;
- virtual Ref<PredictionContext> getParent(size_t index) const = 0;
- virtual size_t getReturnState(size_t index) const = 0;
-
- virtual bool operator==(const PredictionContext& o) const = 0;
-
- /// This means only the EMPTY (wildcard? not sure) context is in the set.
- virtual bool isEmpty() const;
- virtual bool hasEmptyPath() const;
- virtual size_t hashCode() const;
-
- protected:
- static size_t calculateEmptyHashCode();
- static size_t calculateHashCode(Ref<PredictionContext> parent,
- size_t returnState);
- static size_t calculateHashCode(
- const std::vector<Ref<PredictionContext>>& parents,
- const std::vector<size_t>& returnStates);
-
- public:
- // dispatch
- static Ref<PredictionContext> merge(const Ref<PredictionContext>& a,
- const Ref<PredictionContext>& b,
- bool rootIsWildcard,
- PredictionContextMergeCache* mergeCache);
-
- /// <summary>
- /// Merge two <seealso cref="SingletonPredictionContext"/> instances.
- ///
- /// <p/>
- ///
- /// Stack tops equal, parents merge is same; return left graph.<br/>
- /// <embed src="images/SingletonMerge_SameRootSamePar.svg"
- /// type="image/svg+xml"/>
- ///
- /// <p/>
- ///
- /// Same stack top, parents differ; merge parents giving array node, then
- /// remainders of those graphs. A new root node is created to point to the
- /// merged parents.<br/>
- /// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
- /// type="image/svg+xml"/>
- ///
- /// <p/>
- ///
- /// Different stack tops pointing to same parent. Make array node for the
- /// root where both element in the root point to the same (original)
- /// parent.<br/>
- /// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
- /// type="image/svg+xml"/>
- ///
- /// <p/>
- ///
- /// Different stack tops pointing to different parents. Make array node for
- /// the root where each element points to the corresponding original
- /// parent.<br/>
- /// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
- /// type="image/svg+xml"/>
- /// </summary>
- /// <param name="a"> the first <seealso cref="SingletonPredictionContext"/>
- /// </param> <param name="b"> the second <seealso
- /// cref="SingletonPredictionContext"/> </param> <param name="rootIsWildcard">
- /// {@code true} if this is a local-context merge, otherwise false to indicate
- /// a full-context merge </param> <param name="mergeCache"> </param>
- static Ref<PredictionContext> mergeSingletons(
- const Ref<SingletonPredictionContext>& a,
- const Ref<SingletonPredictionContext>& b, bool rootIsWildcard,
- PredictionContextMergeCache* mergeCache);
-
- /**
- * Handle case where at least one of {@code a} or {@code b} is
- * {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used
- * to represent {@link #EMPTY}.
- *
- * <h2>Local-Context Merges</h2>
- *
- * <p>These local-context merge operations are used when {@code
- * rootIsWildcard} is true.</p>
- *
- * <p>{@link #EMPTY} is superset of any graph; return {@link #EMPTY}.<br>
- * <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
- *
- * <p>{@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is
- * {@code #EMPTY}; return left graph.<br>
- * <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
- *
- * <p>Special case of last merge if local context.<br>
- * <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
- *
- * <h2>Full-Context Merges</h2>
- *
- * <p>These full-context merge operations are used when {@code rootIsWildcard}
- * is false.</p>
- *
- * <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
- *
- * <p>Must keep all contexts; {@link #EMPTY} in array is a special value (and
- * null parent).<br>
- * <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
- *
- * <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
- *
- * @param a the first {@link SingletonPredictionContext}
- * @param b the second {@link SingletonPredictionContext}
- * @param rootIsWildcard {@code true} if this is a local-context merge,
- * otherwise false to indicate a full-context merge
- */
- static Ref<PredictionContext> mergeRoot(
- const Ref<SingletonPredictionContext>& a,
- const Ref<SingletonPredictionContext>& b, bool rootIsWildcard);
-
- /**
- * Merge two {@link ArrayPredictionContext} instances.
- *
- * <p>Different tops, different parents.<br>
- * <embed src="images/ArrayMerge_DiffTopDiffPar.svg"
- * type="image/svg+xml"/></p>
- *
- * <p>Shared top, same parents.<br>
- * <embed src="images/ArrayMerge_ShareTopSamePar.svg"
- * type="image/svg+xml"/></p>
- *
- * <p>Shared top, different parents.<br>
- * <embed src="images/ArrayMerge_ShareTopDiffPar.svg"
- * type="image/svg+xml"/></p>
- *
- * <p>Shared top, all shared parents.<br>
- * <embed src="images/ArrayMerge_ShareTopSharePar.svg"
- * type="image/svg+xml"/></p>
- *
- * <p>Equal tops, merge parents and reduce top to
- * {@link SingletonPredictionContext}.<br>
- * <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
- */
- static Ref<PredictionContext> mergeArrays(
- const Ref<ArrayPredictionContext>& a,
- const Ref<ArrayPredictionContext>& b, bool rootIsWildcard,
- PredictionContextMergeCache* mergeCache);
-
- protected:
- /// Make pass over all M parents; merge any equal() ones.
- /// @returns true if the list has been changed (i.e. duplicates were found).
- static bool combineCommonParents(
- std::vector<Ref<PredictionContext>>& parents);
-
- public:
- static std::string toDOTString(const Ref<PredictionContext>& context);
-
- static Ref<PredictionContext> getCachedContext(
- const Ref<PredictionContext>& context,
- PredictionContextCache& contextCache,
- std::map<Ref<PredictionContext>, Ref<PredictionContext>>& visited);
-
- // ter's recursive version of Sam's getAllNodes()
- static std::vector<Ref<PredictionContext>> getAllContextNodes(
- const Ref<PredictionContext>& context);
- static void getAllContextNodes_(const Ref<PredictionContext>& context,
- std::vector<Ref<PredictionContext>>& nodes,
- std::set<PredictionContext*>& visited);
-
- virtual std::string toString() const;
- virtual std::string toString(Recognizer* recog) const;
-
- std::vector<std::string> toStrings(Recognizer* recognizer, int currentState);
- std::vector<std::string> toStrings(Recognizer* recognizer,
- const Ref<PredictionContext>& stop,
- int currentState);
-};
-
-struct PredictionContextHasher {
- size_t operator()(const Ref<PredictionContext>& k) const {
- return k->hashCode();
- }
-};
-
-struct PredictionContextComparer {
- bool operator()(const Ref<PredictionContext>& lhs,
- const Ref<PredictionContext>& rhs) const {
- if (lhs == rhs) // Object identity.
- return true;
- return (lhs->hashCode() == rhs->hashCode()) && (*lhs == *rhs);
- }
-};
-
-class PredictionContextMergeCache {
- public:
- Ref<PredictionContext> put(Ref<PredictionContext> const& key1,
- Ref<PredictionContext> const& key2,
- Ref<PredictionContext> const& value);
- Ref<PredictionContext> get(Ref<PredictionContext> const& key1,
- Ref<PredictionContext> const& key2);
-
- void clear();
- std::string toString() const;
- size_t count() const;
-
- private:
- std::unordered_map<
- Ref<PredictionContext>,
- std::unordered_map<Ref<PredictionContext>, Ref<PredictionContext>,
- PredictionContextHasher, PredictionContextComparer>,
- PredictionContextHasher, PredictionContextComparer>
- _data;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.cpp
deleted file mode 100644
index 6b6a0c8a43..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.cpp
+++ /dev/null
@@ -1,217 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "SemanticContext.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/RuleStopState.h"
-#include "misc/MurmurHash.h"
-
-#include "PredictionMode.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-struct AltAndContextConfigHasher {
- /**
- * The hash code is only a function of the {@link ATNState#stateNumber}
- * and {@link ATNConfig#context}.
- */
- size_t operator()(ATNConfig* o) const {
- size_t hashCode = misc::MurmurHash::initialize(7);
- hashCode = misc::MurmurHash::update(hashCode, o->state->stateNumber);
- hashCode = misc::MurmurHash::update(hashCode, o->context);
- return misc::MurmurHash::finish(hashCode, 2);
- }
-};
-
-struct AltAndContextConfigComparer {
- bool operator()(ATNConfig* a, ATNConfig* b) const {
- if (a == b) {
- return true;
- }
- return a->state->stateNumber == b->state->stateNumber &&
- *a->context == *b->context;
- }
-};
-
-bool PredictionModeClass::hasSLLConflictTerminatingPrediction(
- PredictionMode mode, ATNConfigSet* configs) {
- /* Configs in rule stop states indicate reaching the end of the decision
- * rule (local context) or end of start rule (full context). If all
- * configs meet this condition, then none of the configurations is able
- * to match additional input so we terminate prediction.
- */
- if (allConfigsInRuleStopStates(configs)) {
- return true;
- }
-
- bool heuristic;
-
- // Pure SLL mode parsing or SLL+LL if:
- // Don't bother with combining configs from different semantic
- // contexts if we can fail over to full LL; costs more time
- // since we'll often fail over anyway.
- if (mode == PredictionMode::SLL || !configs->hasSemanticContext) {
- std::vector<antlrcpp::BitSet> altsets = getConflictingAltSubsets(configs);
- heuristic =
- hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(configs);
- } else {
- // dup configs, tossing out semantic predicates
- ATNConfigSet dup(true);
- for (auto& config : configs->configs) {
- Ref<ATNConfig> c =
- std::make_shared<ATNConfig>(config, SemanticContext::NONE);
- dup.add(c);
- }
- std::vector<antlrcpp::BitSet> altsets = getConflictingAltSubsets(&dup);
- heuristic =
- hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(&dup);
- }
-
- return heuristic;
-}
-
-bool PredictionModeClass::hasConfigInRuleStopState(ATNConfigSet* configs) {
- for (auto& c : configs->configs) {
- if (is<RuleStopState*>(c->state)) {
- return true;
- }
- }
-
- return false;
-}
-
-bool PredictionModeClass::allConfigsInRuleStopStates(ATNConfigSet* configs) {
- for (auto& config : configs->configs) {
- if (!is<RuleStopState*>(config->state)) {
- return false;
- }
- }
-
- return true;
-}
-
-size_t PredictionModeClass::resolvesToJustOneViableAlt(
- const std::vector<antlrcpp::BitSet>& altsets) {
- return getSingleViableAlt(altsets);
-}
-
-bool PredictionModeClass::allSubsetsConflict(
- const std::vector<antlrcpp::BitSet>& altsets) {
- return !hasNonConflictingAltSet(altsets);
-}
-
-bool PredictionModeClass::hasNonConflictingAltSet(
- const std::vector<antlrcpp::BitSet>& altsets) {
- for (antlrcpp::BitSet alts : altsets) {
- if (alts.count() == 1) {
- return true;
- }
- }
- return false;
-}
-
-bool PredictionModeClass::hasConflictingAltSet(
- const std::vector<antlrcpp::BitSet>& altsets) {
- for (antlrcpp::BitSet alts : altsets) {
- if (alts.count() > 1) {
- return true;
- }
- }
- return false;
-}
-
-bool PredictionModeClass::allSubsetsEqual(
- const std::vector<antlrcpp::BitSet>& altsets) {
- if (altsets.empty()) {
- return true;
- }
-
- const antlrcpp::BitSet& first = *altsets.begin();
- for (const antlrcpp::BitSet& alts : altsets) {
- if (alts != first) {
- return false;
- }
- }
- return true;
-}
-
-size_t PredictionModeClass::getUniqueAlt(
- const std::vector<antlrcpp::BitSet>& altsets) {
- antlrcpp::BitSet all = getAlts(altsets);
- if (all.count() == 1) {
- return all.nextSetBit(0);
- }
- return ATN::INVALID_ALT_NUMBER;
-}
-
-antlrcpp::BitSet PredictionModeClass::getAlts(
- const std::vector<antlrcpp::BitSet>& altsets) {
- antlrcpp::BitSet all;
- for (antlrcpp::BitSet alts : altsets) {
- all |= alts;
- }
-
- return all;
-}
-
-antlrcpp::BitSet PredictionModeClass::getAlts(ATNConfigSet* configs) {
- antlrcpp::BitSet alts;
- for (auto& config : configs->configs) {
- alts.set(config->alt);
- }
- return alts;
-}
-
-std::vector<antlrcpp::BitSet> PredictionModeClass::getConflictingAltSubsets(
- ATNConfigSet* configs) {
- std::unordered_map<ATNConfig*, antlrcpp::BitSet, AltAndContextConfigHasher,
- AltAndContextConfigComparer>
- configToAlts;
- for (auto& config : configs->configs) {
- configToAlts[config.get()].set(config->alt);
- }
- std::vector<antlrcpp::BitSet> values;
- for (auto it : configToAlts) {
- values.push_back(it.second);
- }
- return values;
-}
-
-std::map<ATNState*, antlrcpp::BitSet> PredictionModeClass::getStateToAltMap(
- ATNConfigSet* configs) {
- std::map<ATNState*, antlrcpp::BitSet> m;
- for (auto& c : configs->configs) {
- m[c->state].set(c->alt);
- }
- return m;
-}
-
-bool PredictionModeClass::hasStateAssociatedWithOneAlt(ATNConfigSet* configs) {
- std::map<ATNState*, antlrcpp::BitSet> x = getStateToAltMap(configs);
- for (std::map<ATNState*, antlrcpp::BitSet>::iterator it = x.begin();
- it != x.end(); it++) {
- if (it->second.count() == 1) return true;
- }
- return false;
-}
-
-size_t PredictionModeClass::getSingleViableAlt(
- const std::vector<antlrcpp::BitSet>& altsets) {
- antlrcpp::BitSet viableAlts;
- for (antlrcpp::BitSet alts : altsets) {
- size_t minAlt = alts.nextSetBit(0);
-
- viableAlts.set(minAlt);
- if (viableAlts.count() > 1) // more than 1 viable alt
- {
- return ATN::INVALID_ALT_NUMBER;
- }
- }
-
- return viableAlts.nextSetBit(0);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.h
deleted file mode 100644
index 042e4b19a0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/PredictionMode.h
+++ /dev/null
@@ -1,442 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "support/BitSet.h"
-
-namespace antlr4 {
-namespace atn {
-
-/**
- * This enumeration defines the prediction modes available in ANTLR 4 along with
- * utility methods for analyzing configuration sets for conflicts and/or
- * ambiguities.
- */
-enum class PredictionMode {
- /**
- * The SLL(*) prediction mode. This prediction mode ignores the current
- * parser context when making predictions. This is the fastest prediction
- * mode, and provides correct results for many grammars. This prediction
- * mode is more powerful than the prediction mode provided by ANTLR 3, but
- * may result in syntax errors for grammar and input combinations which are
- * not SLL.
- *
- * <p>
- * When using this prediction mode, the parser will either return a correct
- * parse tree (i.e. the same parse tree that would be returned with the
- * {@link #LL} prediction mode), or it will report a syntax error. If a
- * syntax error is encountered when using the {@link #SLL} prediction mode,
- * it may be due to either an actual syntax error in the input or indicate
- * that the particular combination of grammar and input requires the more
- * powerful {@link #LL} prediction abilities to complete successfully.</p>
- *
- * <p>
- * This prediction mode does not provide any guarantees for prediction
- * behavior for syntactically-incorrect inputs.</p>
- */
- SLL,
-
- /**
- * The LL(*) prediction mode. This prediction mode allows the current parser
- * context to be used for resolving SLL conflicts that occur during
- * prediction. This is the fastest prediction mode that guarantees correct
- * parse results for all combinations of grammars with syntactically correct
- * inputs.
- *
- * <p>
- * When using this prediction mode, the parser will make correct decisions
- * for all syntactically-correct grammar and input combinations. However, in
- * cases where the grammar is truly ambiguous this prediction mode might not
- * report a precise answer for <em>exactly which</em> alternatives are
- * ambiguous.</p>
- *
- * <p>
- * This prediction mode does not provide any guarantees for prediction
- * behavior for syntactically-incorrect inputs.</p>
- */
- LL,
-
- /**
- * The LL(*) prediction mode with exact ambiguity detection. In addition to
- * the correctness guarantees provided by the {@link #LL} prediction mode,
- * this prediction mode instructs the prediction algorithm to determine the
- * complete and exact set of ambiguous alternatives for every ambiguous
- * decision encountered while parsing.
- *
- * <p>
- * This prediction mode may be used for diagnosing ambiguities during
- * grammar development. Due to the performance overhead of calculating sets
- * of ambiguous alternatives, this prediction mode should be avoided when
- * the exact results are not necessary.</p>
- *
- * <p>
- * This prediction mode does not provide any guarantees for prediction
- * behavior for syntactically-incorrect inputs.</p>
- */
- LL_EXACT_AMBIG_DETECTION
-};
-
-class ANTLR4CPP_PUBLIC PredictionModeClass {
- public:
- /**
- * Computes the SLL prediction termination condition.
- *
- * <p>
- * This method computes the SLL prediction termination condition for both of
- * the following cases.</p>
- *
- * <ul>
- * <li>The usual SLL+LL fallback upon SLL conflict</li>
- * <li>Pure SLL without LL fallback</li>
- * </ul>
- *
- * <p><strong>COMBINED SLL+LL PARSING</strong></p>
- *
- * <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
- * ensured regardless of how the termination condition is computed by this
- * method. Due to the substantially higher cost of LL prediction, the
- * prediction should only fall back to LL when the additional lookahead
- * cannot lead to a unique SLL prediction.</p>
- *
- * <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
- * conflicting subsets should fall back to full LL, even if the
- * configuration sets don't resolve to the same alternative (e.g.
 * {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
- * configuration, SLL could continue with the hopes that more lookahead will
- * resolve via one of those non-conflicting configurations.</p>
- *
 * <p>Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
- * stops when it sees only conflicting configuration subsets. In contrast,
- * full LL keeps going when there is uncertainty.</p>
- *
- * <p><strong>HEURISTIC</strong></p>
- *
- * <p>As a heuristic, we stop prediction when we see any conflicting subset
- * unless we see a state that only has one alternative associated with it.
- * The single-alt-state thing lets prediction continue upon rules like
- * (otherwise, it would admit defeat too soon):</p>
- *
- * <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}</p>
- *
- * <p>When the ATN simulation reaches the state before {@code ';'}, it has a
- * DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
- * {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
 * processing this node because alternative two has another way to continue,
- * via {@code [6|2|[]]}.</p>
- *
 * <p>It also lets us continue for this rule:</p>
- *
- * <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}</p>
- *
- * <p>After matching input A, we reach the stop state for rule A, state 1.
- * State 8 is the state right before B. Clearly alternatives 1 and 2
- * conflict and no amount of further lookahead will separate the two.
- * However, alternative 3 will be able to continue and so we do not stop
- * working on this state. In the previous example, we're concerned with
- * states associated with the conflicting alternatives. Here alt 3 is not
- * associated with the conflicting configs, but since we can continue
- * looking for input reasonably, don't declare the state done.</p>
- *
- * <p><strong>PURE SLL PARSING</strong></p>
- *
- * <p>To handle pure SLL parsing, all we have to do is make sure that we
- * combine stack contexts for configurations that differ only by semantic
- * predicate. From there, we can do the usual SLL termination heuristic.</p>
- *
- * <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
- *
- * <p>SLL decisions don't evaluate predicates until after they reach DFA stop
- * states because they need to create the DFA cache that works in all
- * semantic situations. In contrast, full LL evaluates predicates collected
- * during start state computation so it can ignore predicates thereafter.
- * This means that SLL termination detection can totally ignore semantic
- * predicates.</p>
- *
- * <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but
- * not semantic predicate contexts so we might see two configurations like the
- * following.</p>
- *
- * <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
- *
- * <p>Before testing these configurations against others, we have to merge
- * {@code x} and {@code x'} (without modifying the existing configurations).
- * For example, we test {@code (x+x')==x''} when looking for conflicts in
- * the following configurations.</p>
- *
- * <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
- *
- * <p>If the configuration set has predicates (as indicated by
- * {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of
- * the configurations to strip out all of the predicates so that a standard
- * {@link ATNConfigSet} will merge everything ignoring predicates.</p>
- */
- static bool hasSLLConflictTerminatingPrediction(PredictionMode mode,
- ATNConfigSet* configs);
-
- /// <summary>
- /// Checks if any configuration in {@code configs} is in a
- /// <seealso cref="RuleStopState"/>. Configurations meeting this condition
- /// have reached the end of the decision rule (local context) or end of start
- /// rule (full context).
- /// </summary>
- /// <param name="configs"> the configuration set to test </param>
- /// <returns> {@code true} if any configuration in {@code configs} is in a
- /// <seealso cref="RuleStopState"/>, otherwise {@code false} </returns>
- static bool hasConfigInRuleStopState(ATNConfigSet* configs);
-
- /// <summary>
- /// Checks if all configurations in {@code configs} are in a
- /// <seealso cref="RuleStopState"/>. Configurations meeting this condition
- /// have reached the end of the decision rule (local context) or end of start
- /// rule (full context).
- /// </summary>
- /// <param name="configs"> the configuration set to test </param>
- /// <returns> {@code true} if all configurations in {@code configs} are in a
- /// <seealso cref="RuleStopState"/>, otherwise {@code false} </returns>
- static bool allConfigsInRuleStopStates(ATNConfigSet* configs);
-
- /**
- * Full LL prediction termination.
- *
- * <p>Can we stop looking ahead during ATN simulation or is there some
- * uncertainty as to which alternative we will ultimately pick, after
- * consuming more input? Even if there are partial conflicts, we might know
- * that everything is going to resolve to the same minimum alternative. That
- * means we can stop since no more lookahead will change that fact. On the
- * other hand, there might be multiple conflicts that resolve to different
- * minimums. That means we need more look ahead to decide which of those
- * alternatives we should predict.</p>
- *
- * <p>The basic idea is to split the set of configurations {@code C}, into
- * conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
- * non-conflicting configurations. Two configurations conflict if they have
- * identical {@link ATNConfig#state} and {@link ATNConfig#context} values
- * but different {@link ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)}
- * and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
- *
- * <p>Reduce these configuration subsets to the set of possible alternatives.
- * You can compute the alternative subsets in one pass as follows:</p>
- *
- * <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
- * {@code C} holding {@code s} and {@code ctx} fixed.</p>
- *
- * <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
- *
- * <pre>
- * map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
- * alt and not pred
- * </pre>
- *
- * <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
- *
- * <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
- * {@code s} and {@code ctx}.</p>
- *
- * <p>Reduce the subsets to singletons by choosing a minimum of each subset.
- * If the union of these alternative subsets is a singleton, then no amount of
- * more lookahead will help us. We will always pick that alternative. If,
- * however, there is more than one alternative, then we are uncertain which
- * alternative to predict and must continue looking for resolution. We may
- * or may not discover an ambiguity in the future, even if there are no
- * conflicting subsets this round.</p>
- *
- * <p>The biggest sin is to terminate early because it means we've made a
- * decision but were uncertain as to the eventual outcome. We haven't used
- * enough lookahead. On the other hand, announcing a conflict too late is no
- * big deal; you will still have the conflict. It's just inefficient. It
- * might even look until the end of file.</p>
- *
- * <p>No special consideration for semantic predicates is required because
- * predicates are evaluated on-the-fly for full LL prediction, ensuring that
- * no configuration contains a semantic context during the termination
- * check.</p>
- *
- * <p><strong>CONFLICTING CONFIGS</strong></p>
- *
- * <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
- * when {@code i!=j} but {@code x=x'}. Because we merge all
- * {@code (s, i, _)} configurations together, that means that there are at
- * most {@code n} configurations associated with state {@code s} for
- * {@code n} possible alternatives in the decision. The merged stacks
- * complicate the comparison of configuration contexts {@code x} and
- * {@code x'}. Sam checks to see if one is a subset of the other by calling
- * merge and checking to see if the merged result is either {@code x} or
- * {@code x'}. If the {@code x} associated with lowest alternative {@code i}
- * is the superset, then {@code i} is the only possible prediction since the
- * others resolve to {@code min(i)} as well. However, if {@code x} is
- * associated with {@code j>i} then at least one stack configuration for
- * {@code j} is not in conflict with alternative {@code i}. The algorithm
- * should keep going, looking for more lookahead due to the uncertainty.</p>
- *
 * <p>For simplicity, I'm doing an equality check between {@code x} and
- * {@code x'} that lets the algorithm continue to consume lookahead longer
- * than necessary. The reason I like the equality is of course the
- * simplicity but also because that is the test you need to detect the
- * alternatives that are actually in conflict.</p>
- *
- * <p><strong>CONTINUE/STOP RULE</strong></p>
- *
- * <p>Continue if union of resolved alternative sets from non-conflicting and
- * conflicting alternative subsets has more than one alternative. We are
- * uncertain about which alternative to predict.</p>
- *
- * <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us
- * which alternatives are still in the running for the amount of input we've
 * consumed at this point. The conflicting sets let us strip away
- * configurations that won't lead to more states because we resolve
- * conflicts to the configuration with a minimum alternate for the
- * conflicting set.</p>
- *
- * <p><strong>CASES</strong></p>
- *
- * <ul>
- *
- * <li>no conflicts and more than 1 alternative in set =&gt; continue</li>
- *
- * <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
- * {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
- * {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
- * {@code {1,3}} =&gt; continue
- * </li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
- * {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
- * {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
- * {@code {1}} =&gt; stop and predict 1</li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
- * {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
- * {@code {1}} = {@code {1}} =&gt; stop and predict 1, can announce
- * ambiguity {@code {1,2}}</li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
- * {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
- * {@code {2}} = {@code {1,2}} =&gt; continue</li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
- * {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
- * {@code {3}} = {@code {1,3}} =&gt; continue</li>
- *
- * </ul>
- *
- * <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
- *
- * <p>If all states report the same conflicting set of alternatives, then we
- * know we have the exact ambiguity set.</p>
- *
- * <p><code>|A_<em>i</em>|&gt;1</code> and
- * <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>,
- * <em>j</em>.</p>
- *
- * <p>In other words, we continue examining lookahead until all {@code A_i}
- * have more than one alternative and all {@code A_i} are the same. If
- * {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
- * because the resolved set is {@code {1}}. To determine what the real
- * ambiguity is, we have to know whether the ambiguity is between one and
- * two or one and three so we keep going. We can only stop prediction when
- * we need exact ambiguity detection when the sets look like
- * {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
- */
- static size_t resolvesToJustOneViableAlt(
- const std::vector<antlrcpp::BitSet>& altsets);
-
- /// <summary>
- /// Determines if every alternative subset in {@code altsets} contains more
- /// than one alternative.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if every <seealso cref="BitSet"/> in {@code
- /// altsets} has <seealso cref="BitSet#cardinality cardinality"/> &gt; 1,
- /// otherwise {@code false} </returns>
- static bool allSubsetsConflict(const std::vector<antlrcpp::BitSet>& altsets);
-
- /// <summary>
- /// Determines if any single alternative subset in {@code altsets} contains
- /// exactly one alternative.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if {@code altsets} contains a <seealso
- /// cref="BitSet"/> with
- /// <seealso cref="BitSet#cardinality cardinality"/> 1, otherwise {@code
- /// false}
- /// </returns>
- static bool hasNonConflictingAltSet(
- const std::vector<antlrcpp::BitSet>& altsets);
-
- /// <summary>
- /// Determines if any single alternative subset in {@code altsets} contains
- /// more than one alternative.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if {@code altsets} contains a <seealso
- /// cref="BitSet"/> with
- /// <seealso cref="BitSet#cardinality cardinality"/> &gt; 1, otherwise {@code
- /// false} </returns>
- static bool hasConflictingAltSet(
- const std::vector<antlrcpp::BitSet>& altsets);
-
- /// <summary>
- /// Determines if every alternative subset in {@code altsets} is equivalent.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if every member of {@code altsets} is equal to the
- /// others, otherwise {@code false} </returns>
- static bool allSubsetsEqual(const std::vector<antlrcpp::BitSet>& altsets);
-
- /// <summary>
- /// Returns the unique alternative predicted by all alternative subsets in
- /// {@code altsets}. If no such alternative exists, this method returns
- /// <seealso cref="ATN#INVALID_ALT_NUMBER"/>.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- static size_t getUniqueAlt(const std::vector<antlrcpp::BitSet>& altsets);
-
- /// <summary>
- /// Gets the complete set of represented alternatives for a collection of
- /// alternative subsets. This method returns the union of each <seealso
- /// cref="BitSet"/>
- /// in {@code altsets}.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> the set of represented alternatives in {@code altsets}
- /// </returns>
- static antlrcpp::BitSet getAlts(const std::vector<antlrcpp::BitSet>& altsets);
-
- /** Get union of all alts from configs. @since 4.5.1 */
- static antlrcpp::BitSet getAlts(ATNConfigSet* configs);
-
- /// <summary>
- /// This function gets the conflicting alt subsets from a configuration set.
- /// For each configuration {@code c} in {@code configs}:
- ///
- /// <pre>
- /// map[c] U= c.<seealso cref="ATNConfig#alt alt"/> # map hash/equals uses s
- /// and x, not alt and not pred
- /// </pre>
- /// </summary>
- static std::vector<antlrcpp::BitSet> getConflictingAltSubsets(
- ATNConfigSet* configs);
-
- /// <summary>
- /// Get a map from state to alt subset from a configuration set. For each
- /// configuration {@code c} in {@code configs}:
- ///
- /// <pre>
- /// map[c.<seealso cref="ATNConfig#state state"/>] U= c.<seealso
- /// cref="ATNConfig#alt alt"/>
- /// </pre>
- /// </summary>
- static std::map<ATNState*, antlrcpp::BitSet> getStateToAltMap(
- ATNConfigSet* configs);
-
- static bool hasStateAssociatedWithOneAlt(ATNConfigSet* configs);
-
- static size_t getSingleViableAlt(
- const std::vector<antlrcpp::BitSet>& altsets);
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp
deleted file mode 100644
index fc74dcf879..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/LookaheadEventInfo.h"
-#include "atn/PredicateEvalInfo.h"
-#include "support/CPPUtils.h"
-
-#include "atn/ProfilingATNSimulator.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::dfa;
-using namespace antlrcpp;
-
-using namespace std::chrono;
-
-ProfilingATNSimulator::ProfilingATNSimulator(Parser* parser)
- : ParserATNSimulator(
- parser, parser->getInterpreter<ParserATNSimulator>()->atn,
- parser->getInterpreter<ParserATNSimulator>()->decisionToDFA,
- parser->getInterpreter<ParserATNSimulator>()
- ->getSharedContextCache()) {
- for (size_t i = 0; i < atn.decisionToState.size(); i++) {
- _decisions.push_back(DecisionInfo(i));
- }
-}
-
-size_t ProfilingATNSimulator::adaptivePredict(TokenStream* input,
- size_t decision,
- ParserRuleContext* outerContext) {
- auto onExit = finally([this]() {
- _currentDecision = 0; // Originally -1, but that makes no sense (index into
- // a vector and init value is also 0).
- });
-
- _sllStopIndex = -1;
- _llStopIndex = -1;
- _currentDecision = decision;
- high_resolution_clock::time_point start = high_resolution_clock::now();
- size_t alt =
- ParserATNSimulator::adaptivePredict(input, decision, outerContext);
- high_resolution_clock::time_point stop = high_resolution_clock::now();
- _decisions[decision].timeInPrediction +=
- duration_cast<nanoseconds>(stop - start).count();
- _decisions[decision].invocations++;
-
- long long SLL_k = _sllStopIndex - _startIndex + 1;
- _decisions[decision].SLL_TotalLook += SLL_k;
- _decisions[decision].SLL_MinLook =
- _decisions[decision].SLL_MinLook == 0
- ? SLL_k
- : std::min(_decisions[decision].SLL_MinLook, SLL_k);
- if (SLL_k > _decisions[decision].SLL_MaxLook) {
- _decisions[decision].SLL_MaxLook = SLL_k;
- _decisions[decision].SLL_MaxLookEvent =
- std::make_shared<LookaheadEventInfo>(decision, nullptr, alt, input,
- _startIndex, _sllStopIndex, false);
- }
-
- if (_llStopIndex >= 0) {
- long long LL_k = _llStopIndex - _startIndex + 1;
- _decisions[decision].LL_TotalLook += LL_k;
- _decisions[decision].LL_MinLook =
- _decisions[decision].LL_MinLook == 0
- ? LL_k
- : std::min(_decisions[decision].LL_MinLook, LL_k);
- if (LL_k > _decisions[decision].LL_MaxLook) {
- _decisions[decision].LL_MaxLook = LL_k;
- _decisions[decision].LL_MaxLookEvent =
- std::make_shared<LookaheadEventInfo>(decision, nullptr, alt, input,
- _startIndex, _llStopIndex, true);
- }
- }
-
- return alt;
-}
-
-DFAState* ProfilingATNSimulator::getExistingTargetState(DFAState* previousD,
- size_t t) {
- // this method is called after each time the input position advances
- // during SLL prediction
- _sllStopIndex = (int)_input->index();
-
- DFAState* existingTargetState =
- ParserATNSimulator::getExistingTargetState(previousD, t);
- if (existingTargetState != nullptr) {
- _decisions[_currentDecision]
- .SLL_DFATransitions++; // count only if we transition over a DFA state
- if (existingTargetState == ERROR_STATE.get()) {
- _decisions[_currentDecision].errors.push_back(
- ErrorInfo(_currentDecision, previousD->configs.get(), _input,
- _startIndex, _sllStopIndex, false));
- }
- }
-
- _currentState = existingTargetState;
- return existingTargetState;
-}
-
-DFAState* ProfilingATNSimulator::computeTargetState(DFA& dfa,
- DFAState* previousD,
- size_t t) {
- DFAState* state = ParserATNSimulator::computeTargetState(dfa, previousD, t);
- _currentState = state;
- return state;
-}
-
-std::unique_ptr<ATNConfigSet> ProfilingATNSimulator::computeReachSet(
- ATNConfigSet* closure, size_t t, bool fullCtx) {
- if (fullCtx) {
- // this method is called after each time the input position advances
- // during full context prediction
- _llStopIndex = (int)_input->index();
- }
-
- std::unique_ptr<ATNConfigSet> reachConfigs =
- ParserATNSimulator::computeReachSet(closure, t, fullCtx);
- if (fullCtx) {
- _decisions[_currentDecision]
- .LL_ATNTransitions++; // count computation even if error
- if (reachConfigs != nullptr) {
- } else { // no reach on current lookahead symbol. ERROR.
- // TO_DO: does not handle delayed errors per
- // getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule()
- _decisions[_currentDecision].errors.push_back(ErrorInfo(
- _currentDecision, closure, _input, _startIndex, _llStopIndex, true));
- }
- } else {
- ++_decisions[_currentDecision].SLL_ATNTransitions;
- if (reachConfigs != nullptr) {
- } else { // no reach on current lookahead symbol. ERROR.
- _decisions[_currentDecision].errors.push_back(
- ErrorInfo(_currentDecision, closure, _input, _startIndex,
- _sllStopIndex, false));
- }
- }
- return reachConfigs;
-}
-
-bool ProfilingATNSimulator::evalSemanticContext(
- Ref<SemanticContext> const& pred, ParserRuleContext* parserCallStack,
- size_t alt, bool fullCtx) {
- bool result = ParserATNSimulator::evalSemanticContext(pred, parserCallStack,
- alt, fullCtx);
- if (std::dynamic_pointer_cast<SemanticContext::PrecedencePredicate>(pred) ==
-     nullptr) {
- bool fullContext = _llStopIndex >= 0;
- int stopIndex = fullContext ? _llStopIndex : _sllStopIndex;
- _decisions[_currentDecision].predicateEvals.push_back(
- PredicateEvalInfo(_currentDecision, _input, _startIndex, stopIndex,
- pred, result, alt, fullCtx));
- }
-
- return result;
-}
-
-void ProfilingATNSimulator::reportAttemptingFullContext(
- DFA& dfa, const BitSet& conflictingAlts, ATNConfigSet* configs,
- size_t startIndex, size_t stopIndex) {
- if (conflictingAlts.count() > 0) {
- conflictingAltResolvedBySLL = conflictingAlts.nextSetBit(0);
- } else {
- conflictingAltResolvedBySLL = configs->getAlts().nextSetBit(0);
- }
- _decisions[_currentDecision].LL_Fallback++;
- ParserATNSimulator::reportAttemptingFullContext(dfa, conflictingAlts, configs,
- startIndex, stopIndex);
-}
-
-void ProfilingATNSimulator::reportContextSensitivity(DFA& dfa,
- size_t prediction,
- ATNConfigSet* configs,
- size_t startIndex,
- size_t stopIndex) {
- if (prediction != conflictingAltResolvedBySLL) {
- _decisions[_currentDecision].contextSensitivities.push_back(
- ContextSensitivityInfo(_currentDecision, configs, _input, startIndex,
- stopIndex));
- }
- ParserATNSimulator::reportContextSensitivity(dfa, prediction, configs,
- startIndex, stopIndex);
-}
-
-void ProfilingATNSimulator::reportAmbiguity(DFA& dfa, DFAState* D,
- size_t startIndex, size_t stopIndex,
- bool exact, const BitSet& ambigAlts,
- ATNConfigSet* configs) {
- size_t prediction;
- if (ambigAlts.count() > 0) {
- prediction = ambigAlts.nextSetBit(0);
- } else {
- prediction = configs->getAlts().nextSetBit(0);
- }
- if (configs->fullCtx && prediction != conflictingAltResolvedBySLL) {
- // Even though this is an ambiguity we are reporting, we can
- // still detect some context sensitivities. Both SLL and LL
- // are showing a conflict, hence an ambiguity, but if they resolve
- // to different minimum alternatives we have also identified a
- // context sensitivity.
- _decisions[_currentDecision].contextSensitivities.push_back(
- ContextSensitivityInfo(_currentDecision, configs, _input, startIndex,
- stopIndex));
- }
- _decisions[_currentDecision].ambiguities.push_back(
- AmbiguityInfo(_currentDecision, configs, ambigAlts, _input, startIndex,
- stopIndex, configs->fullCtx));
- ParserATNSimulator::reportAmbiguity(dfa, D, startIndex, stopIndex, exact,
- ambigAlts, configs);
-}
-
-std::vector<DecisionInfo> ProfilingATNSimulator::getDecisionInfo() const {
- return _decisions;
-}
-
-DFAState* ProfilingATNSimulator::getCurrentState() const {
- return _currentState;
-}
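
Note: the profiling simulator removed above brackets each adaptivePredict call with high_resolution_clock timestamps and folds the SLL/LL lookahead depth into per-decision totals, minima and maxima. The following is a minimal standalone sketch of that bookkeeping pattern only; DecisionStats and timePrediction are hypothetical names, not the ANTLR DecisionInfo API.

    #include <algorithm>
    #include <chrono>
    #include <cstdio>

    // Hypothetical per-decision counters, mirroring the profiler's bookkeeping.
    struct DecisionStats {
      long long invocations = 0;
      long long timeInPrediction = 0;  // nanoseconds
      long long totalLook = 0, minLook = 0, maxLook = 0;
    };

    template <typename Fn>
    long long timePrediction(DecisionStats& d, long long lookahead, Fn&& predict) {
      using namespace std::chrono;
      auto start = high_resolution_clock::now();
      long long alt = predict();  // stands in for the real prediction call
      auto stop = high_resolution_clock::now();
      d.timeInPrediction += duration_cast<nanoseconds>(stop - start).count();
      d.invocations++;
      d.totalLook += lookahead;
      d.minLook = d.minLook == 0 ? lookahead : std::min(d.minLook, lookahead);
      d.maxLook = std::max(d.maxLook, lookahead);
      return alt;
    }

    int main() {
      DecisionStats d;
      timePrediction(d, 3, [] { return 1LL; });
      std::printf("invocations=%lld ns=%lld maxLook=%lld\n",
                  d.invocations, d.timeInPrediction, d.maxLook);
    }
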
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h
deleted file mode 100644
index 472f7c50f4..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/ProfilingATNSimulator.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionInfo.h"
-#include "atn/ParserATNSimulator.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ProfilingATNSimulator : public ParserATNSimulator {
- public:
- ProfilingATNSimulator(Parser* parser);
-
- virtual size_t adaptivePredict(TokenStream* input, size_t decision,
- ParserRuleContext* outerContext) override;
-
- virtual std::vector<DecisionInfo> getDecisionInfo() const;
- virtual dfa::DFAState* getCurrentState() const;
-
- protected:
- std::vector<DecisionInfo> _decisions;
-
- int _sllStopIndex = 0;
- int _llStopIndex = 0;
-
- size_t _currentDecision = 0;
- dfa::DFAState* _currentState;
-
- /// <summary>
- /// At the point of LL failover, we record how SLL would resolve the conflict
- /// so that
- /// we can determine whether or not a decision / input pair is
- /// context-sensitive. If LL gives a different result than SLL's predicted
- /// alternative, we have a context sensitivity for sure. The converse is not
- /// necessarily true, however. It's possible that after conflict resolution
- /// chooses minimum alternatives, SLL could get the same answer as LL.
- /// Regardless of whether or not the result indicates an ambiguity, it is not
- /// treated as a context sensitivity because LL prediction was not required
- /// in order to produce a correct prediction for this decision and input
- /// sequence. It may in fact still be a context sensitivity but we don't know
- /// by looking at the minimum alternatives for the current input.
- /// </summary>
- size_t conflictingAltResolvedBySLL = 0;
-
- virtual dfa::DFAState* getExistingTargetState(dfa::DFAState* previousD,
- size_t t) override;
- virtual dfa::DFAState* computeTargetState(dfa::DFA& dfa,
- dfa::DFAState* previousD,
- size_t t) override;
- virtual std::unique_ptr<ATNConfigSet> computeReachSet(ATNConfigSet* closure,
- size_t t,
- bool fullCtx) override;
- virtual bool evalSemanticContext(Ref<SemanticContext> const& pred,
- ParserRuleContext* parserCallStack,
- size_t alt, bool fullCtx) override;
- virtual void reportAttemptingFullContext(
- dfa::DFA& dfa, const antlrcpp::BitSet& conflictingAlts,
- ATNConfigSet* configs, size_t startIndex, size_t stopIndex) override;
- virtual void reportContextSensitivity(dfa::DFA& dfa, size_t prediction,
- ATNConfigSet* configs,
- size_t startIndex,
- size_t stopIndex) override;
- virtual void reportAmbiguity(dfa::DFA& dfa, dfa::DFAState* D,
- size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet& ambigAlts,
- ATNConfigSet* configs) override;
-};
-
-} // namespace atn
-} // namespace antlr4
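
Note: the header removed above shows that the profiler is a plain subclass overriding the simulator's virtual hook points (getExistingTargetState, computeReachSet, the report* callbacks) to record events before delegating to the base class. A generic sketch of that instrument-then-delegate pattern, with made-up Simulator/CountingSimulator names:

    #include <cstdio>

    // Base class exposing a virtual hook, as ParserATNSimulator does.
    class Simulator {
     public:
      virtual ~Simulator() = default;
      virtual int computeStep(int input) { return input * 2; }
    };

    // Profiling subclass: record, then delegate to the base implementation.
    class CountingSimulator : public Simulator {
     public:
      int steps = 0;
      int computeStep(int input) override {
        ++steps;  // instrumentation only; behaviour is unchanged
        return Simulator::computeStep(input);
      }
    };

    int main() {
      CountingSimulator sim;
      sim.computeStep(21);
      std::printf("recorded %d step(s)\n", sim.steps);
    }
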
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.cpp
deleted file mode 100644
index 52fabb6189..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/IntervalSet.h"
-
-#include "atn/RangeTransition.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-RangeTransition::RangeTransition(ATNState* target, size_t from, size_t to)
- : Transition(target), from(from), to(to) {}
-
-Transition::SerializationType RangeTransition::getSerializationType() const {
- return RANGE;
-}
-
-misc::IntervalSet RangeTransition::label() const {
- return misc::IntervalSet::of((int)from, (int)to);
-}
-
-bool RangeTransition::matches(size_t symbol, size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return symbol >= from && symbol <= to;
-}
-
-std::string RangeTransition::toString() const {
- return "RANGE " + Transition::toString() +
- " { from: " + std::to_string(from) + ", to: " + std::to_string(to) +
- " }";
-}
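
Note: RangeTransition::matches above is just an inclusive bounds test on the token type; the only subtlety is that both ends of the range are inclusive. A trimmed-down equivalent for illustration:

    #include <cassert>
    #include <cstddef>

    // Inclusive [from, to] membership test, as used for range transitions.
    bool inRange(std::size_t symbol, std::size_t from, std::size_t to) {
      return symbol >= from && symbol <= to;
    }

    int main() {
      assert(inRange('b', 'a', 'z'));
      assert(!inRange('0', 'a', 'z'));
    }
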
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.h
deleted file mode 100644
index 9aa6541ca6..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RangeTransition.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC RangeTransition final : public Transition {
- public:
- const size_t from;
- const size_t to;
-
- RangeTransition(ATNState* target, size_t from, size_t to);
-
- virtual SerializationType getSerializationType() const override;
-
- virtual misc::IntervalSet label() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.cpp
deleted file mode 100644
index d68ffabee9..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/RuleStartState.h"
-
-using namespace antlr4::atn;
-
-RuleStartState::RuleStartState() { isLeftRecursiveRule = false; }
-
-size_t RuleStartState::getStateType() { return RULE_START; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.h
deleted file mode 100644
index f7a6967b6c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStartState.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC RuleStartState final : public ATNState {
- public:
- RuleStartState();
-
- RuleStopState* stopState = nullptr;
- bool isLeftRecursiveRule = false;
-
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.cpp
deleted file mode 100644
index e89f91a4dd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/RuleStopState.h"
-
-using namespace antlr4::atn;
-
-size_t RuleStopState::getStateType() { return RULE_STOP; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.h
deleted file mode 100644
index ac3202731c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleStopState.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// The last node in the ATN for a rule, unless that rule is the start symbol.
-/// In that case, there is one transition to EOF. Later, we might encode
-/// references to all calls to this rule to compute FOLLOW sets for
-/// error handling.
-class ANTLR4CPP_PUBLIC RuleStopState final : public ATNState {
- public:
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.cpp
deleted file mode 100644
index 8504a263a1..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/RuleTransition.h"
-#include "atn/RuleStartState.h"
-
-using namespace antlr4::atn;
-
-RuleTransition::RuleTransition(RuleStartState* ruleStart, size_t ruleIndex,
- ATNState* followState)
- : RuleTransition(ruleStart, ruleIndex, 0, followState) {}
-
-RuleTransition::RuleTransition(RuleStartState* ruleStart, size_t ruleIndex,
- int precedence, ATNState* followState)
- : Transition(ruleStart), ruleIndex(ruleIndex), precedence(precedence) {
- this->followState = followState;
-}
-
-Transition::SerializationType RuleTransition::getSerializationType() const {
- return RULE;
-}
-
-bool RuleTransition::isEpsilon() const { return true; }
-
-bool RuleTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string RuleTransition::toString() const {
- std::stringstream ss;
- ss << "RULE " << Transition::toString() << " { ruleIndex: " << ruleIndex
- << ", precedence: " << precedence << ", followState: " << std::hex
- << followState << " }";
- return ss.str();
-}
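
Note: a rule transition above is an epsilon edge into the called rule's start state, while followState remembers where prediction resumes after the rule returns, much like a return address. A small sketch of that shape with hypothetical State/RuleCall types (not the ANTLR classes):

    #include <cstdio>
    #include <string>

    struct State { std::string name; };

    // Epsilon edge into a rule: target is the rule's start state,
    // followState is where prediction resumes after the rule finishes.
    struct RuleCall {
      State* target;
      State* followState;
      bool isEpsilon() const { return true; }  // consumes no input symbol
    };

    int main() {
      State exprStart{"expr.start"}, afterExpr{"stmt.afterExpr"};
      RuleCall call{&exprStart, &afterExpr};
      std::printf("call %s, then resume at %s\n",
                  call.target->name.c_str(), call.followState->name.c_str());
    }
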
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.h
deleted file mode 100644
index 2d0bf9a3f4..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/RuleTransition.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC RuleTransition : public Transition {
- public:
- /// Ptr to the rule definition object for this rule ref.
- const size_t ruleIndex; // no Rule object at runtime
-
- const int precedence;
-
- /// What node to begin computations following ref to rule.
- ATNState* followState;
-
- /// @deprecated Use
- /// <seealso cref="#RuleTransition(RuleStartState, size_t, int, ATNState)"/>
- /// instead.
- RuleTransition(RuleStartState* ruleStart, size_t ruleIndex,
- ATNState* followState);
-
- RuleTransition(RuleStartState* ruleStart, size_t ruleIndex, int precedence,
- ATNState* followState);
- RuleTransition(RuleTransition const&) = delete;
- RuleTransition& operator=(RuleTransition const&) = delete;
-
- virtual SerializationType getSerializationType() const override;
-
- virtual bool isEpsilon() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.cpp
deleted file mode 100644
index db603b9c60..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.cpp
+++ /dev/null
@@ -1,400 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "support/Arrays.h"
-#include "support/CPPUtils.h"
-
-#include "SemanticContext.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-//------------------ Predicate
-//-----------------------------------------------------------------------------------------
-
-SemanticContext::Predicate::Predicate()
- : Predicate(INVALID_INDEX, INVALID_INDEX, false) {}
-
-SemanticContext::Predicate::Predicate(size_t ruleIndex, size_t predIndex,
- bool isCtxDependent)
- : ruleIndex(ruleIndex),
- predIndex(predIndex),
- isCtxDependent(isCtxDependent) {}
-
-bool SemanticContext::Predicate::eval(Recognizer* parser,
- RuleContext* parserCallStack) {
- RuleContext* localctx = nullptr;
- if (isCtxDependent) localctx = parserCallStack;
- return parser->sempred(localctx, ruleIndex, predIndex);
-}
-
-size_t SemanticContext::Predicate::hashCode() const {
- size_t hashCode = misc::MurmurHash::initialize();
- hashCode = misc::MurmurHash::update(hashCode, ruleIndex);
- hashCode = misc::MurmurHash::update(hashCode, predIndex);
- hashCode = misc::MurmurHash::update(hashCode, isCtxDependent ? 1 : 0);
- hashCode = misc::MurmurHash::finish(hashCode, 3);
- return hashCode;
-}
-
-bool SemanticContext::Predicate::operator==(
- const SemanticContext& other) const {
- if (this == &other) return true;
-
- const Predicate* p = dynamic_cast<const Predicate*>(&other);
- if (p == nullptr) return false;
-
- return ruleIndex == p->ruleIndex && predIndex == p->predIndex &&
- isCtxDependent == p->isCtxDependent;
-}
-
-std::string SemanticContext::Predicate::toString() const {
- return std::string("{") + std::to_string(ruleIndex) + std::string(":") +
- std::to_string(predIndex) + std::string("}?");
-}
-
-//------------------ PrecedencePredicate
-//-------------------------------------------------------------------------------
-
-SemanticContext::PrecedencePredicate::PrecedencePredicate() : precedence(0) {}
-
-SemanticContext::PrecedencePredicate::PrecedencePredicate(int precedence)
- : precedence(precedence) {}
-
-bool SemanticContext::PrecedencePredicate::eval(Recognizer* parser,
- RuleContext* parserCallStack) {
- return parser->precpred(parserCallStack, precedence);
-}
-
-Ref<SemanticContext> SemanticContext::PrecedencePredicate::evalPrecedence(
- Recognizer* parser, RuleContext* parserCallStack,
- const Ref<SemanticContext>& this_ref) {
- if (parser->precpred(parserCallStack, precedence)) {
- return SemanticContext::NONE;
- } else {
- return nullptr;
- }
-}
-
-int SemanticContext::PrecedencePredicate::compareTo(PrecedencePredicate* o) {
- return precedence - o->precedence;
-}
-
-size_t SemanticContext::PrecedencePredicate::hashCode() const {
- size_t hashCode = 1;
- hashCode = 31 * hashCode + static_cast<size_t>(precedence);
- return hashCode;
-}
-
-bool SemanticContext::PrecedencePredicate::operator==(
- const SemanticContext& other) const {
- if (this == &other) return true;
-
- const PrecedencePredicate* predicate =
- dynamic_cast<const PrecedencePredicate*>(&other);
- if (predicate == nullptr) return false;
-
- return precedence == predicate->precedence;
-}
-
-std::string SemanticContext::PrecedencePredicate::toString() const {
- return "{" + std::to_string(precedence) + ">=prec}?";
-}
-
-//------------------ AND
-//-----------------------------------------------------------------------------------------------
-
-SemanticContext::AND::AND(Ref<SemanticContext> const& a,
- Ref<SemanticContext> const& b) {
- Set operands;
-
- if (is<AND>(a)) {
- for (auto operand : std::dynamic_pointer_cast<AND>(a)->opnds) {
- operands.insert(operand);
- }
- } else {
- operands.insert(a);
- }
-
- if (is<AND>(b)) {
- for (auto operand : std::dynamic_pointer_cast<AND>(b)->opnds) {
- operands.insert(operand);
- }
- } else {
- operands.insert(b);
- }
-
- std::vector<Ref<PrecedencePredicate>> precedencePredicates =
- filterPrecedencePredicates(operands);
-
- if (!precedencePredicates.empty()) {
- // interested in the transition with the lowest precedence
- auto predicate = [](Ref<PrecedencePredicate> const& a,
- Ref<PrecedencePredicate> const& b) {
- return a->precedence < b->precedence;
- };
-
- auto reduced = std::min_element(precedencePredicates.begin(),
- precedencePredicates.end(), predicate);
- operands.insert(*reduced);
- }
-
- std::copy(operands.begin(), operands.end(), std::back_inserter(opnds));
-}
-
-std::vector<Ref<SemanticContext>> SemanticContext::AND::getOperands() const {
- return opnds;
-}
-
-bool SemanticContext::AND::operator==(const SemanticContext& other) const {
- if (this == &other) return true;
-
- const AND* context = dynamic_cast<const AND*>(&other);
- if (context == nullptr) return false;
-
- return Arrays::equals(opnds, context->opnds);
-}
-
-size_t SemanticContext::AND::hashCode() const {
- return misc::MurmurHash::hashCode(opnds, typeid(AND).hash_code());
-}
-
-bool SemanticContext::AND::eval(Recognizer* parser,
- RuleContext* parserCallStack) {
- for (auto opnd : opnds) {
- if (!opnd->eval(parser, parserCallStack)) {
- return false;
- }
- }
- return true;
-}
-
-Ref<SemanticContext> SemanticContext::AND::evalPrecedence(
- Recognizer* parser, RuleContext* parserCallStack,
- const Ref<SemanticContext>& this_ref) {
- bool differs = false;
- std::vector<Ref<SemanticContext>> operands;
- for (auto context : opnds) {
- Ref<SemanticContext> evaluated =
- context->evalPrecedence(parser, parserCallStack, context);
- differs |= (evaluated != context);
- if (evaluated == nullptr) {
- // The AND context is false if any element is false.
- return nullptr;
- } else if (evaluated != NONE) {
- // Reduce the result by skipping true elements.
- operands.push_back(evaluated);
- }
- }
-
- if (!differs) {
- return this_ref;
- }
-
- if (operands.empty()) {
- // All elements were true, so the AND context is true.
- return NONE;
- }
-
- Ref<SemanticContext> result = operands[0];
- for (size_t i = 1; i < operands.size(); ++i) {
- result = SemanticContext::And(result, operands[i]);
- }
-
- return result;
-}
-
-std::string SemanticContext::AND::toString() const {
- std::string tmp;
- for (auto var : opnds) {
- tmp += var->toString() + " && ";
- }
- return tmp;
-}
-
-//------------------ OR
-//------------------------------------------------------------------------------------------------
-
-SemanticContext::OR::OR(Ref<SemanticContext> const& a,
- Ref<SemanticContext> const& b) {
- Set operands;
-
- if (is<OR>(a)) {
- for (auto operand : std::dynamic_pointer_cast<OR>(a)->opnds) {
- operands.insert(operand);
- }
- } else {
- operands.insert(a);
- }
-
- if (is<OR>(b)) {
- for (auto operand : std::dynamic_pointer_cast<OR>(b)->opnds) {
- operands.insert(operand);
- }
- } else {
- operands.insert(b);
- }
-
- std::vector<Ref<PrecedencePredicate>> precedencePredicates =
- filterPrecedencePredicates(operands);
- if (!precedencePredicates.empty()) {
- // interested in the transition with the highest precedence
- auto predicate = [](Ref<PrecedencePredicate> const& a,
- Ref<PrecedencePredicate> const& b) {
- return a->precedence < b->precedence;
- };
- auto reduced = std::max_element(precedencePredicates.begin(),
- precedencePredicates.end(), predicate);
- operands.insert(*reduced);
- }
-
- std::copy(operands.begin(), operands.end(), std::back_inserter(opnds));
-}
-
-std::vector<Ref<SemanticContext>> SemanticContext::OR::getOperands() const {
- return opnds;
-}
-
-bool SemanticContext::OR::operator==(const SemanticContext& other) const {
- if (this == &other) return true;
-
- const OR* context = dynamic_cast<const OR*>(&other);
- if (context == nullptr) return false;
-
- return Arrays::equals(opnds, context->opnds);
-}
-
-size_t SemanticContext::OR::hashCode() const {
- return misc::MurmurHash::hashCode(opnds, typeid(OR).hash_code());
-}
-
-bool SemanticContext::OR::eval(Recognizer* parser,
- RuleContext* parserCallStack) {
- for (auto opnd : opnds) {
- if (opnd->eval(parser, parserCallStack)) {
- return true;
- }
- }
- return false;
-}
-
-Ref<SemanticContext> SemanticContext::OR::evalPrecedence(
- Recognizer* parser, RuleContext* parserCallStack,
- const Ref<SemanticContext>& this_ref) {
- bool differs = false;
- std::vector<Ref<SemanticContext>> operands;
- for (auto context : opnds) {
- Ref<SemanticContext> evaluated =
- context->evalPrecedence(parser, parserCallStack, context);
- differs |= (evaluated != context);
- if (evaluated == NONE) {
- // The OR context is true if any element is true.
- return NONE;
- } else if (evaluated != nullptr) {
- // Reduce the result by skipping false elements.
- operands.push_back(evaluated);
- }
- }
-
- if (!differs) {
- return this_ref;
- }
-
- if (operands.empty()) {
- // All elements were false, so the OR context is false.
- return nullptr;
- }
-
- Ref<SemanticContext> result = operands[0];
- for (size_t i = 1; i < operands.size(); ++i) {
- result = SemanticContext::Or(result, operands[i]);
- }
-
- return result;
-}
-
-std::string SemanticContext::OR::toString() const {
- std::string tmp;
- for (auto var : opnds) {
- tmp += var->toString() + " || ";
- }
- return tmp;
-}
-
-//------------------ SemanticContext
-//-----------------------------------------------------------------------------------
-
-const Ref<SemanticContext> SemanticContext::NONE =
- std::make_shared<Predicate>(INVALID_INDEX, INVALID_INDEX, false);
-
-SemanticContext::~SemanticContext() {}
-
-bool SemanticContext::operator!=(const SemanticContext& other) const {
- return !(*this == other);
-}
-
-Ref<SemanticContext> SemanticContext::evalPrecedence(
- Recognizer* /*parser*/, RuleContext* /*parserCallStack*/,
- const Ref<SemanticContext>& this_ref) {
- return this_ref;
-}
-
-Ref<SemanticContext> SemanticContext::And(Ref<SemanticContext> const& a,
- Ref<SemanticContext> const& b) {
- if (!a || a == NONE) {
- return b;
- }
-
- if (!b || b == NONE) {
- return a;
- }
-
- Ref<AND> result = std::make_shared<AND>(a, b);
- if (result->opnds.size() == 1) {
- return result->opnds[0];
- }
-
- return result;
-}
-
-Ref<SemanticContext> SemanticContext::Or(Ref<SemanticContext> const& a,
- Ref<SemanticContext> const& b) {
- if (!a) {
- return b;
- }
- if (!b) {
- return a;
- }
-
- if (a == NONE || b == NONE) {
- return NONE;
- }
-
- Ref<OR> result = std::make_shared<OR>(a, b);
- if (result->opnds.size() == 1) {
- return result->opnds[0];
- }
-
- return result;
-}
-
-std::vector<Ref<SemanticContext::PrecedencePredicate>>
-SemanticContext::filterPrecedencePredicates(const Set& collection) {
- std::vector<Ref<SemanticContext::PrecedencePredicate>> result;
- for (auto context : collection) {
- if (antlrcpp::is<PrecedencePredicate>(context)) {
- result.push_back(std::dynamic_pointer_cast<PrecedencePredicate>(context));
- }
- }
-
- return result;
-}
-
-//------------------ Operator
-//-----------------------------------------------------------------------------------------
-
-SemanticContext::Operator::~Operator() {}
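
Note: the AND/OR evalPrecedence overloads removed above implement a three-valued reduction: NONE stands for "always true", a null result for "always false", and anything else is kept as a residual operand. Below is a standalone sketch of the AND case only, using std::optional<bool> with nullopt as the "unknown, keep it" value instead of the Ref<SemanticContext> sentinels; the names are illustrative.

    #include <cstdio>
    #include <optional>
    #include <utility>
    #include <vector>

    // Reduce an AND over three-valued operands:
    //   false   -> the whole conjunction is false
    //   true    -> the operand can be dropped
    //   nullopt -> unknown, must be kept for later evaluation
    std::optional<std::vector<int>> reduceAnd(
        const std::vector<std::pair<int, std::optional<bool>>>& operands) {
      std::vector<int> residual;
      for (const auto& [id, value] : operands) {
        if (value.has_value() && !*value) return std::nullopt;  // AND is false
        if (!value.has_value()) residual.push_back(id);         // keep unknowns
        // *value == true: skip, it cannot change the result
      }
      return residual;  // empty residual means the AND reduced to true
    }

    int main() {
      auto r = reduceAnd({{1, true}, {2, std::nullopt}, {3, std::nullopt}});
      if (r) std::printf("kept %zu operand(s)\n", r->size());
    }
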
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.h
deleted file mode 100644
index b0397e3909..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SemanticContext.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Recognizer.h"
-#include "support/CPPUtils.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// A tree structure used to record the semantic context in which
-/// an ATN configuration is valid. It's either a single predicate,
-/// a conjunction "p1 && p2", or a sum of products "p1||p2".
-///
-/// I have scoped the AND, OR, and Predicate subclasses of
-/// SemanticContext within the scope of this outer class.
-class ANTLR4CPP_PUBLIC SemanticContext {
- public:
- struct Hasher {
- size_t operator()(Ref<SemanticContext> const& k) const {
- return k->hashCode();
- }
- };
-
- struct Comparer {
- bool operator()(Ref<SemanticContext> const& lhs,
- Ref<SemanticContext> const& rhs) const {
- if (lhs == rhs) return true;
- return (lhs->hashCode() == rhs->hashCode()) && (*lhs == *rhs);
- }
- };
-
- using Set = std::unordered_set<Ref<SemanticContext>, Hasher, Comparer>;
-
- /**
- * The default {@link SemanticContext}, which is semantically equivalent to
- * a predicate of the form {@code {true}?}.
- */
- static const Ref<SemanticContext> NONE;
-
- virtual ~SemanticContext();
-
- virtual size_t hashCode() const = 0;
- virtual std::string toString() const = 0;
- virtual bool operator==(const SemanticContext& other) const = 0;
- virtual bool operator!=(const SemanticContext& other) const;
-
- /// <summary>
- /// For context independent predicates, we evaluate them without a local
- /// context (i.e., null context). That way, we can evaluate them without
- /// having to create proper rule-specific context during prediction (as
- /// opposed to the parser, which creates them naturally). In a practical
- /// sense, this avoids a cast exception from RuleContext to myruleContext.
- /// <p/>
- /// For context dependent predicates, we must pass in a local context so that
- /// references such as $arg evaluate properly as _localctx.arg. We only
- /// capture context dependent predicates in the context in which we begin
- /// prediction, so we passed in the outer context here in case of context
- /// dependent predicate evaluation.
- /// </summary>
- virtual bool eval(Recognizer* parser, RuleContext* parserCallStack) = 0;
-
- /**
- * Evaluate the precedence predicates for the context and reduce the result.
- *
- * @param parser The parser instance.
- * @param parserCallStack
- * @return The simplified semantic context after precedence predicates are
- * evaluated, which will be one of the following values.
- * <ul>
- * <li>{@link #NONE}: if the predicate simplifies to {@code true} after
- * precedence predicates are evaluated.</li>
- * <li>{@code null}: if the predicate simplifies to {@code false} after
- * precedence predicates are evaluated.</li>
- * <li>{@code this}: if the semantic context is not changed as a result of
- * precedence predicate evaluation.</li>
- * <li>A non-{@code null} {@link SemanticContext}: the new simplified
- * semantic context after precedence predicates are evaluated.</li>
- * </ul>
- */
- virtual Ref<SemanticContext> evalPrecedence(
- Recognizer* parser, RuleContext* parserCallStack,
- const Ref<SemanticContext>& this_ref);
-
- static Ref<SemanticContext> And(Ref<SemanticContext> const& a,
- Ref<SemanticContext> const& b);
-
- /// See also: ParserATNSimulator::getPredsForAmbigAlts.
- static Ref<SemanticContext> Or(Ref<SemanticContext> const& a,
- Ref<SemanticContext> const& b);
-
- class Predicate;
- class PrecedencePredicate;
- class Operator;
- class AND;
- class OR;
-
- private:
- static std::vector<Ref<PrecedencePredicate>> filterPrecedencePredicates(
- const Set& collection);
-};
-
-class ANTLR4CPP_PUBLIC SemanticContext::Predicate : public SemanticContext {
- public:
- const size_t ruleIndex;
- const size_t predIndex;
- const bool isCtxDependent; // e.g., $i ref in pred
-
- protected:
- Predicate();
-
- public:
- Predicate(size_t ruleIndex, size_t predIndex, bool isCtxDependent);
-
- virtual bool eval(Recognizer* parser, RuleContext* parserCallStack) override;
- virtual size_t hashCode() const override;
- virtual bool operator==(const SemanticContext& other) const override;
- virtual std::string toString() const override;
-};
-
-class ANTLR4CPP_PUBLIC SemanticContext::PrecedencePredicate
- : public SemanticContext {
- public:
- const int precedence;
-
- protected:
- PrecedencePredicate();
-
- public:
- PrecedencePredicate(int precedence);
-
- virtual bool eval(Recognizer* parser, RuleContext* parserCallStack) override;
- virtual Ref<SemanticContext> evalPrecedence(
- Recognizer* parser, RuleContext* parserCallStack,
- const Ref<SemanticContext>& this_ref) override;
- virtual int compareTo(PrecedencePredicate* o);
- virtual size_t hashCode() const override;
- virtual bool operator==(const SemanticContext& other) const override;
- virtual std::string toString() const override;
-};
-
-/**
- * This is the base class for semantic context "operators", which operate on
- * a collection of semantic context "operands".
- *
- * @since 4.3
- */
-class ANTLR4CPP_PUBLIC SemanticContext::Operator : public SemanticContext {
- public:
- virtual ~Operator() override;
-
- /**
- * Gets the operands for the semantic context operator.
- *
- * @return a collection of {@link SemanticContext} operands for the
- * operator.
- *
- * @since 4.3
- */
-
- virtual std::vector<Ref<SemanticContext>> getOperands() const = 0;
-};
-
-/**
- * A semantic context which is true whenever none of the contained contexts
- * is false.
- */
-class ANTLR4CPP_PUBLIC SemanticContext::AND : public SemanticContext::Operator {
- public:
- std::vector<Ref<SemanticContext>> opnds;
-
- AND(Ref<SemanticContext> const& a, Ref<SemanticContext> const& b);
-
- virtual std::vector<Ref<SemanticContext>> getOperands() const override;
- virtual bool operator==(const SemanticContext& other) const override;
- virtual size_t hashCode() const override;
-
- /**
- * The evaluation of predicates by this context is short-circuiting, but
- * unordered.
- */
- virtual bool eval(Recognizer* parser, RuleContext* parserCallStack) override;
- virtual Ref<SemanticContext> evalPrecedence(
- Recognizer* parser, RuleContext* parserCallStack,
- const Ref<SemanticContext>& this_ref) override;
- virtual std::string toString() const override;
-};
-
-/**
- * A semantic context which is true whenever at least one of the contained
- * contexts is true.
- */
-class ANTLR4CPP_PUBLIC SemanticContext::OR : public SemanticContext::Operator {
- public:
- std::vector<Ref<SemanticContext>> opnds;
-
- OR(Ref<SemanticContext> const& a, Ref<SemanticContext> const& b);
-
- virtual std::vector<Ref<SemanticContext>> getOperands() const override;
- virtual bool operator==(const SemanticContext& other) const override;
- virtual size_t hashCode() const override;
-
- /**
- * The evaluation of predicates by this context is short-circuiting, but
- * unordered.
- */
- virtual bool eval(Recognizer* parser, RuleContext* parserCallStack) override;
- virtual Ref<SemanticContext> evalPrecedence(
- Recognizer* parser, RuleContext* parserCallStack,
- const Ref<SemanticContext>& this_ref) override;
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
-
-// Hash function for SemanticContext, used in the MurmurHash::update function
-
-namespace std {
-using antlr4::atn::SemanticContext;
-
-template <>
-struct hash<SemanticContext> {
- size_t operator()(SemanticContext& x) const { return x.hashCode(); }
-};
-} // namespace std
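
Note: the Set alias in the header above keys an unordered_set of shared_ptr by the pointee's value rather than by pointer identity, which requires a custom hash and equality functor. A self-contained version of that idiom for a simple value type (Pred is a stand-in, not the ANTLR Predicate class):

    #include <cstdio>
    #include <memory>
    #include <unordered_set>

    struct Pred {
      int rule, index;
      std::size_t hashCode() const { return std::hash<int>()(rule * 31 + index); }
      bool operator==(const Pred& o) const {
        return rule == o.rule && index == o.index;
      }
    };

    // Hash and compare the pointed-to values, not the shared_ptr addresses.
    struct Hasher {
      std::size_t operator()(const std::shared_ptr<Pred>& p) const {
        return p->hashCode();
      }
    };
    struct Comparer {
      bool operator()(const std::shared_ptr<Pred>& a,
                      const std::shared_ptr<Pred>& b) const { return *a == *b; }
    };

    int main() {
      std::unordered_set<std::shared_ptr<Pred>, Hasher, Comparer> set;
      set.insert(std::make_shared<Pred>(Pred{1, 2}));
      set.insert(std::make_shared<Pred>(Pred{1, 2}));  // same value, stored once
      std::printf("size = %zu\n", set.size());
    }
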
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.cpp
deleted file mode 100644
index 6e3bcb05fa..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-#include "misc/IntervalSet.h"
-
-#include "atn/SetTransition.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-SetTransition::SetTransition(ATNState* target, const misc::IntervalSet& aSet)
- : Transition(target),
- set(aSet.isEmpty() ? misc::IntervalSet::of(Token::INVALID_TYPE) : aSet) {}
-
-Transition::SerializationType SetTransition::getSerializationType() const {
- return SET;
-}
-
-misc::IntervalSet SetTransition::label() const { return set; }
-
-bool SetTransition::matches(size_t symbol, size_t /*minVocabSymbol*/,
- size_t /*maxVocabSymbol*/) const {
- return set.contains(symbol);
-}
-
-std::string SetTransition::toString() const {
- return "SET " + Transition::toString() + " { set: " + set.toString() + "}";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.h
deleted file mode 100644
index db753a89ec..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SetTransition.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// A transition containing a set of values. </summary>
-class ANTLR4CPP_PUBLIC SetTransition : public Transition {
- public:
- const misc::IntervalSet set;
-
- SetTransition(ATNState* target, const misc::IntervalSet& set);
-
- virtual SerializationType getSerializationType() const override;
-
- virtual misc::IntervalSet label() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp
deleted file mode 100644
index ec72b4eea5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/EmptyPredictionContext.h"
-
-#include "atn/SingletonPredictionContext.h"
-
-using namespace antlr4::atn;
-
-SingletonPredictionContext::SingletonPredictionContext(
- Ref<PredictionContext> const& parent, size_t returnState)
- : PredictionContext(parent ? calculateHashCode(parent, returnState)
- : calculateEmptyHashCode()),
- parent(parent),
- returnState(returnState) {
- assert(returnState != ATNState::INVALID_STATE_NUMBER);
-}
-
-SingletonPredictionContext::~SingletonPredictionContext() {}
-
-Ref<SingletonPredictionContext> SingletonPredictionContext::create(
- Ref<PredictionContext> const& parent, size_t returnState) {
- if (returnState == EMPTY_RETURN_STATE && parent) {
- // someone can pass in the bits of an array ctx that mean $
- return std::dynamic_pointer_cast<SingletonPredictionContext>(EMPTY);
- }
- return std::make_shared<SingletonPredictionContext>(parent, returnState);
-}
-
-size_t SingletonPredictionContext::size() const { return 1; }
-
-Ref<PredictionContext> SingletonPredictionContext::getParent(
- size_t index) const {
- assert(index == 0);
- ((void)(index)); // Make Release build happy.
- return parent;
-}
-
-size_t SingletonPredictionContext::getReturnState(size_t index) const {
- assert(index == 0);
- ((void)(index)); // Make Release build happy.
- return returnState;
-}
-
-bool SingletonPredictionContext::operator==(const PredictionContext& o) const {
- if (this == &o) {
- return true;
- }
-
- const SingletonPredictionContext* other =
- dynamic_cast<const SingletonPredictionContext*>(&o);
- if (other == nullptr) {
- return false;
- }
-
- if (this->hashCode() != other->hashCode()) {
- return false; // can't be same if hash is different
- }
-
- if (returnState != other->returnState) return false;
-
- if (!parent && !other->parent) return true;
- if (!parent || !other->parent) return false;
-
- return *parent == *other->parent;
-}
-
-std::string SingletonPredictionContext::toString() const {
- // std::string up = !parent.expired() ? parent.lock()->toString() : "";
- std::string up = parent != nullptr ? parent->toString() : "";
- if (up.length() == 0) {
- if (returnState == EMPTY_RETURN_STATE) {
- return "$";
- }
- return std::to_string(returnState);
- }
- return std::to_string(returnState) + " " + up;
-}
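
Note: SingletonPredictionContext above computes its hash once in the constructor, and operator== rejects candidates whose cached hashes differ before doing the deep comparison. A minimal sketch of that cache-then-compare pattern with an illustrative Node type:

    #include <cstdio>
    #include <functional>
    #include <string>

    class Node {
     public:
      Node(std::string label, int state)
          : label_(std::move(label)), state_(state),
            hash_(std::hash<std::string>()(label_) * 31 + state_) {}

      std::size_t hashCode() const { return hash_; }

      bool operator==(const Node& o) const {
        if (this == &o) return true;
        if (hash_ != o.hash_) return false;  // cheap rejection before deep compare
        return state_ == o.state_ && label_ == o.label_;
      }

     private:
      std::string label_;
      int state_;
      std::size_t hash_;  // computed once at construction
    };

    int main() {
      Node a("expr", 7), b("expr", 7), c("stmt", 7);
      std::printf("a==b: %d, a==c: %d\n", a == b, a == c);
    }
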
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h
deleted file mode 100644
index 223111875e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/SingletonPredictionContext.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/PredictionContext.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC SingletonPredictionContext : public PredictionContext {
- public:
- // Usually a parent is linked via a weak ptr. Not so here as we have kinda
- // reverse reference chain. There are no child contexts stored here and often
- // the parent context is left dangling when its owning ATNState is released.
- // In order to avoid having this context released as well (leaving all other
- // contexts which got this one as parent with a null reference) we use a
- // shared_ptr here instead, to keep those left alone parent contexts alive.
- const Ref<PredictionContext> parent;
- const size_t returnState;
-
- SingletonPredictionContext(Ref<PredictionContext> const& parent,
- size_t returnState);
- virtual ~SingletonPredictionContext();
-
- static Ref<SingletonPredictionContext> create(
- Ref<PredictionContext> const& parent, size_t returnState);
-
- virtual size_t size() const override;
- virtual Ref<PredictionContext> getParent(size_t index) const override;
- virtual size_t getReturnState(size_t index) const override;
- virtual bool operator==(const PredictionContext& o) const override;
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp
deleted file mode 100644
index a6dec76639..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/StarBlockStartState.h"
-
-using namespace antlr4::atn;
-
-size_t StarBlockStartState::getStateType() { return STAR_BLOCK_START; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.h
deleted file mode 100644
index c2b6ec0db2..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarBlockStartState.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/BlockStartState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// The block that begins a closure loop.
-class ANTLR4CPP_PUBLIC StarBlockStartState final : public BlockStartState {
- public:
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp
deleted file mode 100644
index 3c0b02a795..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/StarLoopEntryState.h"
-
-using namespace antlr4::atn;
-
-StarLoopEntryState::StarLoopEntryState()
- : DecisionState(), isPrecedenceDecision(false) {}
-
-size_t StarLoopEntryState::getStateType() { return STAR_LOOP_ENTRY; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h
deleted file mode 100644
index 2621a44e12..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopEntryState.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC StarLoopEntryState final : public DecisionState {
- public:
- StarLoopEntryState();
-
- /**
- * Indicates whether this state can benefit from a precedence DFA during SLL
- * decision making.
- *
- * <p>This is a computed property that is calculated during ATN
- * deserialization and stored for use in {@link ParserATNSimulator} and
- * {@link ParserInterpreter}.</p>
- *
- * @see DFA#isPrecedenceDfa()
- */
- bool isPrecedenceDecision = false;
-
- StarLoopbackState* loopBackState = nullptr;
-
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp
deleted file mode 100644
index fea6d3e207..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/StarLoopEntryState.h"
-#include "atn/Transition.h"
-
-#include "atn/StarLoopbackState.h"
-
-using namespace antlr4::atn;
-
-StarLoopEntryState* StarLoopbackState::getLoopEntryState() {
- return dynamic_cast<StarLoopEntryState*>(transitions[0]->target);
-}
-
-size_t StarLoopbackState::getStateType() { return STAR_LOOP_BACK; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.h
deleted file mode 100644
index 9ad5230c09..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/StarLoopbackState.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC StarLoopbackState final : public ATNState {
- public:
- StarLoopEntryState* getLoopEntryState();
-
- virtual size_t getStateType() override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.cpp
deleted file mode 100644
index 4ea41645dd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/TokensStartState.h"
-
-using namespace antlr4::atn;
-
-size_t TokensStartState::getStateType() { return TOKEN_START; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.h
deleted file mode 100644
index c0ad14614d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/TokensStartState.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// The Tokens rule start state linking to each lexer rule start state.
-class ANTLR4CPP_PUBLIC TokensStartState final : public DecisionState {
- public:
- virtual size_t getStateType();
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.cpp
deleted file mode 100644
index 6d0ad9d05a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "support/Arrays.h"
-
-#include "atn/Transition.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-using namespace antlrcpp;
-
-const std::vector<std::string> Transition::serializationNames = {
- "INVALID", "EPSILON", "RANGE", "RULE", "PREDICATE", "ATOM",
- "ACTION", "SET", "NOT_SET", "WILDCARD", "PRECEDENCE"};
-
-Transition::Transition(ATNState* target) {
- if (target == nullptr) {
- throw NullPointerException("target cannot be null.");
- }
-
- this->target = target;
-}
-
-Transition::~Transition() {}
-
-bool Transition::isEpsilon() const { return false; }
-
-misc::IntervalSet Transition::label() const {
- return misc::IntervalSet::EMPTY_SET;
-}
-
-std::string Transition::toString() const {
- std::stringstream ss;
- ss << "(Transition " << std::hex << this << ", target: " << std::hex << target
- << ')';
-
- return ss.str();
-}
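
Note: Transition above is an abstract base whose defaults (isEpsilon() returning false, an empty label set) let most subclasses override only matches() and the serialization type, and whose constructor rejects a null target up front. A compact sketch of that shape with a single derived atom-style edge (hypothetical Edge/AtomEdge names, no ANTLR headers):

    #include <cstdio>
    #include <stdexcept>

    struct State { int number; };

    class Edge {
     public:
      explicit Edge(State* target) : target_(target) {
        if (target == nullptr) throw std::invalid_argument("target cannot be null");
      }
      virtual ~Edge() = default;
      virtual bool isEpsilon() const { return false; }  // default: consumes input
      virtual bool matches(int symbol) const = 0;       // subclasses decide
     protected:
      State* target_;
    };

    class AtomEdge final : public Edge {
     public:
      AtomEdge(State* target, int symbol) : Edge(target), symbol_(symbol) {}
      bool matches(int symbol) const override { return symbol == symbol_; }
     private:
      int symbol_;
    };

    int main() {
      State s{1};
      AtomEdge e(&s, 'a');
      std::printf("matches 'a': %d, epsilon: %d\n", e.matches('a'), e.isEpsilon());
    }
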
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.h
deleted file mode 100644
index 2cc9f1706b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/Transition.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "misc/IntervalSet.h"
-
-namespace antlr4 {
-namespace atn {
-
-/// <summary>
-/// An ATN transition between any two ATN states. Subclasses define
-/// atom, set, epsilon, action, predicate, rule transitions.
-/// <p/>
-/// This is a one way link. It emanates from a state (usually via a list of
-/// transitions) and has a target state.
-/// <p/>
-/// Since we never have to change the ATN transitions once we construct it,
-/// we can fix these transitions as specific classes. The DFA transitions
-/// on the other hand need to update the labels as it adds transitions to
-/// the states. We'll use the term Edge for the DFA to distinguish them from
-/// ATN transitions.
-/// </summary>
-class ANTLR4CPP_PUBLIC Transition {
- public:
- // constants for serialization
- enum SerializationType {
- EPSILON = 1,
- RANGE = 2,
- RULE = 3,
- PREDICATE = 4, // e.g., {isType(input.LT(1))}?
- ATOM = 5,
- ACTION = 6,
- SET = 7, // ~(A|B) or ~atom, wildcard, which convert to next 2
- NOT_SET = 8,
- WILDCARD = 9,
- PRECEDENCE = 10,
- };
-
- static const std::vector<std::string> serializationNames;
-
- /// The target of this transition.
- // ml: this is a reference into the ATN.
- ATNState* target;
-
- virtual ~Transition();
-
- protected:
- Transition(ATNState* target);
-
- public:
- virtual SerializationType getSerializationType() const = 0;
-
- /**
- * Determines if the transition is an "epsilon" transition.
- *
- * <p>The default implementation returns {@code false}.</p>
- *
- * @return {@code true} if traversing this transition in the ATN does not
- * consume an input symbol; otherwise, {@code false} if traversing this
- * transition consumes (matches) an input symbol.
- */
- virtual bool isEpsilon() const;
- virtual misc::IntervalSet label() const;
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const = 0;
-
- virtual std::string toString() const;
-
- Transition(Transition const&) = delete;
- Transition& operator=(Transition const&) = delete;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp
deleted file mode 100644
index da9d80d764..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNState.h"
-
-#include "atn/WildcardTransition.h"
-
-using namespace antlr4::atn;
-
-WildcardTransition::WildcardTransition(ATNState* target) : Transition(target) {}
-
-Transition::SerializationType WildcardTransition::getSerializationType() const {
- return WILDCARD;
-}
-
-bool WildcardTransition::matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol;
-}
-
-std::string WildcardTransition::toString() const {
- return "WILDCARD " + Transition::toString() + " {}";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.h
deleted file mode 100644
index e3bf8c3073..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/atn/WildcardTransition.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC WildcardTransition final : public Transition {
- public:
- WildcardTransition(ATNState* target);
-
- virtual SerializationType getSerializationType() const override;
-
- virtual bool matches(size_t symbol, size_t minVocabSymbol,
- size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-};
-
-} // namespace atn
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.cpp
deleted file mode 100644
index 764b73b296..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNConfigSet.h"
-#include "atn/StarLoopEntryState.h"
-#include "dfa/DFASerializer.h"
-#include "dfa/LexerDFASerializer.h"
-#include "support/CPPUtils.h"
-
-#include "dfa/DFA.h"
-
-using namespace antlr4;
-using namespace antlr4::dfa;
-using namespace antlrcpp;
-
-DFA::DFA(atn::DecisionState* atnStartState) : DFA(atnStartState, 0) {}
-
-DFA::DFA(atn::DecisionState* atnStartState, size_t decision)
- : atnStartState(atnStartState), s0(nullptr), decision(decision) {
- _precedenceDfa = false;
- if (is<atn::StarLoopEntryState*>(atnStartState)) {
- if (static_cast<atn::StarLoopEntryState*>(atnStartState)
- ->isPrecedenceDecision) {
- _precedenceDfa = true;
- s0 = new DFAState(
- std::unique_ptr<atn::ATNConfigSet>(new atn::ATNConfigSet()));
- s0->isAcceptState = false;
- s0->requiresFullContext = false;
- }
- }
-}
-
-DFA::DFA(DFA&& other)
- : atnStartState(other.atnStartState), decision(other.decision) {
- // Source states are implicitly cleared by the move.
- states = std::move(other.states);
-
- other.atnStartState = nullptr;
- other.decision = 0;
- s0 = other.s0;
- other.s0 = nullptr;
- _precedenceDfa = other._precedenceDfa;
- other._precedenceDfa = false;
-}
-
-DFA::~DFA() {
- bool s0InList = (s0 == nullptr);
- for (auto state : states) {
- if (state == s0) s0InList = true;
- delete state;
- }
-
- if (!s0InList) delete s0;
-}
-
-bool DFA::isPrecedenceDfa() const { return _precedenceDfa; }
-
-DFAState* DFA::getPrecedenceStartState(int precedence) const {
- assert(_precedenceDfa); // Only precedence DFAs may contain a precedence
- // start state.
-
- auto iterator = s0->edges.find(precedence);
- if (iterator == s0->edges.end()) return nullptr;
-
- return iterator->second;
-}
-
-void DFA::setPrecedenceStartState(int precedence, DFAState* startState,
- SingleWriteMultipleReadLock& lock) {
- if (!isPrecedenceDfa()) {
- throw IllegalStateException(
- "Only precedence DFAs may contain a precedence start state.");
- }
-
- if (precedence < 0) {
- return;
- }
-
- {
- lock.writeLock();
- s0->edges[precedence] = startState;
- lock.writeUnlock();
- }
-}
-
-std::vector<DFAState*> DFA::getStates() const {
- std::vector<DFAState*> result;
- for (auto state : states) result.push_back(state);
-
- std::sort(result.begin(), result.end(),
- [](DFAState* o1, DFAState* o2) -> bool {
- return o1->stateNumber < o2->stateNumber;
- });
-
- return result;
-}
-
-std::string DFA::toString(const std::vector<std::string>& tokenNames) {
- if (s0 == nullptr) {
- return "";
- }
- DFASerializer serializer(this, tokenNames);
-
- return serializer.toString();
-}
-
-std::string DFA::toString(const Vocabulary& vocabulary) const {
- if (s0 == nullptr) {
- return "";
- }
-
- DFASerializer serializer(this, vocabulary);
- return serializer.toString();
-}
-
-std::string DFA::toLexerString() {
- if (s0 == nullptr) {
- return "";
- }
- LexerDFASerializer serializer(this);
-
- return serializer.toString();
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.h
deleted file mode 100644
index fd7b62d0f6..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFA.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "dfa/DFAState.h"
-
-namespace antlrcpp {
-class SingleWriteMultipleReadLock;
-}
-
-namespace antlr4 {
-namespace dfa {
-
-class ANTLR4CPP_PUBLIC DFA {
- public:
- /// A set of all DFA states. Use a map so we can get old state back.
- /// Set only allows you to see if it's there.
-
- /// From which ATN state did we create this DFA?
- atn::DecisionState* atnStartState;
- std::unordered_set<DFAState*, DFAState::Hasher, DFAState::Comparer>
- states; // States are owned by this class.
- DFAState* s0;
- size_t decision;
-
- DFA(atn::DecisionState* atnStartState);
- DFA(atn::DecisionState* atnStartState, size_t decision);
- DFA(const DFA& other) = delete;
- DFA(DFA&& other);
- virtual ~DFA();
-
- /**
- * Gets whether this DFA is a precedence DFA. Precedence DFAs use a special
- * start state {@link #s0} which is not stored in {@link #states}. The
- * {@link DFAState#edges} array for this start state contains outgoing edges
- * supplying individual start states corresponding to specific precedence
- * values.
- *
- * @return {@code true} if this is a precedence DFA; otherwise,
- * {@code false}.
- * @see Parser#getPrecedence()
- */
- bool isPrecedenceDfa() const;
-
- /**
- * Get the start state for a specific precedence value.
- *
- * @param precedence The current precedence.
- * @return The start state corresponding to the specified precedence, or
- * {@code null} if no start state exists for the specified precedence.
- *
- * @throws IllegalStateException if this is not a precedence DFA.
- * @see #isPrecedenceDfa()
- */
- DFAState* getPrecedenceStartState(int precedence) const;
-
- /**
- * Set the start state for a specific precedence value.
- *
- * @param precedence The current precedence.
- * @param startState The start state corresponding to the specified
- * precedence.
- *
- * @throws IllegalStateException if this is not a precedence DFA.
- * @see #isPrecedenceDfa()
- */
- void setPrecedenceStartState(int precedence, DFAState* startState,
- antlrcpp::SingleWriteMultipleReadLock& lock);
-
- /// Return a list of all states in this DFA, ordered by state number.
- virtual std::vector<DFAState*> getStates() const;
-
- /**
- * @deprecated Use {@link #toString(Vocabulary)} instead.
- */
- virtual std::string toString(const std::vector<std::string>& tokenNames);
- std::string toString(const Vocabulary& vocabulary) const;
-
- virtual std::string toLexerString();
-
- private:
- /**
- * {@code true} if this DFA is for a precedence decision; otherwise,
- * {@code false}. This is the backing field for {@link #isPrecedenceDfa}.
- */
- bool _precedenceDfa;
-};
-
-} // namespace dfa
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp
deleted file mode 100644
index 035b1fa197..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Vocabulary.h"
-#include "dfa/DFA.h"
-
-#include "dfa/DFASerializer.h"
-
-using namespace antlr4::dfa;
-
-DFASerializer::DFASerializer(const DFA* dfa,
- const std::vector<std::string>& tokenNames)
- : DFASerializer(dfa, Vocabulary::fromTokenNames(tokenNames)) {}
-
-DFASerializer::DFASerializer(const DFA* dfa, const Vocabulary& vocabulary)
- : _dfa(dfa), _vocabulary(vocabulary) {}
-
-DFASerializer::~DFASerializer() {}
-
-std::string DFASerializer::toString() const {
- if (_dfa->s0 == nullptr) {
- return "";
- }
-
- std::stringstream ss;
- std::vector<DFAState*> states = _dfa->getStates();
- for (auto s : states) {
- for (size_t i = 0; i < s->edges.size(); i++) {
- DFAState* t = s->edges[i];
- if (t != nullptr && t->stateNumber != INT32_MAX) {
- ss << getStateString(s);
- std::string label = getEdgeLabel(i);
- ss << "-" << label << "->" << getStateString(t) << "\n";
- }
- }
- }
-
- return ss.str();
-}
-
-std::string DFASerializer::getEdgeLabel(size_t i) const {
- return _vocabulary.getDisplayName(
- i); // ml: no longer needed -1 as we use a map for edges, without offset.
-}
-
-std::string DFASerializer::getStateString(DFAState* s) const {
- size_t n = s->stateNumber;
-
- const std::string baseStateStr = std::string(s->isAcceptState ? ":" : "") +
- "s" + std::to_string(n) +
- (s->requiresFullContext ? "^" : "");
-
- if (s->isAcceptState) {
- if (!s->predicates.empty()) {
- std::string buf;
- for (size_t i = 0; i < s->predicates.size(); i++) {
- buf.append(s->predicates[i]->toString());
- }
- return baseStateStr + "=>" + buf;
- } else {
- return baseStateStr + "=>" + std::to_string(s->prediction);
- }
- } else {
- return baseStateStr;
- }
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.h
deleted file mode 100644
index d130b313bb..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFASerializer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Vocabulary.h"
-
-namespace antlr4 {
-namespace dfa {
-
-/// A DFA walker that knows how to dump them to serialized strings.
-class ANTLR4CPP_PUBLIC DFASerializer {
- public:
- DFASerializer(const DFA* dfa, const std::vector<std::string>& tnames);
- DFASerializer(const DFA* dfa, const Vocabulary& vocabulary);
- virtual ~DFASerializer();
-
- virtual std::string toString() const;
-
- protected:
- virtual std::string getEdgeLabel(size_t i) const;
- virtual std::string getStateString(DFAState* s) const;
-
- private:
- const DFA* _dfa;
- const Vocabulary& _vocabulary;
-};
-
-} // namespace dfa
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.cpp
deleted file mode 100644
index 253cff1419..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/SemanticContext.h"
-#include "misc/MurmurHash.h"
-
-#include "dfa/DFAState.h"
-
-using namespace antlr4::dfa;
-using namespace antlr4::atn;
-
-DFAState::PredPrediction::PredPrediction(const Ref<SemanticContext>& pred,
- int alt)
- : pred(pred) {
- InitializeInstanceFields();
- this->alt = alt;
-}
-
-DFAState::PredPrediction::~PredPrediction() {}
-
-std::string DFAState::PredPrediction::toString() {
- return std::string("(") + pred->toString() + ", " + std::to_string(alt) + ")";
-}
-
-void DFAState::PredPrediction::InitializeInstanceFields() { alt = 0; }
-
-DFAState::DFAState() { InitializeInstanceFields(); }
-
-DFAState::DFAState(int state) : DFAState() { stateNumber = state; }
-
-DFAState::DFAState(std::unique_ptr<ATNConfigSet> configs_) : DFAState() {
- configs = std::move(configs_);
-}
-
-DFAState::~DFAState() {
- for (auto predicate : predicates) {
- delete predicate;
- }
-}
-
-std::set<size_t> DFAState::getAltSet() {
- std::set<size_t> alts;
- if (configs != nullptr) {
- for (size_t i = 0; i < configs->size(); i++) {
- alts.insert(configs->get(i)->alt);
- }
- }
- return alts;
-}
-
-size_t DFAState::hashCode() const {
- size_t hash = misc::MurmurHash::initialize(7);
- hash = misc::MurmurHash::update(hash, configs->hashCode());
- hash = misc::MurmurHash::finish(hash, 1);
- return hash;
-}
-
-bool DFAState::operator==(const DFAState& o) const {
- // compare set of ATN configurations in this set with other
- if (this == &o) {
- return true;
- }
-
- return *configs == *o.configs;
-}
-
-std::string DFAState::toString() {
- std::stringstream ss;
- ss << stateNumber;
- if (configs) {
- ss << ":" << configs->toString();
- }
- if (isAcceptState) {
- ss << " => ";
- if (!predicates.empty()) {
- for (size_t i = 0; i < predicates.size(); i++) {
- ss << predicates[i]->toString();
- }
- } else {
- ss << prediction;
- }
- }
- return ss.str();
-}
-
-void DFAState::InitializeInstanceFields() {
- stateNumber = -1;
- isAcceptState = false;
- prediction = 0;
- requiresFullContext = false;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.h
deleted file mode 100644
index e0c45dedbd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/DFAState.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace dfa {
-
-/// <summary>
-/// A DFA state represents a set of possible ATN configurations.
-/// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
-/// to keep track of all possible states the ATN can be in after
-/// reading each input symbol. That is to say, after reading
-/// input a1a2..an, the DFA is in a state that represents the
-/// subset T of the states of the ATN that are reachable from the
-/// ATN's start state along some path labeled a1a2..an."
-/// In conventional NFA->DFA conversion, therefore, the subset T
-/// would be a bitset representing the set of states the
-/// ATN could be in. We need to track the alt predicted by each
-/// state as well, however. More importantly, we need to maintain
-/// a stack of states, tracking the closure operations as they
-/// jump from rule to rule, emulating rule invocations (method calls).
-/// I have to add a stack to simulate the proper lookahead sequences for
-/// the underlying LL grammar from which the ATN was derived.
-/// <p/>
-/// I use a set of ATNConfig objects not simple states. An ATNConfig
-/// is both a state (ala normal conversion) and a RuleContext describing
-/// the chain of rules (if any) followed to arrive at that state.
-/// <p/>
-/// A DFA state may have multiple references to a particular state,
-/// but with different ATN contexts (with same or different alts)
-/// meaning that state was reached via a different set of rule invocations.
-/// </summary>
-class ANTLR4CPP_PUBLIC DFAState {
- public:
- class PredPrediction {
- public:
- Ref<atn::SemanticContext>
- pred; // never null; at least SemanticContext.NONE
- int alt;
-
- PredPrediction(const Ref<atn::SemanticContext>& pred, int alt);
- virtual ~PredPrediction();
-
- virtual std::string toString();
-
- private:
- void InitializeInstanceFields();
- };
-
- int stateNumber;
-
- std::unique_ptr<atn::ATNConfigSet> configs;
-
- /// {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
- /// <seealso cref="Token#EOF"/> maps to {@code edges[0]}.
- // ml: this is a sparse list, so we use a map instead of a vector.
- // Watch out: we no longer have the -1 offset, as it isn't needed anymore.
- std::unordered_map<size_t, DFAState*> edges;
-
- bool isAcceptState;
-
- /// if accept state, what ttype do we match or alt do we predict?
- /// This is set to <seealso cref="ATN#INVALID_ALT_NUMBER"/> when <seealso
- /// cref="#predicates"/>{@code !=null} or <seealso
- /// cref="#requiresFullContext"/>.
- size_t prediction;
-
- Ref<atn::LexerActionExecutor> lexerActionExecutor;
-
- /// <summary>
- /// Indicates that this state was created during SLL prediction that
- /// discovered a conflict between the configurations in the state. Future
- /// <seealso cref="ParserATNSimulator#execATN"/> invocations immediately
- /// jumped doing full context prediction if this field is true.
- /// </summary>
- bool requiresFullContext;
-
- /// <summary>
- /// During SLL parsing, this is a list of predicates associated with the
- /// ATN configurations of the DFA state. When we have predicates,
- /// <seealso cref="#requiresFullContext"/> is {@code false} since full
- /// context prediction evaluates predicates on-the-fly. If this is not null,
- /// then <seealso cref="#prediction"/> is <seealso
- /// cref="ATN#INVALID_ALT_NUMBER"/>.
- /// <p/>
- /// We only use these for non-<seealso cref="#requiresFullContext"/> but
- /// conflicting states. That means we know from the context (it's $ or we
- /// don't dip into outer context) that it's an ambiguity not a conflict.
- /// <p/>
- /// This list is computed by <seealso
- /// cref="ParserATNSimulator#predicateDFAState"/>.
- /// </summary>
- std::vector<PredPrediction*> predicates;
-
- /// Map a predicate to a predicted alternative.
- DFAState();
- DFAState(int state);
- DFAState(std::unique_ptr<atn::ATNConfigSet> configs);
- virtual ~DFAState();
-
- /// <summary>
- /// Get the set of all alts mentioned by all ATN configurations in this
- /// DFA state.
- /// </summary>
- virtual std::set<size_t> getAltSet();
-
- virtual size_t hashCode() const;
-
- /// Two DFAState instances are equal if their ATN configuration sets
- /// are the same. This method is used to see if a state already exists.
- ///
- /// Because the number of alternatives and number of ATN configurations are
- /// finite, there is a finite number of DFA states that can be processed.
- /// This is necessary to show that the algorithm terminates.
- ///
- /// Cannot test the DFA state numbers here because in
- /// ParserATNSimulator#addDFAState we need to know if any other state
- /// exists that has this exact set of ATN configurations. The
- /// stateNumber is irrelevant.
- bool operator==(const DFAState& o) const;
-
- virtual std::string toString();
-
- struct Hasher {
- size_t operator()(DFAState* k) const { return k->hashCode(); }
- };
-
- struct Comparer {
- bool operator()(DFAState* lhs, DFAState* rhs) const { return *lhs == *rhs; }
- };
-
- private:
- void InitializeInstanceFields();
-};
-
-} // namespace dfa
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp
deleted file mode 100644
index 26c7c1b5d7..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Vocabulary.h"
-
-#include "dfa/LexerDFASerializer.h"
-
-using namespace antlr4::dfa;
-
-LexerDFASerializer::LexerDFASerializer(DFA* dfa)
- : DFASerializer(dfa, Vocabulary::EMPTY_VOCABULARY) {}
-
-LexerDFASerializer::~LexerDFASerializer() {}
-
-std::string LexerDFASerializer::getEdgeLabel(size_t i) const {
- return std::string("'") + static_cast<char>(i) + "'";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h
deleted file mode 100644
index 8749dc8b8f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/dfa/LexerDFASerializer.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "dfa/DFASerializer.h"
-
-namespace antlr4 {
-namespace dfa {
-
-class ANTLR4CPP_PUBLIC LexerDFASerializer : public DFASerializer {
- public:
- LexerDFASerializer(DFA* dfa);
- virtual ~LexerDFASerializer();
-
- protected:
- virtual std::string getEdgeLabel(size_t i) const override;
-};
-
-} // namespace dfa
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp
deleted file mode 100644
index 6eb130bc98..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Vocabulary.h"
-#include "atn/ATN.h"
-#include "atn/ATNDeserializer.h"
-
-#include "misc/InterpreterDataReader.h"
-
-using namespace antlr4::dfa;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-InterpreterData::InterpreterData(std::vector<std::string> const& literalNames,
- std::vector<std::string> const& symbolicNames)
- : vocabulary(literalNames, symbolicNames) {}
-
-InterpreterData InterpreterDataReader::parseFile(std::string const& fileName) {
- // The structure of the data file is very simple. Everything is line based
- // with empty lines separating the different parts. For lexers the layout is:
- // token literal names:
- // ...
- //
- // token symbolic names:
- // ...
- //
- // rule names:
- // ...
- //
- // channel names:
- // ...
- //
- // mode names:
- // ...
- //
- // atn:
- // <a single line with comma separated int values> enclosed in a pair of
- // squared brackets.
- //
- // Data for a parser does not contain channel and mode names.
-
- std::ifstream input(fileName);
- if (!input.good()) return {};
-
- std::vector<std::string> literalNames;
- std::vector<std::string> symbolicNames;
-
- std::string line;
-
- std::getline(input, line, '\n');
- assert(line == "token literal names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty()) break;
-
- literalNames.push_back(line == "null" ? "" : line);
- };
-
- std::getline(input, line, '\n');
- assert(line == "token symbolic names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty()) break;
-
- symbolicNames.push_back(line == "null" ? "" : line);
- };
- InterpreterData result(literalNames, symbolicNames);
-
- std::getline(input, line, '\n');
- assert(line == "rule names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty()) break;
-
- result.ruleNames.push_back(line);
- };
-
- std::getline(input, line, '\n');
- if (line == "channel names:") {
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty()) break;
-
- result.channels.push_back(line);
- };
-
- std::getline(input, line, '\n');
- assert(line == "mode names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty()) break;
-
- result.modes.push_back(line);
- };
- }
-
- std::vector<uint16_t> serializedATN;
-
- std::getline(input, line, '\n');
- assert(line == "atn:");
- std::getline(input, line, '\n');
- std::stringstream tokenizer(line);
- std::string value;
- while (tokenizer.good()) {
- std::getline(tokenizer, value, ',');
- unsigned long number;
- if (value[0] == '[')
- number = std::strtoul(&value[1], nullptr, 10);
- else
- number = std::strtoul(value.c_str(), nullptr, 10);
- serializedATN.push_back(static_cast<uint16_t>(number));
- }
-
- ATNDeserializer deserializer;
- result.atn = deserializer.deserialize(serializedATN);
- return result;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h
deleted file mode 100644
index 755010100a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/InterpreterDataReader.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace misc {
-
-struct InterpreterData {
- atn::ATN atn;
- dfa::Vocabulary vocabulary;
- std::vector<std::string> ruleNames;
- std::vector<std::string> channels; // Only valid for lexer grammars.
- std::vector<std::string> modes; // ditto
-
- InterpreterData(){}; // For invalid content.
- InterpreterData(std::vector<std::string> const& literalNames,
- std::vector<std::string> const& symbolicNames);
-};
-
-// A class to read plain text interpreter data produced by ANTLR.
-class ANTLR4CPP_PUBLIC InterpreterDataReader {
- public:
- static InterpreterData parseFile(std::string const& fileName);
-};
-
-} // namespace misc
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.cpp
deleted file mode 100644
index 2eab06d624..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/Interval.h"
-
-using namespace antlr4::misc;
-
-size_t antlr4::misc::numericToSymbol(ssize_t v) {
- return static_cast<size_t>(v);
-}
-
-ssize_t antlr4::misc::symbolToNumeric(size_t v) {
- return static_cast<ssize_t>(v);
-}
-
-Interval const Interval::INVALID;
-
-Interval::Interval()
- : Interval(static_cast<ssize_t>(-1),
- -2) { // Need an explicit cast here for VS.
-}
-
-Interval::Interval(size_t a_, size_t b_)
- : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) {}
-
-Interval::Interval(ssize_t a_, ssize_t b_) : a(a_), b(b_) {}
-
-size_t Interval::length() const {
- if (b < a) {
- return 0;
- }
- return size_t(b - a + 1);
-}
-
-bool Interval::operator==(const Interval& other) const {
- return a == other.a && b == other.b;
-}
-
-size_t Interval::hashCode() const {
- size_t hash = 23;
- hash = hash * 31 + static_cast<size_t>(a);
- hash = hash * 31 + static_cast<size_t>(b);
- return hash;
-}
-
-bool Interval::startsBeforeDisjoint(const Interval& other) const {
- return a < other.a && b < other.a;
-}
-
-bool Interval::startsBeforeNonDisjoint(const Interval& other) const {
- return a <= other.a && b >= other.a;
-}
-
-bool Interval::startsAfter(const Interval& other) const { return a > other.a; }
-
-bool Interval::startsAfterDisjoint(const Interval& other) const {
- return a > other.b;
-}
-
-bool Interval::startsAfterNonDisjoint(const Interval& other) const {
- return a > other.a && a <= other.b; // b >= other.b implied
-}
-
-bool Interval::disjoint(const Interval& other) const {
- return startsBeforeDisjoint(other) || startsAfterDisjoint(other);
-}
-
-bool Interval::adjacent(const Interval& other) const {
- return a == other.b + 1 || b == other.a - 1;
-}
-
-bool Interval::properlyContains(const Interval& other) const {
- return other.a >= a && other.b <= b;
-}
-
-Interval Interval::Union(const Interval& other) const {
- return Interval(std::min(a, other.a), std::max(b, other.b));
-}
-
-Interval Interval::intersection(const Interval& other) const {
- return Interval(std::max(a, other.a), std::min(b, other.b));
-}
-
-std::string Interval::toString() const {
- return std::to_string(a) + ".." + std::to_string(b);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.h
deleted file mode 100644
index 2556cc9e78..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Interval.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace misc {
-
-// Helpers to convert certain unsigned symbols (e.g. Token::EOF) to their
-// original numeric value (e.g. -1) and vice versa. This is needed mostly for
-// intervals to keep their original order and for toString() methods to print
-// the original numeric value (e.g. for tests).
-size_t numericToSymbol(ssize_t v);
-ssize_t symbolToNumeric(size_t v);
-
-/// An immutable inclusive interval a..b
-class ANTLR4CPP_PUBLIC Interval {
- public:
- static const Interval INVALID;
-
- // Must stay signed to guarantee the correct sort order.
- ssize_t a;
- ssize_t b;
-
- Interval();
- explicit Interval(size_t a_, size_t b_); // For unsigned -> signed mappings.
- Interval(ssize_t a_, ssize_t b_);
-
- /// return number of elements between a and b inclusively. x..x is length 1.
- /// if b < a, then length is 0. 9..10 has length 2.
- size_t length() const;
-
- bool operator==(const Interval& other) const;
-
- size_t hashCode() const;
-
- /// <summary>
- /// Does this start completely before other? Disjoint </summary>
- bool startsBeforeDisjoint(const Interval& other) const;
-
- /// <summary>
- /// Does this start at or before other? Nondisjoint </summary>
- bool startsBeforeNonDisjoint(const Interval& other) const;
-
- /// <summary>
- /// Does this.a start after other.b? May or may not be disjoint </summary>
- bool startsAfter(const Interval& other) const;
-
- /// <summary>
- /// Does this start completely after other? Disjoint </summary>
- bool startsAfterDisjoint(const Interval& other) const;
-
- /// <summary>
- /// Does this start after other? NonDisjoint </summary>
- bool startsAfterNonDisjoint(const Interval& other) const;
-
- /// <summary>
- /// Are both ranges disjoint? I.e., no overlap? </summary>
- bool disjoint(const Interval& other) const;
-
- /// <summary>
- /// Are two intervals adjacent such as 0..41 and 42..42? </summary>
- bool adjacent(const Interval& other) const;
-
- bool properlyContains(const Interval& other) const;
-
- /// <summary>
- /// Return the interval computed from combining this and other </summary>
- Interval Union(const Interval& other) const;
-
- /// <summary>
- /// Return the interval in common between this and o </summary>
- Interval intersection(const Interval& other) const;
-
- std::string toString() const;
-
- private:
-};
-
-} // namespace misc
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.cpp
deleted file mode 100644
index 3a12e9dbbc..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.cpp
+++ /dev/null
@@ -1,516 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "Lexer.h"
-#include "Vocabulary.h"
-#include "misc/MurmurHash.h"
-
-#include "misc/IntervalSet.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-
-IntervalSet const IntervalSet::COMPLETE_CHAR_SET =
- IntervalSet::of(Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE);
-
-IntervalSet const IntervalSet::EMPTY_SET;
-
-IntervalSet::IntervalSet() : _intervals() {}
-
-IntervalSet::IntervalSet(const IntervalSet& set) : IntervalSet() {
- _intervals = set._intervals;
-}
-
-IntervalSet::IntervalSet(IntervalSet&& set)
- : IntervalSet(std::move(set._intervals)) {}
-
-IntervalSet::IntervalSet(std::vector<Interval>&& intervals)
- : _intervals(std::move(intervals)) {}
-
-IntervalSet& IntervalSet::operator=(const IntervalSet& other) {
- _intervals = other._intervals;
- return *this;
-}
-
-IntervalSet& IntervalSet::operator=(IntervalSet&& other) {
- _intervals = move(other._intervals);
- return *this;
-}
-
-IntervalSet IntervalSet::of(ssize_t a) { return IntervalSet({Interval(a, a)}); }
-
-IntervalSet IntervalSet::of(ssize_t a, ssize_t b) {
- return IntervalSet({Interval(a, b)});
-}
-
-void IntervalSet::clear() { _intervals.clear(); }
-
-void IntervalSet::add(ssize_t el) { add(el, el); }
-
-void IntervalSet::add(ssize_t a, ssize_t b) { add(Interval(a, b)); }
-
-void IntervalSet::add(const Interval& addition) {
- if (addition.b < addition.a) {
- return;
- }
-
- // find position in list
- for (auto iterator = _intervals.begin(); iterator != _intervals.end();
- ++iterator) {
- Interval r = *iterator;
- if (addition == r) {
- return;
- }
-
- if (addition.adjacent(r) || !addition.disjoint(r)) {
- // next to each other, make a single larger interval
- Interval bigger = addition.Union(r);
- *iterator = bigger;
-
- // make sure we didn't just create an interval that
- // should be merged with next interval in list
- while (iterator + 1 != _intervals.end()) {
- Interval next = *++iterator;
- if (!bigger.adjacent(next) && bigger.disjoint(next)) {
- break;
- }
-
- // if we bump up against or overlap next, merge
- iterator = _intervals.erase(iterator); // remove this one
- --iterator; // move backwards to what we just set
- *iterator = bigger.Union(next); // set to 3 merged ones
- // ml: no need to advance iterator, we do that in the next round anyway.
- // ++iterator; // first call to next after previous duplicates the
- // result
- }
- return;
- }
-
- if (addition.startsBeforeDisjoint(r)) {
- // insert before r
- //--iterator;
- _intervals.insert(iterator, addition);
- return;
- }
-
- // if disjoint and after r, a future iteration will handle it
- }
-
- // ok, must be after last interval (and disjoint from last interval)
- // just add it
- _intervals.push_back(addition);
-}
-
-IntervalSet IntervalSet::Or(const std::vector<IntervalSet>& sets) {
- IntervalSet result;
- for (auto& s : sets) {
- result.addAll(s);
- }
- return result;
-}
-
-IntervalSet& IntervalSet::addAll(const IntervalSet& set) {
- // walk set and add each interval
- for (auto const& interval : set._intervals) {
- add(interval);
- }
- return *this;
-}
-
-IntervalSet IntervalSet::complement(ssize_t minElement,
- ssize_t maxElement) const {
- return complement(IntervalSet::of(minElement, maxElement));
-}
-
-IntervalSet IntervalSet::complement(const IntervalSet& vocabulary) const {
- return vocabulary.subtract(*this);
-}
-
-IntervalSet IntervalSet::subtract(const IntervalSet& other) const {
- return subtract(*this, other);
-}
-
-IntervalSet IntervalSet::subtract(const IntervalSet& left,
- const IntervalSet& right) {
- if (left.isEmpty()) {
- return IntervalSet();
- }
-
- if (right.isEmpty()) {
- // right set has no elements; just return the copy of the current set
- return left;
- }
-
- IntervalSet result(left);
- size_t resultI = 0;
- size_t rightI = 0;
- while (resultI < result._intervals.size() &&
- rightI < right._intervals.size()) {
- Interval& resultInterval = result._intervals[resultI];
- const Interval& rightInterval = right._intervals[rightI];
-
- // operation: (resultInterval - rightInterval) and update indexes
-
- if (rightInterval.b < resultInterval.a) {
- rightI++;
- continue;
- }
-
- if (rightInterval.a > resultInterval.b) {
- resultI++;
- continue;
- }
-
- Interval beforeCurrent;
- Interval afterCurrent;
- if (rightInterval.a > resultInterval.a) {
- beforeCurrent = Interval(resultInterval.a, rightInterval.a - 1);
- }
-
- if (rightInterval.b < resultInterval.b) {
- afterCurrent = Interval(rightInterval.b + 1, resultInterval.b);
- }
-
- if (beforeCurrent.a > -1) { // -1 is the default value
- if (afterCurrent.a > -1) {
- // split the current interval into two
- result._intervals[resultI] = beforeCurrent;
- result._intervals.insert(result._intervals.begin() + resultI + 1,
- afterCurrent);
- resultI++;
- rightI++;
- } else {
- // replace the current interval
- result._intervals[resultI] = beforeCurrent;
- resultI++;
- }
- } else {
- if (afterCurrent.a > -1) {
- // replace the current interval
- result._intervals[resultI] = afterCurrent;
- rightI++;
- } else {
- // remove the current interval (thus no need to increment resultI)
- result._intervals.erase(result._intervals.begin() + resultI);
- }
- }
- }
-
- // If rightI reached right.intervals.size(), no more intervals to subtract
- // from result. If resultI reached result.intervals.size(), we would be
- // subtracting from an empty set. Either way, we are done.
- return result;
-}
-
-IntervalSet IntervalSet::Or(const IntervalSet& a) const {
- IntervalSet result;
- result.addAll(*this);
- result.addAll(a);
- return result;
-}
-
-IntervalSet IntervalSet::And(const IntervalSet& other) const {
- IntervalSet intersection;
- size_t i = 0;
- size_t j = 0;
-
- // iterate down both interval lists looking for nondisjoint intervals
- while (i < _intervals.size() && j < other._intervals.size()) {
- Interval mine = _intervals[i];
- Interval theirs = other._intervals[j];
-
- if (mine.startsBeforeDisjoint(theirs)) {
- // move this iterator looking for interval that might overlap
- i++;
- } else if (theirs.startsBeforeDisjoint(mine)) {
- // move other iterator looking for interval that might overlap
- j++;
- } else if (mine.properlyContains(theirs)) {
- // overlap, add intersection, get next theirs
- intersection.add(mine.intersection(theirs));
- j++;
- } else if (theirs.properlyContains(mine)) {
- // overlap, add intersection, get next mine
- intersection.add(mine.intersection(theirs));
- i++;
- } else if (!mine.disjoint(theirs)) {
- // overlap, add intersection
- intersection.add(mine.intersection(theirs));
-
- // Move the iterator of lower range [a..b], but not
- // the upper range as it may contain elements that will collide
- // with the next iterator. So, if mine=[0..115] and
- // theirs=[115..200], then intersection is 115 and move mine
- // but not theirs as theirs may collide with the next range
- // in thisIter.
- // move both iterators to next ranges
- if (mine.startsAfterNonDisjoint(theirs)) {
- j++;
- } else if (theirs.startsAfterNonDisjoint(mine)) {
- i++;
- }
- }
- }
-
- return intersection;
-}
-
-bool IntervalSet::contains(size_t el) const {
- return contains(symbolToNumeric(el));
-}
-
-bool IntervalSet::contains(ssize_t el) const {
- if (_intervals.empty()) return false;
-
- if (el < _intervals[0]
- .a) // list is sorted and el is before first interval; not here
- return false;
-
- for (auto& interval : _intervals) {
- if (el >= interval.a && el <= interval.b) {
- return true; // found in this interval
- }
- }
- return false;
-}
-
-bool IntervalSet::isEmpty() const { return _intervals.empty(); }
-
-ssize_t IntervalSet::getSingleElement() const {
- if (_intervals.size() == 1) {
- if (_intervals[0].a == _intervals[0].b) {
- return _intervals[0].a;
- }
- }
-
- return Token::INVALID_TYPE; // XXX: this value is 0, but 0 is a valid
- // interval range, how can that work?
-}
-
-ssize_t IntervalSet::getMaxElement() const {
- if (_intervals.empty()) {
- return Token::INVALID_TYPE;
- }
-
- return _intervals.back().b;
-}
-
-ssize_t IntervalSet::getMinElement() const {
- if (_intervals.empty()) {
- return Token::INVALID_TYPE;
- }
-
- return _intervals[0].a;
-}
-
-std::vector<Interval> const& IntervalSet::getIntervals() const {
- return _intervals;
-}
-
-size_t IntervalSet::hashCode() const {
- size_t hash = MurmurHash::initialize();
- for (auto& interval : _intervals) {
- hash = MurmurHash::update(hash, interval.a);
- hash = MurmurHash::update(hash, interval.b);
- }
-
- return MurmurHash::finish(hash, _intervals.size() * 2);
-}
-
-bool IntervalSet::operator==(const IntervalSet& other) const {
- if (_intervals.empty() && other._intervals.empty()) return true;
-
- if (_intervals.size() != other._intervals.size()) return false;
-
- return std::equal(_intervals.begin(), _intervals.end(),
- other._intervals.begin());
-}
-
-std::string IntervalSet::toString() const { return toString(false); }
-
-std::string IntervalSet::toString(bool elemAreChar) const {
- if (_intervals.empty()) {
- return "{}";
- }
-
- std::stringstream ss;
- size_t effectiveSize = size();
- if (effectiveSize > 1) {
- ss << "{";
- }
-
- bool firstEntry = true;
- for (auto& interval : _intervals) {
- if (!firstEntry) ss << ", ";
- firstEntry = false;
-
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- if (a == b) {
- if (a == -1) {
- ss << "<EOF>";
- } else if (elemAreChar) {
- ss << "'" << static_cast<char>(a) << "'";
- } else {
- ss << a;
- }
- } else {
- if (elemAreChar) {
- ss << "'" << static_cast<char>(a) << "'..'" << static_cast<char>(b)
- << "'";
- } else {
- ss << a << ".." << b;
- }
- }
- }
- if (effectiveSize > 1) {
- ss << "}";
- }
-
- return ss.str();
-}
-
-std::string IntervalSet::toString(
- const std::vector<std::string>& tokenNames) const {
- return toString(dfa::Vocabulary::fromTokenNames(tokenNames));
-}
-
-std::string IntervalSet::toString(const dfa::Vocabulary& vocabulary) const {
- if (_intervals.empty()) {
- return "{}";
- }
-
- std::stringstream ss;
- size_t effectiveSize = size();
- if (effectiveSize > 1) {
- ss << "{";
- }
-
- bool firstEntry = true;
- for (auto& interval : _intervals) {
- if (!firstEntry) ss << ", ";
- firstEntry = false;
-
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- if (a == b) {
- ss << elementName(vocabulary, a);
- } else {
- for (ssize_t i = a; i <= b; i++) {
- if (i > a) {
- ss << ", ";
- }
- ss << elementName(vocabulary, i);
- }
- }
- }
- if (effectiveSize > 1) {
- ss << "}";
- }
-
- return ss.str();
-}
-
-std::string IntervalSet::elementName(const std::vector<std::string>& tokenNames,
- ssize_t a) const {
- return elementName(dfa::Vocabulary::fromTokenNames(tokenNames), a);
-}
-
-std::string IntervalSet::elementName(const dfa::Vocabulary& vocabulary,
- ssize_t a) const {
- if (a == -1) {
- return "<EOF>";
- } else if (a == -2) {
- return "<EPSILON>";
- } else {
- return vocabulary.getDisplayName(a);
- }
-}
-
-size_t IntervalSet::size() const {
- size_t result = 0;
- for (auto& interval : _intervals) {
- result += size_t(interval.b - interval.a + 1);
- }
- return result;
-}
-
-std::vector<ssize_t> IntervalSet::toList() const {
- std::vector<ssize_t> result;
- for (auto& interval : _intervals) {
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- for (ssize_t v = a; v <= b; v++) {
- result.push_back(v);
- }
- }
- return result;
-}
-
-std::set<ssize_t> IntervalSet::toSet() const {
- std::set<ssize_t> result;
- for (auto& interval : _intervals) {
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- for (ssize_t v = a; v <= b; v++) {
- result.insert(v);
- }
- }
- return result;
-}
-
-ssize_t IntervalSet::get(size_t i) const {
- size_t index = 0;
- for (auto& interval : _intervals) {
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- for (ssize_t v = a; v <= b; v++) {
- if (index == i) {
- return v;
- }
- index++;
- }
- }
- return -1;
-}
-
-void IntervalSet::remove(size_t el) { remove(symbolToNumeric(el)); }
-
-void IntervalSet::remove(ssize_t el) {
- for (size_t i = 0; i < _intervals.size(); ++i) {
- Interval& interval = _intervals[i];
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- if (el < a) {
- break; // list is sorted and el is before this interval; not here
- }
-
- // if whole interval x..x, rm
- if (el == a && el == b) {
- _intervals.erase(_intervals.begin() + (long)i);
- break;
- }
- // if on left edge x..b, adjust left
- if (el == a) {
- interval.a++;
- break;
- }
- // if on right edge a..x, adjust right
- if (el == b) {
- interval.b--;
- break;
- }
- // if in middle a..x..b, split interval
- if (el > a && el < b) { // found in this interval
- ssize_t oldb = interval.b;
- interval.b = el - 1; // [a..x-1]
- add(el + 1, oldb); // add [x+1..b]
-
- break; // ml: not in the Java code but I believe we also should stop
- // searching here, as we found x.
- }
- }
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.h
deleted file mode 100644
index 37c9acfa53..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/IntervalSet.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Exceptions.h"
-#include "misc/Interval.h"
-
-namespace antlr4 {
-namespace misc {
-
-/**
- * This class implements the {@link IntSet} backed by a sorted array of
- * non-overlapping intervals. It is particularly efficient for representing
- * large collections of numbers, where the majority of elements appear as part
- * of a sequential range of numbers that are all part of the set. For example,
- * the set { 1, 2, 3, 4, 7, 8 } may be represented as { [1, 4], [7, 8] }.
- *
- * <p>
- * This class is able to represent sets containing any combination of values in
- * the range {@link Integer#MIN_VALUE} to {@link Integer#MAX_VALUE}
- * (inclusive).</p>
- */
-class ANTLR4CPP_PUBLIC IntervalSet {
- public:
- static IntervalSet const COMPLETE_CHAR_SET;
- static IntervalSet const EMPTY_SET;
-
- private:
- /// The list of sorted, disjoint intervals.
- std::vector<Interval> _intervals;
-
- explicit IntervalSet(std::vector<Interval>&& intervals);
-
- public:
- IntervalSet();
- IntervalSet(IntervalSet const& set);
- IntervalSet(IntervalSet&& set);
-
- template <typename T1, typename... T_NEXT>
- IntervalSet(int, T1 t1, T_NEXT&&... next) : IntervalSet() {
- // The first int argument is an ignored count for compatibility
- // with the previous varargs based interface.
- addItems(t1, std::forward<T_NEXT>(next)...);
- }
-
- IntervalSet& operator=(IntervalSet const& set);
- IntervalSet& operator=(IntervalSet&& set);
-
- /// Create a set with a single element, el.
- static IntervalSet of(ssize_t a);
-
- /// Create a set with all ints within range [a..b] (inclusive)
- static IntervalSet of(ssize_t a, ssize_t b);
-
- void clear();
-
- /// Add a single element to the set. An isolated element is stored
- /// as a range el..el.
- void add(ssize_t el);
-
- /// Add interval; i.e., add all integers from a to b to set.
- /// If b<a, do nothing.
- /// Keep list in sorted order (by left range value).
- /// If overlap, combine ranges. For example,
- /// If this is {1..5, 10..20}, adding 6..7 yields
- /// {1..5, 6..7, 10..20}. Adding 4..8 yields {1..8, 10..20}.
- void add(ssize_t a, ssize_t b);
-
- /// combine all sets in the array returned the or'd value
- static IntervalSet Or(const std::vector<IntervalSet>& sets);
-
- // Copy on write so we can cache a..a intervals and sets of that.
- void add(const Interval& addition);
- IntervalSet& addAll(const IntervalSet& set);
-
- template <typename T1, typename... T_NEXT>
- void addItems(T1 t1, T_NEXT&&... next) {
- add(t1);
- addItems(std::forward<T_NEXT>(next)...);
- }
-
- IntervalSet complement(ssize_t minElement, ssize_t maxElement) const;
-
- /// Given the set of possible values (rather than, say UNICODE or MAXINT),
- /// return a new set containing all elements in vocabulary, but not in
- /// this. The computation is (vocabulary - this).
- ///
- /// 'this' is assumed to be either a subset or equal to vocabulary.
- IntervalSet complement(const IntervalSet& vocabulary) const;
-
- /// Compute this-other via this&~other.
- /// Return a new set containing all elements in this but not in other.
- /// other is assumed to be a subset of this;
- /// anything that is in other but not in this will be ignored.
- IntervalSet subtract(const IntervalSet& other) const;
-
- /**
- * Compute the set difference between two interval sets. The specific
- * operation is {@code left - right}. If either of the input sets is
- * {@code null}, it is treated as though it was an empty set.
- */
- static IntervalSet subtract(const IntervalSet& left,
- const IntervalSet& right);
-
- IntervalSet Or(const IntervalSet& a) const;
-
- /// Return a new set with the intersection of this set with other. Because
- /// the intervals are sorted, we can use an iterator for each list and
- /// just walk them together. This is roughly O(min(n,m)) for interval
- /// list lengths n and m.
- IntervalSet And(const IntervalSet& other) const;
-
- /// Is el in any range of this set?
- bool contains(size_t el) const; // For mapping of e.g. Token::EOF to -1 etc.
- bool contains(ssize_t el) const;
-
- /// return true if this set has no members
- bool isEmpty() const;
-
- /// If this set is a single integer, return it otherwise Token.INVALID_TYPE.
- ssize_t getSingleElement() const;
-
- /**
- * Returns the maximum value contained in the set.
- *
- * @return the maximum value contained in the set. If the set is empty, this
- * method returns {@link Token#INVALID_TYPE}.
- */
- ssize_t getMaxElement() const;
-
- /**
- * Returns the minimum value contained in the set.
- *
- * @return the minimum value contained in the set. If the set is empty, this
- * method returns {@link Token#INVALID_TYPE}.
- */
- ssize_t getMinElement() const;
-
- /// <summary>
- /// Return a list of Interval objects. </summary>
- std::vector<Interval> const& getIntervals() const;
-
- size_t hashCode() const;
-
- /// Are two IntervalSets equal? Because all intervals are sorted
- /// and disjoint, equals is a simple linear walk over both lists
- /// to make sure they are the same.
- bool operator==(const IntervalSet& other) const;
- std::string toString() const;
- std::string toString(bool elemAreChar) const;
-
- /**
- * @deprecated Use {@link #toString(Vocabulary)} instead.
- */
- std::string toString(const std::vector<std::string>& tokenNames) const;
- std::string toString(const dfa::Vocabulary& vocabulary) const;
-
- protected:
- /**
- * @deprecated Use {@link #elementName(Vocabulary, int)} instead.
- */
- std::string elementName(const std::vector<std::string>& tokenNames,
- ssize_t a) const;
- std::string elementName(const dfa::Vocabulary& vocabulary, ssize_t a) const;
-
- public:
- size_t size() const;
- std::vector<ssize_t> toList() const;
- std::set<ssize_t> toSet() const;
-
- /// Get the ith element of ordered set. Used only by RandomPhrase so
- /// don't bother to implement if you're not doing that for a new
- /// ANTLR code gen target.
- ssize_t get(size_t i) const;
- void remove(size_t el); // For mapping of e.g. Token::EOF to -1 etc.
- void remove(ssize_t el);
-
- private:
- void addItems() { /* No-op */
- }
-};
-
-} // namespace misc
-} // namespace antlr4
-
-// Hash function for IntervalSet.
-
-namespace std {
-using antlr4::misc::IntervalSet;
-
-template <>
-struct hash<IntervalSet> {
- size_t operator()(const IntervalSet& x) const { return x.hashCode(); }
-};
-} // namespace std
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.cpp
deleted file mode 100644
index d073079c5e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.cpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-
-using namespace antlr4::misc;
-
-// A variation of the MurmurHash3 implementation
-// (https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp) Here
-// we unrolled the loop used there into individual calls to update(), as we
-// usually hash object fields instead of entire buffers.
-
-// Platform-specific functions and macros
-
-// Microsoft Visual Studio
-
-#if defined(_MSC_VER)
-
-#define FORCE_INLINE __forceinline
-
-#include <stdlib.h>
-
-#define ROTL32(x, y) _rotl(x, y)
-#define ROTL64(x, y) _rotl64(x, y)
-
-#define BIG_CONSTANT(x) (x)
-
-#else // defined(_MSC_VER)
-
-// Other compilers
-
-#define FORCE_INLINE inline __attribute__((always_inline))
-
-inline uint32_t rotl32(uint32_t x, int8_t r) {
- return (x << r) | (x >> (32 - r));
-}
-
-inline uint64_t rotl64(uint64_t x, int8_t r) {
- return (x << r) | (x >> (64 - r));
-}
-
-#define ROTL32(x, y) rotl32(x, y)
-#define ROTL64(x, y) rotl64(x, y)
-
-#define BIG_CONSTANT(x) (x##LLU)
-
-#endif // !defined(_MSC_VER)
-
-size_t MurmurHash::initialize() { return initialize(DEFAULT_SEED); }
-
-size_t MurmurHash::initialize(size_t seed) { return seed; }
-
-#if defined(_WIN32) || defined(_WIN64)
-#if _WIN64
-#define ENVIRONMENT64
-#else
-#define ENVIRONMENT32
-#endif
-#endif
-
-#if defined(__GNUC__)
-#if defined(__x86_64__) || defined(__ppc64__)
-#define ENVIRONMENT64
-#else
-#define ENVIRONMENT32
-#endif
-#endif
-
-#if defined(ENVIRONMENT32)
-
-size_t MurmurHash::update(size_t hash, size_t value) {
- static const size_t c1 = 0xCC9E2D51;
- static const size_t c2 = 0x1B873593;
-
- size_t k1 = value;
- k1 *= c1;
- k1 = ROTL32(k1, 15);
- k1 *= c2;
-
- hash ^= k1;
- hash = ROTL32(hash, 13);
- hash = hash * 5 + 0xE6546B64;
-
- return hash;
-}
-
-size_t MurmurHash::finish(size_t hash, size_t entryCount) {
- hash ^= entryCount * 4;
- hash ^= hash >> 16;
- hash *= 0x85EBCA6B;
- hash ^= hash >> 13;
- hash *= 0xC2B2AE35;
- hash ^= hash >> 16;
- return hash;
-}
-
-#else
-
-size_t MurmurHash::update(size_t hash, size_t value) {
- static const size_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
- static const size_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
-
- size_t k1 = value;
- k1 *= c1;
- k1 = ROTL64(k1, 31);
- k1 *= c2;
-
- hash ^= k1;
- hash = ROTL64(hash, 27);
- hash = hash * 5 + 0x52dce729;
-
- return hash;
-}
-
-size_t MurmurHash::finish(size_t hash, size_t entryCount) {
- hash ^= entryCount * 8;
- hash ^= hash >> 33;
- hash *= 0xff51afd7ed558ccd;
- hash ^= hash >> 33;
- hash *= 0xc4ceb9fe1a85ec53;
- hash ^= hash >> 33;
- return hash;
-}
-
-#endif
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.h
deleted file mode 100644
index fe7fe55131..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/MurmurHash.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace misc {
-
-class ANTLR4CPP_PUBLIC MurmurHash {
- private:
- static const size_t DEFAULT_SEED = 0;
-
- /// Initialize the hash using the default seed value.
- /// Returns the intermediate hash value.
- public:
- static size_t initialize();
-
- /// Initialize the hash using the specified seed.
- static size_t initialize(size_t seed);
-
- /// Update the intermediate hash value for the next input {@code value}.
- /// <param name="hash"> the intermediate hash value </param>
- /// <param name="value"> the value to add to the current hash </param>
- /// Returns the updated intermediate hash value.
- static size_t update(size_t hash, size_t value);
-
- /**
- * Update the intermediate hash value for the next input {@code value}.
- *
- * @param hash the intermediate hash value
- * @param value the value to add to the current hash
- * @return the updated intermediate hash value
- */
- template <class T>
- static size_t update(size_t hash, Ref<T> const& value) {
- return update(hash, value != nullptr ? value->hashCode() : 0);
- }
-
- template <class T>
- static size_t update(size_t hash, T* value) {
- return update(hash, value != nullptr ? value->hashCode() : 0);
- }
-
- /// <summary>
- /// Apply the final computation steps to the intermediate value {@code hash}
- /// to form the final result of the MurmurHash 3 hash function.
- /// </summary>
- /// <param name="hash"> the intermediate hash value </param>
- /// <param name="entryCount"> the number of calls to update() before calling
- /// finish() </param> <returns> the final hash result </returns>
- static size_t finish(size_t hash, size_t entryCount);
-
- /// Utility function to compute the hash code of an array using the
- /// MurmurHash3 algorithm.
- ///
- /// @param <T> the array element type </param>
- /// <param name="data"> the array data </param>
- /// <param name="seed"> the seed for the MurmurHash algorithm </param>
- /// <returns> the hash code of the data </returns>
- template <typename T> // where T is C array type
- static size_t hashCode(const std::vector<Ref<T>>& data, size_t seed) {
- size_t hash = initialize(seed);
- for (auto entry : data) {
- hash = update(hash, entry->hashCode());
- }
-
- return finish(hash, data.size());
- }
-};
-
-} // namespace misc
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.cpp
deleted file mode 100644
index 8002562743..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.cpp
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "misc/Predicate.h"
-
-antlr4::misc::Predicate::~Predicate() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.h
deleted file mode 100644
index b577a61cfa..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/misc/Predicate.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace misc {
-
-class ANTLR4CPP_PUBLIC Predicate {
- public:
- virtual ~Predicate();
-
- virtual bool test(tree::ParseTree* t) = 0;
-};
-
-} // namespace misc
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.cpp
deleted file mode 100644
index 23129ab44c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Any.h"
-
-using namespace antlrcpp;
-
-Any::~Any() { delete _ptr; }
-
-Any::Base::~Base() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.h
deleted file mode 100644
index 874048fef0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Any.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-// A standard C++ class loosely modeled after boost::Any.
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning( \
- disable : 4521) // 'antlrcpp::Any': multiple copy constructors specified
-#endif
-
-namespace antlrcpp {
-
-template <class T>
-using StorageType = typename std::decay<T>::type;
-
-struct ANTLR4CPP_PUBLIC Any {
- bool isNull() const { return _ptr == nullptr; }
- bool isNotNull() const { return _ptr != nullptr; }
-
- Any() : _ptr(nullptr) {}
-
- Any(Any& that) : _ptr(that.clone()) {}
-
- Any(Any&& that) : _ptr(that._ptr) { that._ptr = nullptr; }
-
- Any(const Any& that) : _ptr(that.clone()) {}
-
- Any(const Any&& that) : _ptr(that.clone()) {}
-
- template <typename U>
- Any(U&& value) : _ptr(new Derived<StorageType<U>>(std::forward<U>(value))) {}
-
- template <class U>
- bool is() const {
- typedef StorageType<U> T;
-
- auto derived = dynamic_cast<Derived<T>*>(_ptr);
-
- return derived != nullptr;
- }
-
- template <class U>
- StorageType<U>& as() {
- typedef StorageType<U> T;
-
- auto derived = dynamic_cast<Derived<T>*>(_ptr);
-
- if (!derived) throw std::bad_cast();
-
- return derived->value;
- }
-
- template <class U>
- operator U() {
- return as<StorageType<U>>();
- }
-
- Any& operator=(const Any& a) {
- if (_ptr == a._ptr) return *this;
-
- auto old_ptr = _ptr;
- _ptr = a.clone();
-
- if (old_ptr) delete old_ptr;
-
- return *this;
- }
-
- Any& operator=(Any&& a) {
- if (_ptr == a._ptr) return *this;
-
- std::swap(_ptr, a._ptr);
-
- return *this;
- }
-
- virtual ~Any();
-
- virtual bool equals(Any other) const { return _ptr == other._ptr; }
-
- private:
- struct Base {
- virtual ~Base();
- virtual Base* clone() const = 0;
- };
-
- template <typename T>
- struct Derived : Base {
- template <typename U>
- Derived(U&& value_) : value(std::forward<U>(value_)) {}
-
- T value;
-
- Base* clone() const { return new Derived<T>(value); }
- };
-
- Base* clone() const {
- if (_ptr)
- return _ptr->clone();
- else
- return nullptr;
- }
-
- Base* _ptr;
-};
-
-template <>
-inline Any::Any(std::nullptr_t&&) : _ptr(nullptr) {}
-
-} // namespace antlrcpp
-
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.cpp
deleted file mode 100644
index 426d3fca44..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "tree/ParseTree.h"
-
-#include "support/Arrays.h"
-
-using namespace antlrcpp;
-
-std::string Arrays::listToString(const std::vector<std::string>& list,
- const std::string& separator) {
- std::stringstream ss;
- bool firstEntry = true;
-
- ss << '[';
- for (auto& entry : list) {
- ss << entry;
- if (firstEntry) {
- ss << separator;
- firstEntry = false;
- }
- }
-
- ss << ']';
- return ss.str();
-}
-
-template <>
-std::string Arrays::toString(
- const std::vector<antlr4::tree::ParseTree*>& source) {
- std::string result = "[";
- bool firstEntry = true;
- for (auto value : source) {
- result += value->toStringTree();
- if (firstEntry) {
- result += ", ";
- firstEntry = false;
- }
- }
- return result + "]";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.h
deleted file mode 100644
index ed0cc21323..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Arrays.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
-class ANTLR4CPP_PUBLIC Arrays {
- public:
- static std::string listToString(const std::vector<std::string>& list,
- const std::string& separator);
-
- template <typename T>
- static bool equals(const std::vector<T>& a, const std::vector<T>& b) {
- if (a.size() != b.size()) return false;
-
- for (size_t i = 0; i < a.size(); ++i)
- if (!(a[i] == b[i])) return false;
-
- return true;
- }
-
- template <typename T>
- static bool equals(const std::vector<T*>& a, const std::vector<T*>& b) {
- if (a.size() != b.size()) return false;
-
- for (size_t i = 0; i < a.size(); ++i) {
- if (a[i] == b[i]) continue;
- if (!(*a[i] == *b[i])) return false;
- }
-
- return true;
- }
-
- template <typename T>
- static bool equals(const std::vector<Ref<T>>& a,
- const std::vector<Ref<T>>& b) {
- if (a.size() != b.size()) return false;
-
- for (size_t i = 0; i < a.size(); ++i) {
- if (!a[i] && !b[i]) continue;
- if (!a[i] || !b[i]) return false;
- if (a[i] == b[i]) continue;
-
- if (!(*a[i] == *b[i])) return false;
- }
-
- return true;
- }
-
- template <typename T>
- static std::string toString(const std::vector<T>& source) {
- std::string result = "[";
- bool firstEntry = true;
- for (auto& value : source) {
- result += value.toString();
- if (firstEntry) {
- result += ", ";
- firstEntry = false;
- }
- }
- return result + "]";
- }
-
- template <typename T>
- static std::string toString(const std::vector<Ref<T>>& source) {
- std::string result = "[";
- bool firstEntry = true;
- for (auto& value : source) {
- result += value->toString();
- if (firstEntry) {
- result += ", ";
- firstEntry = false;
- }
- }
- return result + "]";
- }
-
- template <typename T>
- static std::string toString(const std::vector<T*>& source) {
- std::string result = "[";
- bool firstEntry = true;
- for (auto value : source) {
- result += value->toString();
- if (firstEntry) {
- result += ", ";
- firstEntry = false;
- }
- }
- return result + "]";
- }
-};
-
-template <>
-std::string Arrays::toString(
- const std::vector<antlr4::tree::ParseTree*>& source);
-} // namespace antlrcpp
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/BitSet.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/BitSet.h
deleted file mode 100644
index dad5e8a3ae..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/BitSet.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
-class ANTLR4CPP_PUBLIC BitSet : public std::bitset<1024> {
- public:
- size_t nextSetBit(size_t pos) const {
- for (size_t i = pos; i < size(); i++) {
- if (test(i)) {
- return i;
- }
- }
-
- return INVALID_INDEX;
- }
-
- // Prints a list of every index for which the bitset contains a bit set to true.
- friend std::wostream& operator<<(std::wostream& os, const BitSet& obj) {
- os << "{";
- size_t total = obj.count();
- for (size_t i = 0; i < obj.size(); i++) {
- if (obj.test(i)) {
- os << i;
- --total;
- if (total > 1) {
- os << ", ";
- }
- }
- }
-
- os << "}";
- return os;
- }
-
- static std::string subStringRepresentation(
- const std::vector<BitSet>::iterator& begin,
- const std::vector<BitSet>::iterator& end) {
- std::string result;
- std::vector<BitSet>::iterator vectorIterator;
-
- for (vectorIterator = begin; vectorIterator != end; vectorIterator++) {
- result += vectorIterator->toString();
- }
- // Grab the end
- result += end->toString();
-
- return result;
- }
-
- std::string toString() {
- std::stringstream stream;
- stream << "{";
- bool valueAdded = false;
- for (size_t i = 0; i < size(); ++i) {
- if (test(i)) {
- if (valueAdded) {
- stream << ", ";
- }
- stream << i;
- valueAdded = true;
- }
- }
-
- stream << "}";
- return stream.str();
- }
-};
-} // namespace antlrcpp
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.cpp
deleted file mode 100644
index 87c82a3b81..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.cpp
+++ /dev/null
@@ -1,237 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/CPPUtils.h"
-
-namespace antlrcpp {
-
-std::string join(std::vector<std::string> strings,
- const std::string& separator) {
- std::string str;
- bool firstItem = true;
- for (std::string s : strings) {
- if (!firstItem) {
- str.append(separator);
- }
- firstItem = false;
- str.append(s);
- }
- return str;
-}
-
-std::map<std::string, size_t> toMap(const std::vector<std::string>& keys) {
- std::map<std::string, size_t> result;
- for (size_t i = 0; i < keys.size(); ++i) {
- result.insert({keys[i], i});
- }
- return result;
-}
-
-std::string escapeWhitespace(std::string str, bool escapeSpaces) {
- std::string result;
- for (auto c : str) {
- switch (c) {
- case '\n':
- result += "\\n";
- break;
-
- case '\r':
- result += "\\r";
- break;
-
- case '\t':
- result += "\\t";
- break;
-
- case ' ':
- if (escapeSpaces) {
- result += "·";
- break;
- }
- result += c;
- break;
-
- default:
- result += c;
- }
- }
-
- return result;
-}
-
-std::string toHexString(const int t) {
- std::stringstream stream;
- stream << std::uppercase << std::hex << t;
- return stream.str();
-}
-
-std::string arrayToString(const std::vector<std::string>& data) {
- std::string answer;
- for (auto sub : data) {
- answer += sub;
- }
- return answer;
-}
-
-std::string replaceString(const std::string& s, const std::string& from,
- const std::string& to) {
- std::string::size_type p;
- std::string ss, res;
-
- ss = s;
- p = ss.find(from);
- while (p != std::string::npos) {
- if (p > 0)
- res.append(ss.substr(0, p)).append(to);
- else
- res.append(to);
- ss = ss.substr(p + from.size());
- p = ss.find(from);
- }
- res.append(ss);
-
- return res;
-}
-
-std::vector<std::string> split(const std::string& s, const std::string& sep,
- int count) {
- std::vector<std::string> parts;
- std::string ss = s;
-
- std::string::size_type p;
-
- if (s.empty()) return parts;
-
- if (count == 0) count = -1;
-
- p = ss.find(sep);
- while (!ss.empty() && p != std::string::npos && (count < 0 || count > 0)) {
- parts.push_back(ss.substr(0, p));
- ss = ss.substr(p + sep.size());
-
- --count;
- p = ss.find(sep);
- }
- parts.push_back(ss);
-
- return parts;
-}
-
-//--------------------------------------------------------------------------------------------------
-
-// Debugging helper. Adds indentation to all lines in the given string.
-std::string indent(const std::string& s, const std::string& indentation,
- bool includingFirst) {
- std::vector<std::string> parts = split(s, "\n", -1);
- for (size_t i = 0; i < parts.size(); ++i) {
- if (i == 0 && !includingFirst) continue;
- parts[i].insert(0, indentation);
- }
-
- return join(parts, "\n");
-}
-
-//--------------------------------------------------------------------------------------------------
-
-// Recursively get the error from a, possibly nested, exception.
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
-// No nested exceptions before VS 2015.
-template <typename T>
-std::exception_ptr get_nested(const T& /*e*/) {
- try {
- return nullptr;
- } catch (const std::bad_cast&) {
- return nullptr;
- }
-}
-#else
-template <typename T>
-std::exception_ptr get_nested(const T& e) {
- try {
- auto nested = dynamic_cast<const std::nested_exception&>(e);
- return nested.nested_ptr();
- } catch (const std::bad_cast&) {
- return nullptr;
- }
-}
-#endif
-
-std::string what(std::exception_ptr eptr) {
- if (!eptr) {
- throw std::bad_exception();
- }
-
- std::string result;
- std::size_t nestCount = 0;
-
-next : {
- try {
- std::exception_ptr yeptr;
- std::swap(eptr, yeptr);
- std::rethrow_exception(yeptr);
- } catch (const std::exception& e) {
- result += e.what();
- eptr = get_nested(e);
- } catch (const std::string& e) {
- result += e;
- } catch (const char* e) {
- result += e;
- } catch (...) {
- result += "cannot be determined";
- }
-
- if (eptr) {
- result += " (";
- ++nestCount;
- goto next;
- }
-}
-
- result += std::string(nestCount, ')');
- return result;
-}
-
-//----------------- FinallyAction
-//------------------------------------------------------------------------------------
-
-FinalAction finally(std::function<void()> f) { return FinalAction(f); }
-
-//----------------- SingleWriteMultipleRead
-//--------------------------------------------------------------------------
-
-void SingleWriteMultipleReadLock::readLock() {
- std::unique_lock<std::mutex> lock(_mutex);
- while (_waitingWriters != 0) _readerGate.wait(lock);
- ++_activeReaders;
- lock.unlock();
-}
-
-void SingleWriteMultipleReadLock::readUnlock() {
- std::unique_lock<std::mutex> lock(_mutex);
- --_activeReaders;
- lock.unlock();
- _writerGate.notify_one();
-}
-
-void SingleWriteMultipleReadLock::writeLock() {
- std::unique_lock<std::mutex> lock(_mutex);
- ++_waitingWriters;
- while (_activeReaders != 0 || _activeWriters != 0) _writerGate.wait(lock);
- ++_activeWriters;
- lock.unlock();
-}
-
-void SingleWriteMultipleReadLock::writeUnlock() {
- std::unique_lock<std::mutex> lock(_mutex);
- --_waitingWriters;
- --_activeWriters;
- if (_waitingWriters > 0)
- _writerGate.notify_one();
- else
- _readerGate.notify_all();
- lock.unlock();
-}
-
-} // namespace antlrcpp
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.h
deleted file mode 100644
index 0320baf6ba..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/CPPUtils.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
-std::string join(std::vector<std::string> strings,
- const std::string& separator);
-std::map<std::string, size_t> toMap(const std::vector<std::string>& keys);
-std::string escapeWhitespace(std::string str, bool escapeSpaces);
-std::string toHexString(const int t);
-std::string arrayToString(const std::vector<std::string>& data);
-std::string replaceString(const std::string& s, const std::string& from,
- const std::string& to);
-std::vector<std::string> split(const std::string& s, const std::string& sep,
- int count);
-std::string indent(const std::string& s, const std::string& indentation,
- bool includingFirst = true);
-
-// Using RAII + a lambda to implement a "finally" replacement.
-struct FinalAction {
- FinalAction(std::function<void()> f) : _cleanUp{f} {}
- FinalAction(FinalAction&& other)
- : _cleanUp(std::move(other._cleanUp)), _enabled(other._enabled) {
- other._enabled =
- false; // Don't trigger the lambda after ownership has moved.
- }
- ~FinalAction() {
- if (_enabled) _cleanUp();
- }
-
- void disable() { _enabled = false; }
-
- private:
- std::function<void()> _cleanUp;
- bool _enabled{true};
-};
-
-ANTLR4CPP_PUBLIC FinalAction finally(std::function<void()> f);
-
-// Convenience functions to avoid lengthy dynamic_cast() != nullptr checks in
-// many places.
-template <typename T1, typename T2>
-inline bool is(T2* obj) { // For pointer types.
- return dynamic_cast<typename std::add_const<T1>::type>(obj) != nullptr;
-}
-
-template <typename T1, typename T2>
-inline bool is(Ref<T2> const& obj) { // For shared pointers.
- return dynamic_cast<T1*>(obj.get()) != nullptr;
-}
-
-template <typename T>
-std::string toString(const T& o) {
- std::stringstream ss;
- // typeid gives the mangled class name, but that's all that's possible
- // in a portable way.
- ss << typeid(o).name() << "@" << std::hex << reinterpret_cast<uintptr_t>(&o);
- return ss.str();
-}
-
-// Get the error text from an exception pointer or the current exception.
-std::string what(std::exception_ptr eptr = std::current_exception());
-
-class SingleWriteMultipleReadLock {
- public:
- void readLock();
- void readUnlock();
- void writeLock();
- void writeUnlock();
-
- private:
- std::condition_variable _readerGate;
- std::condition_variable _writerGate;
-
- std::mutex _mutex;
- size_t _activeReaders = 0;
- size_t _waitingWriters = 0;
- size_t _activeWriters = 0;
-};
-
-} // namespace antlrcpp
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Declarations.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Declarations.h
deleted file mode 100644
index c5269cc1ba..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/Declarations.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-namespace antlr4 {
-class ANTLRErrorListener;
-class ANTLRErrorStrategy;
-class ANTLRFileStream;
-class ANTLRInputStream;
-class BailErrorStrategy;
-class BaseErrorListener;
-class BufferedTokenStream;
-class CharStream;
-class CommonToken;
-class CommonTokenFactory;
-class CommonTokenStream;
-class ConsoleErrorListener;
-class DefaultErrorStrategy;
-class DiagnosticErrorListener;
-class EmptyStackException;
-class FailedPredicateException;
-class IllegalArgumentException;
-class IllegalStateException;
-class InputMismatchException;
-class IntStream;
-class InterpreterRuleContext;
-class Lexer;
-class LexerInterpreter;
-class LexerNoViableAltException;
-class ListTokenSource;
-class NoSuchElementException;
-class NoViableAltException;
-class NullPointerException;
-class ParseCancellationException;
-class Parser;
-class ParserInterpreter;
-class ParserRuleContext;
-class ProxyErrorListener;
-class RecognitionException;
-class Recognizer;
-class RuleContext;
-class Token;
-template <typename Symbol>
-class TokenFactory;
-class TokenSource;
-class TokenStream;
-class TokenStreamRewriter;
-class UnbufferedCharStream;
-class UnbufferedTokenStream;
-class WritableToken;
-
-namespace misc {
-class InterpreterDataReader;
-class Interval;
-class IntervalSet;
-class MurmurHash;
-class Utils;
-class Predicate;
-} // namespace misc
-namespace atn {
-class ATN;
-class ATNConfig;
-class ATNConfigSet;
-class ATNDeserializationOptions;
-class ATNDeserializer;
-class ATNSerializer;
-class ATNSimulator;
-class ATNState;
-enum class ATNType;
-class AbstractPredicateTransition;
-class ActionTransition;
-class ArrayPredictionContext;
-class AtomTransition;
-class BasicBlockStartState;
-class BasicState;
-class BlockEndState;
-class BlockStartState;
-class DecisionState;
-class EmptyPredictionContext;
-class EpsilonTransition;
-class LL1Analyzer;
-class LexerAction;
-class LexerActionExecutor;
-class LexerATNConfig;
-class LexerATNSimulator;
-class LexerMoreAction;
-class LexerPopModeAction;
-class LexerSkipAction;
-class LookaheadEventInfo;
-class LoopEndState;
-class NotSetTransition;
-class OrderedATNConfigSet;
-class ParseInfo;
-class ParserATNSimulator;
-class PlusBlockStartState;
-class PlusLoopbackState;
-class PrecedencePredicateTransition;
-class PredicateTransition;
-class PredictionContext;
-enum class PredictionMode;
-class PredictionModeClass;
-class RangeTransition;
-class RuleStartState;
-class RuleStopState;
-class RuleTransition;
-class SemanticContext;
-class SetTransition;
-class SingletonPredictionContext;
-class StarBlockStartState;
-class StarLoopEntryState;
-class StarLoopbackState;
-class TokensStartState;
-class Transition;
-class WildcardTransition;
-} // namespace atn
-namespace dfa {
-class DFA;
-class DFASerializer;
-class DFAState;
-class LexerDFASerializer;
-class Vocabulary;
-} // namespace dfa
-namespace tree {
-class AbstractParseTreeVisitor;
-class ErrorNode;
-class ErrorNodeImpl;
-class ParseTree;
-class ParseTreeListener;
-template <typename T>
-class ParseTreeProperty;
-class ParseTreeVisitor;
-class ParseTreeWalker;
-class SyntaxTree;
-class TerminalNode;
-class TerminalNodeImpl;
-class Tree;
-class Trees;
-
-namespace pattern {
-class Chunk;
-class ParseTreeMatch;
-class ParseTreePattern;
-class ParseTreePatternMatcher;
-class RuleTagToken;
-class TagChunk;
-class TextChunk;
-class TokenTagToken;
-} // namespace pattern
-
-namespace xpath {
-class XPath;
-class XPathElement;
-class XPathLexerErrorListener;
-class XPathRuleAnywhereElement;
-class XPathRuleElement;
-class XPathTokenAnywhereElement;
-class XPathTokenElement;
-class XPathWildcardAnywhereElement;
-class XPathWildcardElement;
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.cpp
deleted file mode 100644
index f55c8836d2..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/StringUtils.h"
-
-namespace antlrcpp {
-
-void replaceAll(std::string& str, std::string const& from,
- std::string const& to) {
- if (from.empty()) return;
-
- size_t start_pos = 0;
- while ((start_pos = str.find(from, start_pos)) != std::string::npos) {
- str.replace(start_pos, from.length(), to);
- start_pos += to.length(); // In case 'to' contains 'from', like replacing
- // 'x' with 'yx'.
- }
-}
-
-std::string ws2s(std::wstring const& wstr) {
- // This is a shim-implementation breaking non-ASCII characters.
- std::string s;
- for (wchar_t c : wstr) s.push_back(static_cast<char>(c));
- return s;
-}
-
-std::wstring s2ws(const std::string& str) {
- // This is a shim-implementation breaking non-ASCII characters.
- std::wstring s;
- for (char c : str) s.push_back(c);
- return s;
-}
-
-} // namespace antlrcpp
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.h
deleted file mode 100644
index bd0cf00027..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/StringUtils.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
-template <class T>
-inline std::string utf32_to_utf8(const T& data) {
- // This is a shim-implementation breaking non-ASCII characters.
- std::string s;
- for (auto c : data) s.push_back(static_cast<char>(c));
- return s;
-}
-
-inline UTF32String utf8_to_utf32(const char* first, const char* last) {
- // This is a shim-implementation breaking non-ASCII characters.
- UTF32String s;
- while (first != last) {
- s.push_back(*(first++));
- }
- return s;
-}
-
-void replaceAll(std::string& str, std::string const& from,
- std::string const& to);
-
- // string <-> wstring conversion (UTF-16), e.g. for use with Windows' wide APIs.
-ANTLR4CPP_PUBLIC std::string ws2s(std::wstring const& wstr);
-ANTLR4CPP_PUBLIC std::wstring s2ws(std::string const& str);
-} // namespace antlrcpp
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.cpp
deleted file mode 100644
index 523ac43fb5..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.cpp
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- The MIT License (MIT)
-
- Copyright (c) 2014 Graeme Hill (http://graemehill.ca)
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- */
-
-#include "guid.h"
-
-#ifdef GUID_LIBUUID
-#include <uuid/uuid.h>
-#endif
-
-#ifdef GUID_CFUUID
-#include <CoreFoundation/CFUUID.h>
-#endif
-
-#ifdef GUID_WINDOWS
-#include <objbase.h>
-#endif
-
-#ifdef GUID_ANDROID
-#include <jni.h>
-#endif
-
-using namespace std;
-
-// overload << so that it's easy to convert to a string
-ostream& operator<<(ostream& s, const Guid& guid) {
- return s << hex << setfill('0') << setw(2) << (int)guid._bytes[0] << setw(2)
- << (int)guid._bytes[1] << setw(2) << (int)guid._bytes[2] << setw(2)
- << (int)guid._bytes[3] << "-" << setw(2) << (int)guid._bytes[4]
- << setw(2) << (int)guid._bytes[5] << "-" << setw(2)
- << (int)guid._bytes[6] << setw(2) << (int)guid._bytes[7] << "-"
- << setw(2) << (int)guid._bytes[8] << setw(2) << (int)guid._bytes[9]
- << "-" << setw(2) << (int)guid._bytes[10] << setw(2)
- << (int)guid._bytes[11] << setw(2) << (int)guid._bytes[12] << setw(2)
- << (int)guid._bytes[13] << setw(2) << (int)guid._bytes[14] << setw(2)
- << (int)guid._bytes[15];
-}
-
-// create a guid from vector of bytes
-Guid::Guid(const vector<unsigned char>& bytes) { _bytes = bytes; }
-
-// create a guid from array of bytes
-Guid::Guid(const unsigned char* bytes) { _bytes.assign(bytes, bytes + 16); }
-
-// create a guid from array of words
-Guid::Guid(const uint16_t* bytes, bool reverse) {
- if (reverse) {
- for (size_t i = 8; i > 0; --i) {
- _bytes.push_back(bytes[i - 1] >> 8);
- _bytes.push_back(bytes[i - 1] & 0xFF);
- }
- } else {
- for (size_t i = 0; i < 8; ++i) {
- _bytes.push_back(bytes[i] & 0xFF);
- _bytes.push_back(bytes[i] >> 8);
- }
- }
-}
-
-// converts a single hex char to a number (0 - 15)
-static unsigned char hexDigitToChar(char ch) {
- if (ch > 47 && ch < 58) return (unsigned char)(ch - 48);
-
- if (ch > 96 && ch < 103) return (unsigned char)(ch - 87);
-
- if (ch > 64 && ch < 71) return (unsigned char)(ch - 55);
-
- return 0;
-}
-
-// converts the two hexadecimal characters to an unsigned char (a byte)
-static unsigned char hexPairToChar(char a, char b) {
- return hexDigitToChar(a) * 16 + hexDigitToChar(b);
-}
-
-// create a guid from string
-Guid::Guid(const string& fromString) {
- _bytes.clear();
-
- char charOne = 0, charTwo;
- bool lookingForFirstChar = true;
-
- for (const char& ch : fromString) {
- if (ch == '-') continue;
-
- if (lookingForFirstChar) {
- charOne = ch;
- lookingForFirstChar = false;
- } else {
- charTwo = ch;
- auto byte = hexPairToChar(charOne, charTwo);
- _bytes.push_back(byte);
- lookingForFirstChar = true;
- }
- }
-}
-
-// create empty guid
-Guid::Guid() { _bytes = vector<unsigned char>(16, 0); }
-
-// copy constructor
-Guid::Guid(const Guid& other) { _bytes = other._bytes; }
-
-// overload assignment operator
-Guid& Guid::operator=(const Guid& other) {
- _bytes = other._bytes;
- return *this;
-}
-
-// overload equality operator
-bool Guid::operator==(const Guid& other) const {
- return _bytes == other._bytes;
-}
-
-// overload inequality operator
-bool Guid::operator!=(const Guid& other) const { return !((*this) == other); }
-
-const std::string Guid::toString() const {
- std::stringstream os;
- os << *this;
- return os.str();
-}
-
-// This is the linux friendly implementation, but it could work on other
-// systems that have libuuid available
-#ifdef GUID_LIBUUID
-Guid GuidGenerator::newGuid() {
- uuid_t id;
- uuid_generate(id);
- return id;
-}
-#endif
-
-// this is the mac and ios version
-#ifdef GUID_CFUUID
-Guid GuidGenerator::newGuid() {
- auto newId = CFUUIDCreate(NULL);
- auto bytes = CFUUIDGetUUIDBytes(newId);
- CFRelease(newId);
-
- const unsigned char byteArray[16] = {
- bytes.byte0, bytes.byte1, bytes.byte2, bytes.byte3,
- bytes.byte4, bytes.byte5, bytes.byte6, bytes.byte7,
- bytes.byte8, bytes.byte9, bytes.byte10, bytes.byte11,
- bytes.byte12, bytes.byte13, bytes.byte14, bytes.byte15};
- return byteArray;
-}
-#endif
-
-// obviously this is the windows version
-#ifdef GUID_WINDOWS
-Guid GuidGenerator::newGuid() {
- GUID newId;
- CoCreateGuid(&newId);
-
- const unsigned char bytes[16] = {(newId.Data1 >> 24) & 0xFF,
- (newId.Data1 >> 16) & 0xFF,
- (newId.Data1 >> 8) & 0xFF,
- (newId.Data1) & 0xff,
-
- (newId.Data2 >> 8) & 0xFF,
- (newId.Data2) & 0xff,
-
- (newId.Data3 >> 8) & 0xFF,
- (newId.Data3) & 0xFF,
-
- newId.Data4[0],
- newId.Data4[1],
- newId.Data4[2],
- newId.Data4[3],
- newId.Data4[4],
- newId.Data4[5],
- newId.Data4[6],
- newId.Data4[7]};
-
- return bytes;
-}
-#endif
-
-// android version that uses a call to a java api
-#ifdef GUID_ANDROID
-GuidGenerator::GuidGenerator(JNIEnv* env) {
- _env = env;
- _uuidClass = env->FindClass("java/util/UUID");
- _newGuidMethod =
- env->GetStaticMethodID(_uuidClass, "randomUUID", "()Ljava/util/UUID;");
- _mostSignificantBitsMethod =
- env->GetMethodID(_uuidClass, "getMostSignificantBits", "()J");
- _leastSignificantBitsMethod =
- env->GetMethodID(_uuidClass, "getLeastSignificantBits", "()J");
-}
-
-Guid GuidGenerator::newGuid() {
- jobject javaUuid = _env->CallStaticObjectMethod(_uuidClass, _newGuidMethod);
- jlong mostSignificant =
- _env->CallLongMethod(javaUuid, _mostSignificantBitsMethod);
- jlong leastSignificant =
- _env->CallLongMethod(javaUuid, _leastSignificantBitsMethod);
-
- unsigned char bytes[16] = {
- (mostSignificant >> 56) & 0xFF, (mostSignificant >> 48) & 0xFF,
- (mostSignificant >> 40) & 0xFF, (mostSignificant >> 32) & 0xFF,
- (mostSignificant >> 24) & 0xFF, (mostSignificant >> 16) & 0xFF,
- (mostSignificant >> 8) & 0xFF, (mostSignificant)&0xFF,
- (leastSignificant >> 56) & 0xFF, (leastSignificant >> 48) & 0xFF,
- (leastSignificant >> 40) & 0xFF, (leastSignificant >> 32) & 0xFF,
- (leastSignificant >> 24) & 0xFF, (leastSignificant >> 16) & 0xFF,
- (leastSignificant >> 8) & 0xFF, (leastSignificant)&0xFF,
- };
- return bytes;
-}
-#endif
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.h
deleted file mode 100644
index d0c2e91cf3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/support/guid.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- The MIT License (MIT)
-
- Copyright (c) 2014 Graeme Hill (http://graemehill.ca)
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- */
-#pragma once
-
-#include <stdint.h>
-#include <iomanip>
-#include <iostream>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#ifdef GUID_ANDROID
-#include <jni.h>
-#endif
-
-// Class to represent a GUID/UUID. Each instance acts as a wrapper around a
-// 16 byte value that can be passed around by value. It also supports
-// conversion to string (via the stream operator <<) and conversion from a
-// string via constructor.
-class Guid {
- public:
- // create a guid from vector of bytes
- Guid(const std::vector<unsigned char>& bytes);
-
- // create a guid from array of bytes
- Guid(const unsigned char* bytes);
-
- // Create a guid from array of words.
- Guid(const uint16_t* bytes, bool reverse);
-
- // create a guid from string
- Guid(const std::string& fromString);
-
- // create empty guid
- Guid();
-
- // copy constructor
- Guid(const Guid& other);
-
- // overload assignment operator
- Guid& operator=(const Guid& other);
-
- // overload equality and inequality operator
- bool operator==(const Guid& other) const;
- bool operator!=(const Guid& other) const;
-
- const std::string toString() const;
- std::vector<unsigned char>::const_iterator begin() { return _bytes.begin(); }
- std::vector<unsigned char>::const_iterator end() { return _bytes.end(); }
- std::vector<unsigned char>::const_reverse_iterator rbegin() {
- return _bytes.rbegin();
- }
- std::vector<unsigned char>::const_reverse_iterator rend() {
- return _bytes.rend();
- }
-
- private:
- // actual data
- std::vector<unsigned char> _bytes;
-
- // make the << operator a friend so it can access _bytes
- friend std::ostream& operator<<(std::ostream& s, const Guid& guid);
-};
-
-// Class that can create new guids. The only reason this exists instead of
-// just a global "newGuid" function is because some platforms will require
-// that there is some attached context. In the case of android, we need to
-// know what JNIEnv is being used to call back to Java, but the newGuid()
-// function would no longer be cross-platform if we parameterized the android
-// version. Instead, construction of the GuidGenerator may be different on
-// each platform, but the use of newGuid is uniform.
-class GuidGenerator {
- public:
-#ifdef GUID_ANDROID
- GuidGenerator(JNIEnv* env);
-#else
- GuidGenerator() {}
-#endif
-
- Guid newGuid();
-
-#ifdef GUID_ANDROID
- private:
- JNIEnv* _env;
- jclass _uuidClass;
- jmethodID _newGuidMethod;
- jmethodID _mostSignificantBitsMethod;
- jmethodID _leastSignificantBitsMethod;
-#endif
-};
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h
deleted file mode 100644
index b84d5c2d09..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/AbstractParseTreeVisitor.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/ParseTreeVisitor.h"
-
-namespace antlr4 {
-namespace tree {
-
-class ANTLR4CPP_PUBLIC AbstractParseTreeVisitor : public ParseTreeVisitor {
- public:
- /// The default implementation calls <seealso cref="ParseTree#accept"/> on the
- /// specified tree.
- virtual antlrcpp::Any visit(ParseTree* tree) override {
- return tree->accept(this);
- }
-
- /**
- * <p>The default implementation initializes the aggregate result to
- * {@link #defaultResult defaultResult()}. Before visiting each child, it
- * calls {@link #shouldVisitNextChild shouldVisitNextChild}; if the result
- * is {@code false} no more children are visited and the current aggregate
- * result is returned. After visiting a child, the aggregate result is
- * updated by calling {@link #aggregateResult aggregateResult} with the
- * previous aggregate result and the result of visiting the child.</p>
- *
- * <p>The default implementation is not safe for use in visitors that modify
- * the tree structure. Visitors that modify the tree should override this
- * method to behave properly in respect to the specific algorithm in use.</p>
- */
- virtual antlrcpp::Any visitChildren(ParseTree* node) override {
- antlrcpp::Any result = defaultResult();
- size_t n = node->children.size();
- for (size_t i = 0; i < n; i++) {
- if (!shouldVisitNextChild(node, result)) {
- break;
- }
-
- antlrcpp::Any childResult = node->children[i]->accept(this);
- result = aggregateResult(result, childResult);
- }
-
- return result;
- }
-
- /// The default implementation returns the result of
- /// <seealso cref="#defaultResult defaultResult"/>.
- virtual antlrcpp::Any visitTerminal(TerminalNode* /*node*/) override {
- return defaultResult();
- }
-
- /// The default implementation returns the result of
- /// <seealso cref="#defaultResult defaultResult"/>.
- virtual antlrcpp::Any visitErrorNode(ErrorNode* /*node*/) override {
- return defaultResult();
- }
-
- protected:
- /// <summary>
- /// Gets the default value returned by visitor methods. This value is
- /// returned by the default implementations of
- /// <seealso cref="#visitTerminal visitTerminal"/>, <seealso
- /// cref="#visitErrorNode visitErrorNode"/>. The default implementation of
- /// <seealso cref="#visitChildren visitChildren"/> initializes its aggregate
- /// result to this value. <p/> The base implementation returns {@code null}.
- /// </summary>
- /// <returns> The default value returned by visitor methods. </returns>
- virtual antlrcpp::Any defaultResult() {
- return nullptr; // support isNotNull
- }
-
- /// <summary>
- /// Aggregates the results of visiting multiple children of a node. After
- /// either all children are visited or <seealso cref="#shouldVisitNextChild"/>
- /// returns
- /// {@code false}, the aggregate value is returned as the result of
- /// <seealso cref="#visitChildren"/>.
- /// <p/>
- /// The default implementation returns {@code nextResult}, meaning
- /// <seealso cref="#visitChildren"/> will return the result of the last child
- /// visited (or return the initial value if the node has no children).
- /// </summary>
- /// <param name="aggregate"> The previous aggregate value. In the default
- /// implementation, the aggregate value is initialized to
- /// <seealso cref="#defaultResult"/>, which is passed as the {@code aggregate}
- /// argument to this method after the first child node is visited. </param>
- /// <param name="nextResult"> The result of the immediately preceeding call to
- /// visit a child node.
- /// </param>
- /// <returns> The updated aggregate result. </returns>
- virtual antlrcpp::Any aggregateResult(antlrcpp::Any /*aggregate*/,
- const antlrcpp::Any& nextResult) {
- return nextResult;
- }
-
- /// <summary>
- /// This method is called after visiting each child in
- /// <seealso cref="#visitChildren"/>. This method is first called before the
- /// first child is visited; at that point {@code currentResult} will be the
- /// initial value (in the default implementation, the initial value is
- /// returned by a call to <seealso cref="#defaultResult"/>. This method is not
- /// called after the last child is visited. <p/> The default implementation
- /// always returns {@code true}, indicating that
- /// {@code visitChildren} should only return after all children are visited.
- /// One reason to override this method is to provide a "short circuit"
- /// evaluation option for situations where the result of visiting a single
- /// child has the potential to determine the result of the visit operation as
- /// a whole.
- /// </summary>
- /// <param name="node"> The <seealso cref="ParseTree"/> whose children are
- /// currently being visited. </param> <param name="currentResult"> The current
- /// aggregate result of the children visited to the current point.
- /// </param>
- /// <returns> {@code true} to continue visiting children. Otherwise return
- /// {@code false} to stop visiting children and immediately return the
- /// current aggregate result from <seealso cref="#visitChildren"/>. </returns>
- virtual bool shouldVisitNextChild(ParseTree* /*node*/,
- const antlrcpp::Any& /*currentResult*/) {
- return true;
- }
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.cpp
deleted file mode 100644
index fdf4d22475..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ErrorNode.h"
-
-antlr4::tree::ErrorNode::~ErrorNode() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.h
deleted file mode 100644
index 65e8d77ff9..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNode.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/TerminalNode.h"
-
-namespace antlr4 {
-namespace tree {
-
-class ANTLR4CPP_PUBLIC ErrorNode : public virtual TerminalNode {
- public:
- ~ErrorNode() override;
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp
deleted file mode 100644
index 2ca06c7d29..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "tree/ParseTreeVisitor.h"
-
-#include "tree/ErrorNodeImpl.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-using namespace antlr4::tree;
-
-ErrorNodeImpl::ErrorNodeImpl(Token* token) : TerminalNodeImpl(token) {}
-
-ErrorNodeImpl::~ErrorNodeImpl() {}
-
-antlrcpp::Any ErrorNodeImpl::accept(ParseTreeVisitor* visitor) {
- return visitor->visitErrorNode(this);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h
deleted file mode 100644
index 1f406e550e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ErrorNodeImpl.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "misc/Interval.h"
-#include "tree/ErrorNode.h"
-#include "tree/TerminalNodeImpl.h"
-
-#include "support/Any.h"
-
-namespace antlr4 {
-namespace tree {
-
-/// <summary>
-/// Represents a token that was consumed during resynchronization
-/// rather than during a valid match operation. For example,
-/// we will create this kind of a node during single token insertion
-/// and deletion as well as during "consume until error recovery set"
-/// upon no viable alternative exceptions.
-/// </summary>
-class ANTLR4CPP_PUBLIC ErrorNodeImpl : public virtual TerminalNodeImpl,
- public virtual ErrorNode {
- public:
- ErrorNodeImpl(Token* token);
- ~ErrorNodeImpl() override;
-
- virtual antlrcpp::Any accept(ParseTreeVisitor* visitor) override;
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp
deleted file mode 100644
index 66cfc91d1d..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/CPPUtils.h"
-
-#include "tree/ErrorNode.h"
-#include "tree/ParseTree.h"
-#include "tree/ParseTreeListener.h"
-
-#include "IterativeParseTreeWalker.h"
-
-using namespace antlr4::tree;
-
-void IterativeParseTreeWalker::walk(ParseTreeListener* listener,
- ParseTree* t) const {
- std::vector<ParseTree*> nodeStack;
- std::vector<size_t> indexStack;
-
- ParseTree* currentNode = t;
- size_t currentIndex = 0;
-
- while (currentNode != nullptr) {
- // pre-order visit
- if (antlrcpp::is<ErrorNode*>(currentNode)) {
- listener->visitErrorNode(dynamic_cast<ErrorNode*>(currentNode));
- } else if (antlrcpp::is<TerminalNode*>(currentNode)) {
- listener->visitTerminal((TerminalNode*)currentNode);
- } else {
- enterRule(listener, currentNode);
- }
-
- // Move down to first child, if it exists.
- if (!currentNode->children.empty()) {
- nodeStack.push_back(currentNode);
- indexStack.push_back(currentIndex);
- currentIndex = 0;
- currentNode = currentNode->children[0];
- continue;
- }
-
- // No child nodes, so walk tree.
- do {
- // post-order visit
- if (!antlrcpp::is<TerminalNode*>(currentNode)) {
- exitRule(listener, currentNode);
- }
-
- // No parent, so no siblings.
- if (nodeStack.empty()) {
- currentNode = nullptr;
- currentIndex = 0;
- break;
- }
-
- // Move to next sibling if possible.
- if (nodeStack.back()->children.size() > ++currentIndex) {
- currentNode = nodeStack.back()->children[currentIndex];
- break;
- }
-
- // No next sibling, so move up.
- currentNode = nodeStack.back();
- nodeStack.pop_back();
- currentIndex = indexStack.back();
- indexStack.pop_back();
-
- } while (currentNode != nullptr);
- }
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h
deleted file mode 100644
index ec22cabf98..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/IterativeParseTreeWalker.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2012 Terence Parr
- * Copyright (c) 2012 Sam Harwell
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#include "tree/ParseTreeWalker.h"
-
-namespace antlr4 {
-namespace tree {
-
-class ParseTreeListener;
-
-/**
- * An iterative (read: non-recursive) pre-order and post-order tree walker that
- * doesn't use the thread stack but heap-based stacks. Makes it possible to
- * process deeply nested parse trees.
- */
-class ANTLR4CPP_PUBLIC IterativeParseTreeWalker : public ParseTreeWalker {
- public:
- virtual void walk(ParseTreeListener* listener, ParseTree* t) const override;
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.cpp
deleted file mode 100644
index 65b5490486..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-
-using namespace antlr4::tree;
-
-ParseTree::ParseTree() : parent(nullptr) {}
-
-bool ParseTree::operator==(const ParseTree& other) const {
- return &other == this;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.h
deleted file mode 100644
index a9588637d2..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTree.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "support/Any.h"
-
-namespace antlr4 {
-namespace tree {
-
-/// An interface to access the tree of <seealso cref="RuleContext"/> objects
-/// created during a parse that makes the data structure look like a simple
-/// parse tree. This node represents both internal nodes, rule invocations, and
-/// leaf nodes, token matches.
-///
-/// The payload is either a <seealso cref="Token"/> or a <seealso
-/// cref="RuleContext"/> object.
-// ml: This class unites 4 Java classes: RuleNode, ParseTree, SyntaxTree and
-// Tree.
-class ANTLR4CPP_PUBLIC ParseTree {
- public:
- ParseTree();
- ParseTree(ParseTree const&) = delete;
- virtual ~ParseTree() {}
-
- ParseTree& operator=(ParseTree const&) = delete;
-
- /// The parent of this node. If the return value is null, then this
- /// node is the root of the tree.
- ParseTree* parent;
-
- /// If we are debugging or building a parse tree for a visitor,
- /// we need to track all of the tokens and rule invocations associated
- /// with this rule's context. This is empty for parsing w/o tree constr.
- /// operation because we don't need to track the details about
- /// how we parse this rule.
- // ml: memory is not managed here, but by the owning class. This is just for
- // the structure.
- std::vector<ParseTree*> children;
-
- /// Print out a whole tree, not just a node, in LISP format
- /// {@code (root child1 .. childN)}. Print just a node if this is a leaf.
- virtual std::string toStringTree() = 0;
- virtual std::string toString() = 0;
-
- /// Specialize toStringTree so that it can print out more information
- /// based upon the parser.
- virtual std::string toStringTree(Parser* parser) = 0;
-
- virtual bool operator==(const ParseTree& other) const;
-
- /// The <seealso cref="ParseTreeVisitor"/> needs a double dispatch method.
- // ml: This has been changed to use Any instead of a template parameter, to
- // avoid the need of a virtual template function.
- virtual antlrcpp::Any accept(ParseTreeVisitor* visitor) = 0;
-
- /// Return the combined text of all leaf nodes. Does not get any
- /// off-channel tokens (if any) so won't return whitespace and
- /// comments if they are sent to parser on hidden channel.
- virtual std::string getText() = 0;
-
- /**
- * Return an {@link Interval} indicating the index in the
- * {@link TokenStream} of the first and last token associated with this
- * subtree. If this node is a leaf, then the interval represents a single
- * token and has interval i..i for token index i.
- *
- * <p>An interval of i..i-1 indicates an empty interval at position
- * i in the input stream, where 0 &lt;= i &lt;= the size of the input
- * token stream. Currently, the code base can only have i=0..n-1 but
- * in concept one could have an empty interval after EOF. </p>
- *
- * <p>If source interval is unknown, this returns {@link
- * Interval#INVALID}.</p>
- *
- * <p>As a weird special case, the source interval for rules matched after
- * EOF is unspecified.</p>
- */
- virtual misc::Interval getSourceInterval() = 0;
-};
-
-// A class to help managing ParseTree instances without the need of a
-// shared_ptr.
-class ANTLR4CPP_PUBLIC ParseTreeTracker {
- public:
- template <typename T, typename... Args>
- T* createInstance(Args&&... args) {
- static_assert(std::is_base_of<ParseTree, T>::value,
- "Argument must be a parse tree type");
- T* result = new T(args...);
- _allocated.push_back(result);
- return result;
- }
-
- void reset() {
- for (auto entry : _allocated) delete entry;
- _allocated.clear();
- }
-
- private:
- std::vector<ParseTree*> _allocated;
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp
deleted file mode 100644
index 2ca7f36cc3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ParseTreeListener.h"
-
-antlr4::tree::ParseTreeListener::~ParseTreeListener() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.h
deleted file mode 100644
index 8b9a7c0cdc..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeListener.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-
-/** This interface describes the minimal core of methods triggered
- * by {@link ParseTreeWalker}. E.g.,
- *
- * ParseTreeWalker walker = new ParseTreeWalker();
- * walker.walk(myParseTreeListener, myParseTree); <-- triggers
- *events in your listener
- *
- * If you want to trigger events in multiple listeners during a single
- * tree walk, you can use the ParseTreeDispatcher object available at
- *
- * https://github.com/antlr/antlr4/issues/841
- */
-class ANTLR4CPP_PUBLIC ParseTreeListener {
- public:
- virtual ~ParseTreeListener();
-
- virtual void visitTerminal(TerminalNode* node) = 0;
- virtual void visitErrorNode(ErrorNode* node) = 0;
- virtual void enterEveryRule(ParserRuleContext* ctx) = 0;
- virtual void exitEveryRule(ParserRuleContext* ctx) = 0;
-
- bool operator==(const ParseTreeListener& other) { return this == &other; }
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeProperty.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeProperty.h
deleted file mode 100644
index c7dcbd1a4e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeProperty.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-
-/// <summary>
-/// Associate a property with a parse tree node. Useful with parse tree
-/// listeners that need to associate values with particular tree nodes, kind of
-/// like specifying a return value for the listener event method that visited a
-/// particular node. Example:
-///
-/// <pre>
-/// ParseTreeProperty&lt;Integer&gt; values = new
-/// ParseTreeProperty&lt;Integer&gt;(); values.put(tree, 36); int x =
-/// values.get(tree); values.removeFrom(tree);
-/// </pre>
-///
- /// You would make one decl (values here) in the listener and use it lots of times
-/// in your event methods.
-/// </summary>
-template <typename V>
-class ANTLR4CPP_PUBLIC ParseTreeProperty {
- public:
- virtual V get(ParseTree* node) { return _annotations[node]; }
- virtual void put(ParseTree* node, V value) { _annotations[node] = value; }
- virtual V removeFrom(ParseTree* node) {
- auto value = _annotations[node];
- _annotations.erase(node);
- return value;
- }
-
- protected:
- std::map<ParseTree*, V> _annotations;
-};
-
-} // namespace tree
-} // namespace antlr4
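
The C++ counterpart of the Java snippet quoted in the comment above is roughly the sketch below; the int payload and the free function are arbitrary choices for illustration.

#include "antlr4-runtime.h"

using namespace antlr4::tree;

// One ParseTreeProperty per kind of annotation; typically held as a member of
// a listener or visitor so the values survive across callbacks.
ParseTreeProperty<int> values;

void annotate(ParseTree* node) {
  values.put(node, 36);       // attach a value to this node
  int x = values.get(node);   // read it back -> 36
  values.removeFrom(node);    // detach it again
  (void)x;
}
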
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp
deleted file mode 100644
index 4bcd54892e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ParseTreeVisitor.h"
-
-antlr4::tree::ParseTreeVisitor::~ParseTreeVisitor() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h
deleted file mode 100644
index c331537b38..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeVisitor.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "support/Any.h"
-
-namespace antlr4 {
-namespace tree {
-
-/// <summary>
-/// This interface defines the basic notion of a parse tree visitor. Generated
-/// visitors implement this interface and the {@code XVisitor} interface for
-/// grammar {@code X}.
-/// </summary>
-/// @param <T> The return type of the visit operation. Use <seealso
-/// cref="Void"/> for operations with no return type. </param>
-// ml: no template parameter here, to avoid the need for virtual template
-// functions. Instead we have our Any class.
-class ANTLR4CPP_PUBLIC ParseTreeVisitor {
- public:
- virtual ~ParseTreeVisitor();
-
- /// <summary>
- /// Visit a parse tree, and return a user-defined result of the operation.
- /// </summary>
- /// <param name="tree"> The <seealso cref="ParseTree"/> to visit. </param>
- /// <returns> The result of visiting the parse tree. </returns>
- virtual antlrcpp::Any visit(ParseTree* tree) = 0;
-
- /// <summary>
- /// Visit the children of a node, and return a user-defined result of the
- /// operation.
- /// </summary>
- /// <param name="node"> The <seealso cref="ParseTree"/> whose children should
- /// be visited. </param> <returns> The result of visiting the children of the
- /// node. </returns>
- virtual antlrcpp::Any visitChildren(ParseTree* node) = 0;
-
- /// <summary>
- /// Visit a terminal node, and return a user-defined result of the operation.
- /// </summary>
- /// <param name="node"> The <seealso cref="TerminalNode"/> to visit. </param>
- /// <returns> The result of visiting the node. </returns>
- virtual antlrcpp::Any visitTerminal(TerminalNode* node) = 0;
-
- /// <summary>
- /// Visit an error node, and return a user-defined result of the operation.
- /// </summary>
- /// <param name="node"> The <seealso cref="ErrorNode"/> to visit. </param>
- /// <returns> The result of visiting the node. </returns>
- virtual antlrcpp::Any visitErrorNode(ErrorNode* node) = 0;
-};
-
-} // namespace tree
-} // namespace antlr4
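
A minimal concrete visitor built directly on this interface might look like the following sketch. It concatenates the text of all terminals, relying on accept() dispatching rule contexts to visitChildren() and terminal nodes to visitTerminal(); the class name is illustrative.

#include "antlr4-runtime.h"
#include <string>

using namespace antlr4;
using namespace antlr4::tree;

// Rebuilds the source text of a subtree by concatenating its terminals.
class TextCollector : public ParseTreeVisitor {
 public:
  antlrcpp::Any visit(ParseTree* tree) override {
    return tree->accept(this);  // dispatches to one of the methods below
  }
  antlrcpp::Any visitChildren(ParseTree* node) override {
    std::string text;
    for (auto* child : node->children) {
      text += visit(child).as<std::string>();
    }
    return text;
  }
  antlrcpp::Any visitTerminal(TerminalNode* node) override {
    return node->getText();
  }
  antlrcpp::Any visitErrorNode(ErrorNode* node) override {
    return node->getText();
  }
};

// Usage: std::string text = TextCollector().visit(tree).as<std::string>();
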
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp
deleted file mode 100644
index c2debf13b0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ParserRuleContext.h"
-#include "support/CPPUtils.h"
-#include "tree/ErrorNode.h"
-#include "tree/ParseTreeListener.h"
-
-#include "tree/IterativeParseTreeWalker.h"
-#include "tree/ParseTreeWalker.h"
-
-using namespace antlr4::tree;
-using namespace antlrcpp;
-
-static IterativeParseTreeWalker defaultWalker;
-ParseTreeWalker& ParseTreeWalker::DEFAULT = defaultWalker;
-
-ParseTreeWalker::~ParseTreeWalker() {}
-
-void ParseTreeWalker::walk(ParseTreeListener* listener, ParseTree* t) const {
- if (is<ErrorNode*>(t)) {
- listener->visitErrorNode(dynamic_cast<ErrorNode*>(t));
- return;
- } else if (is<TerminalNode*>(t)) {
- listener->visitTerminal(dynamic_cast<TerminalNode*>(t));
- return;
- }
-
- enterRule(listener, t);
- for (auto& child : t->children) {
- walk(listener, child);
- }
- exitRule(listener, t);
-}
-
-void ParseTreeWalker::enterRule(ParseTreeListener* listener,
- ParseTree* r) const {
- ParserRuleContext* ctx = dynamic_cast<ParserRuleContext*>(r);
- listener->enterEveryRule(ctx);
- ctx->enterRule(listener);
-}
-
-void ParseTreeWalker::exitRule(ParseTreeListener* listener,
- ParseTree* r) const {
- ParserRuleContext* ctx = dynamic_cast<ParserRuleContext*>(r);
- ctx->exitRule(listener);
- listener->exitEveryRule(ctx);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h
deleted file mode 100644
index c1be60f52b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/ParseTreeWalker.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-
-class ANTLR4CPP_PUBLIC ParseTreeWalker {
- public:
- static ParseTreeWalker& DEFAULT;
-
- virtual ~ParseTreeWalker();
-
- virtual void walk(ParseTreeListener* listener, ParseTree* t) const;
-
- protected:
- /// The discovery of a rule node, involves sending two events: the generic
- /// <seealso cref="ParseTreeListener#enterEveryRule"/> and a
- /// <seealso cref="RuleContext"/>-specific event. First we trigger the generic
- /// and then the rule specific. We do them in reverse order upon finishing the
- /// node.
- virtual void enterRule(ParseTreeListener* listener, ParseTree* r) const;
- virtual void exitRule(ParseTreeListener* listener, ParseTree* r) const;
-};
-
-} // namespace tree
-} // namespace antlr4
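
Walking a tree with the shared DEFAULT walker then reduces to the sketch below, reusing the CountingListener sketched after the ParseTreeListener.h hunk above; note the documented ordering, with enterEveryRule() firing before the rule-specific enter and the two reversed on exit.

#include "antlr4-runtime.h"

using namespace antlr4::tree;

// `tree` is a ParseTree* from any ANTLR-generated parser; CountingListener is
// the listener sketched earlier in this patch annotation.
void countNodes(ParseTree* tree) {
  CountingListener listener;
  // DEFAULT is backed by an IterativeParseTreeWalker (see ParseTreeWalker.cpp
  // above), so very deep trees do not risk exhausting the call stack.
  ParseTreeWalker::DEFAULT.walk(&listener, tree);
  // listener.terminals / listener.rules now hold the totals.
}
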
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.cpp
deleted file mode 100644
index db17ea3b92..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/TerminalNode.h"
-
-antlr4::tree::TerminalNode::~TerminalNode() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.h
deleted file mode 100644
index 0eff7ea1dc..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNode.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/ParseTree.h"
-
-namespace antlr4 {
-namespace tree {
-
-class ANTLR4CPP_PUBLIC TerminalNode : public ParseTree {
- public:
- ~TerminalNode() override;
-
- virtual Token* getSymbol() = 0;
-
- /** Set the parent for this leaf node.
- *
- * Technically, this is not backward compatible as it changes
- * the interface but no one was able to create custom
- * TerminalNodes anyway so I'm adding as it improves internal
- * code quality.
- *
- * @since 4.7
- */
- virtual void setParent(RuleContext* parent) = 0;
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp
deleted file mode 100644
index 434fe74d47..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "RuleContext.h"
-#include "Token.h"
-#include "misc/Interval.h"
-#include "tree/ParseTreeVisitor.h"
-
-#include "tree/TerminalNodeImpl.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-
-TerminalNodeImpl::TerminalNodeImpl(Token* symbol_) : symbol(symbol_) {}
-
-Token* TerminalNodeImpl::getSymbol() { return symbol; }
-
-void TerminalNodeImpl::setParent(RuleContext* parent_) {
- this->parent = parent_;
-}
-
-misc::Interval TerminalNodeImpl::getSourceInterval() {
- if (symbol == nullptr) {
- return misc::Interval::INVALID;
- }
-
- size_t tokenIndex = symbol->getTokenIndex();
- return misc::Interval(tokenIndex, tokenIndex);
-}
-
-antlrcpp::Any TerminalNodeImpl::accept(ParseTreeVisitor* visitor) {
- return visitor->visitTerminal(this);
-}
-
-std::string TerminalNodeImpl::getText() { return symbol->getText(); }
-
-std::string TerminalNodeImpl::toStringTree(Parser* /*parser*/) {
- return toString();
-}
-
-std::string TerminalNodeImpl::toString() {
- if (symbol->getType() == Token::EOF) {
- return "<EOF>";
- }
- return symbol->getText();
-}
-
-std::string TerminalNodeImpl::toStringTree() { return toString(); }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h
deleted file mode 100644
index 37d14f80a4..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/TerminalNodeImpl.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/TerminalNode.h"
-
-namespace antlr4 {
-namespace tree {
-
-class ANTLR4CPP_PUBLIC TerminalNodeImpl : public virtual TerminalNode {
- public:
- Token* symbol;
-
- TerminalNodeImpl(Token* symbol);
-
- virtual Token* getSymbol() override;
- virtual void setParent(RuleContext* parent) override;
- virtual misc::Interval getSourceInterval() override;
-
- virtual antlrcpp::Any accept(ParseTreeVisitor* visitor) override;
-
- virtual std::string getText() override;
- virtual std::string toStringTree(Parser* parser) override;
- virtual std::string toString() override;
- virtual std::string toStringTree() override;
-};
-
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.cpp
deleted file mode 100644
index 0863e2e60b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.cpp
+++ /dev/null
@@ -1,241 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CommonToken.h"
-#include "Parser.h"
-#include "ParserRuleContext.h"
-#include "Token.h"
-#include "atn/ATN.h"
-#include "misc/Interval.h"
-#include "misc/Predicate.h"
-#include "support/CPPUtils.h"
-#include "tree/ErrorNode.h"
-#include "tree/TerminalNodeImpl.h"
-
-#include "tree/Trees.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-using namespace antlr4::tree;
-
-using namespace antlrcpp;
-
-Trees::Trees() {}
-
-std::string Trees::toStringTree(ParseTree* t) {
- return toStringTree(t, nullptr);
-}
-
-std::string Trees::toStringTree(ParseTree* t, Parser* recog) {
- if (recog == nullptr) return toStringTree(t, std::vector<std::string>());
- return toStringTree(t, recog->getRuleNames());
-}
-
-std::string Trees::toStringTree(ParseTree* t,
- const std::vector<std::string>& ruleNames) {
- std::string temp =
- antlrcpp::escapeWhitespace(Trees::getNodeText(t, ruleNames), false);
- if (t->children.empty()) {
- return temp;
- }
-
- std::stringstream ss;
- ss << "(" << temp << ' ';
-
- // Implement the recursive walk as iteration to avoid trouble with deep
- // nesting.
- std::stack<size_t> stack;
- size_t childIndex = 0;
- ParseTree* run = t;
- while (childIndex < run->children.size()) {
- if (childIndex > 0) {
- ss << ' ';
- }
- ParseTree* child = run->children[childIndex];
- temp =
- antlrcpp::escapeWhitespace(Trees::getNodeText(child, ruleNames), false);
- if (!child->children.empty()) {
- // Go deeper one level.
- stack.push(childIndex);
- run = child;
- childIndex = 0;
- ss << "(" << temp << " ";
- } else {
- ss << temp;
- while (++childIndex == run->children.size()) {
- if (stack.size() > 0) {
- // Reached the end of the current level. See if we can step up from
- // here.
- childIndex = stack.top();
- stack.pop();
- run = run->parent;
- ss << ")";
- } else {
- break;
- }
- }
- }
- }
-
- ss << ")";
- return ss.str();
-}
-
-std::string Trees::getNodeText(ParseTree* t, Parser* recog) {
- return getNodeText(t, recog->getRuleNames());
-}
-
-std::string Trees::getNodeText(ParseTree* t,
- const std::vector<std::string>& ruleNames) {
- if (ruleNames.size() > 0) {
- if (is<RuleContext*>(t)) {
- size_t ruleIndex = dynamic_cast<RuleContext*>(t)->getRuleIndex();
- std::string ruleName = ruleNames[ruleIndex];
- size_t altNumber = dynamic_cast<RuleContext*>(t)->getAltNumber();
- if (altNumber != atn::ATN::INVALID_ALT_NUMBER) {
- return ruleName + ":" + std::to_string(altNumber);
- }
- return ruleName;
- } else if (is<ErrorNode*>(t)) {
- return t->toString();
- } else if (is<TerminalNode*>(t)) {
- Token* symbol = dynamic_cast<TerminalNode*>(t)->getSymbol();
- if (symbol != nullptr) {
- std::string s = symbol->getText();
- return s;
- }
- }
- }
- // no recog for rule names
- if (is<RuleContext*>(t)) {
- return dynamic_cast<RuleContext*>(t)->getText();
- }
-
- if (is<TerminalNodeImpl*>(t)) {
- return dynamic_cast<TerminalNodeImpl*>(t)->getSymbol()->getText();
- }
-
- return "";
-}
-
-std::vector<ParseTree*> Trees::getAncestors(ParseTree* t) {
- std::vector<ParseTree*> ancestors;
- ParseTree* parent = t->parent;
- while (parent != nullptr) {
- ancestors.insert(ancestors.begin(), parent); // insert at start
- parent = parent->parent;
- }
- return ancestors;
-}
-
-template <typename T>
-static void _findAllNodes(ParseTree* t, size_t index, bool findTokens,
- std::vector<T>& nodes) {
- // check this node (the root) first
- if (findTokens && is<TerminalNode*>(t)) {
- TerminalNode* tnode = dynamic_cast<TerminalNode*>(t);
- if (tnode->getSymbol()->getType() == index) {
- nodes.push_back(t);
- }
- } else if (!findTokens && is<ParserRuleContext*>(t)) {
- ParserRuleContext* ctx = dynamic_cast<ParserRuleContext*>(t);
- if (ctx->getRuleIndex() == index) {
- nodes.push_back(t);
- }
- }
- // check children
- for (size_t i = 0; i < t->children.size(); i++) {
- _findAllNodes(t->children[i], index, findTokens, nodes);
- }
-}
-
-bool Trees::isAncestorOf(ParseTree* t, ParseTree* u) {
- if (t == nullptr || u == nullptr || t->parent == nullptr) {
- return false;
- }
-
- ParseTree* p = u->parent;
- while (p != nullptr) {
- if (t == p) {
- return true;
- }
- p = p->parent;
- }
- return false;
-}
-
-std::vector<ParseTree*> Trees::findAllTokenNodes(ParseTree* t, size_t ttype) {
- return findAllNodes(t, ttype, true);
-}
-
-std::vector<ParseTree*> Trees::findAllRuleNodes(ParseTree* t,
- size_t ruleIndex) {
- return findAllNodes(t, ruleIndex, false);
-}
-
-std::vector<ParseTree*> Trees::findAllNodes(ParseTree* t, size_t index,
- bool findTokens) {
- std::vector<ParseTree*> nodes;
- _findAllNodes<ParseTree*>(t, index, findTokens, nodes);
- return nodes;
-}
-
-std::vector<ParseTree*> Trees::getDescendants(ParseTree* t) {
- std::vector<ParseTree*> nodes;
- nodes.push_back(t);
- std::size_t n = t->children.size();
- for (size_t i = 0; i < n; i++) {
- auto descentants = getDescendants(t->children[i]);
- for (auto entry : descentants) {
- nodes.push_back(entry);
- }
- }
- return nodes;
-}
-
-std::vector<ParseTree*> Trees::descendants(ParseTree* t) {
- return getDescendants(t);
-}
-
-ParserRuleContext* Trees::getRootOfSubtreeEnclosingRegion(
- ParseTree* t, size_t startTokenIndex, size_t stopTokenIndex) {
- size_t n = t->children.size();
- for (size_t i = 0; i < n; i++) {
- ParserRuleContext* r = getRootOfSubtreeEnclosingRegion(
- t->children[i], startTokenIndex, stopTokenIndex);
- if (r != nullptr) {
- return r;
- }
- }
-
- if (is<ParserRuleContext*>(t)) {
- ParserRuleContext* r = dynamic_cast<ParserRuleContext*>(t);
- if (startTokenIndex >=
- r->getStart()->getTokenIndex() && // is range fully contained in t?
- (r->getStop() == nullptr ||
- stopTokenIndex <= r->getStop()->getTokenIndex())) {
- // note: r.getStop()==null likely implies that we bailed out of parser and
- // there's nothing to the right
- return r;
- }
- }
- return nullptr;
-}
-
-ParseTree* Trees::findNodeSuchThat(ParseTree* t, Ref<Predicate> const& pred) {
- if (pred->test(t)) {
- return t;
- }
-
- size_t n = t->children.size();
- for (size_t i = 0; i < n; ++i) {
- ParseTree* u = findNodeSuchThat(t->children[i], pred);
- if (u != nullptr) {
- return u;
- }
- }
-
- return nullptr;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.h
deleted file mode 100644
index 378fc74be2..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/Trees.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ParserRuleContext.h"
-#include "Recognizer.h"
-#include "tree/TerminalNode.h"
-
-namespace antlr4 {
-namespace tree {
-
-/// A set of utility routines useful for all kinds of ANTLR trees.
-class ANTLR4CPP_PUBLIC Trees {
- public:
- /// Print out a whole tree in LISP form. getNodeText is used on the
- /// node payloads to get the text for the nodes. Detect
- /// parse trees and extract data appropriately.
- static std::string toStringTree(ParseTree* t);
-
- /// Print out a whole tree in LISP form. getNodeText is used on the
- /// node payloads to get the text for the nodes. Detect
- /// parse trees and extract data appropriately.
- static std::string toStringTree(ParseTree* t, Parser* recog);
-
- /// Print out a whole tree in LISP form. getNodeText is used on the
- /// node payloads to get the text for the nodes. Detect
- /// parse trees and extract data appropriately.
- static std::string toStringTree(ParseTree* t,
- const std::vector<std::string>& ruleNames);
- static std::string getNodeText(ParseTree* t, Parser* recog);
- static std::string getNodeText(ParseTree* t,
- const std::vector<std::string>& ruleNames);
-
- /// Return a list of all ancestors of this node. The first node of
- /// list is the root and the last is the parent of this node.
- static std::vector<ParseTree*> getAncestors(ParseTree* t);
-
- /** Return true if t is u's parent or a node on path to root from u.
- * Use == not equals().
- *
- * @since 4.5.1
- */
- static bool isAncestorOf(ParseTree* t, ParseTree* u);
- static std::vector<ParseTree*> findAllTokenNodes(ParseTree* t, size_t ttype);
- static std::vector<ParseTree*> findAllRuleNodes(ParseTree* t,
- size_t ruleIndex);
- static std::vector<ParseTree*> findAllNodes(ParseTree* t, size_t index,
- bool findTokens);
-
- /** Get all descendents; includes t itself.
- *
- * @since 4.5.1
- */
- static std::vector<ParseTree*> getDescendants(ParseTree* t);
-
- /** @deprecated */
- static std::vector<ParseTree*> descendants(ParseTree* t);
-
- /** Find smallest subtree of t enclosing range startTokenIndex..stopTokenIndex
- * inclusively using postorder traversal. Recursive depth-first-search.
- *
- * @since 4.5.1
- */
- static ParserRuleContext* getRootOfSubtreeEnclosingRegion(
- ParseTree* t,
- size_t startTokenIndex, // inclusive
- size_t stopTokenIndex); // inclusive
-
- /** Return first node satisfying the pred
- *
- * @since 4.5.1
- */
- static ParseTree* findNodeSuchThat(ParseTree* t,
- Ref<misc::Predicate> const& pred);
-
- private:
- Trees();
-};
-
-} // namespace tree
-} // namespace antlr4
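
A short usage sketch for these helpers; the rule name "expr" and the printed tree shape are hypothetical, and any ANTLR-generated parser works.

#include "antlr4-runtime.h"
#include <iostream>
#include <vector>

using namespace antlr4;
using namespace antlr4::tree;

void dumpTree(Parser* parser, ParserRuleContext* tree) {
  // LISP-style dump of the whole tree, e.g. "(expr (expr 1) + (expr 2))".
  std::cout << Trees::toStringTree(tree, parser) << std::endl;

  // All subtrees rooted at a given rule, looked up by rule index.
  std::vector<ParseTree*> exprs =
      Trees::findAllRuleNodes(tree, parser->getRuleIndex("expr"));
  std::cout << exprs.size() << " 'expr' subtrees" << std::endl;
}
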
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp
deleted file mode 100644
index eb6b1e1f06..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/pattern/Chunk.h"
-
-antlr4::tree::pattern::Chunk::~Chunk() {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.h
deleted file mode 100644
index 00ce87264c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/Chunk.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// <summary>
-/// A chunk is either a token tag, a rule tag, or a span of literal text within
-/// a tree pattern. <p/> The method <seealso
-/// cref="ParseTreePatternMatcher#split(String)"/> returns a list of chunks in
-/// preparation for creating a token stream by <seealso
-/// cref="ParseTreePatternMatcher#tokenize(String)"/>. From there, we get a
-/// parse tree from with <seealso cref="ParseTreePatternMatcher#compile(String,
-/// int)"/>. These chunks are converted to <seealso cref="RuleTagToken"/>,
-/// <seealso cref="TokenTagToken"/>, or the regular tokens of the text
-/// surrounding the tags.
-/// </summary>
-class ANTLR4CPP_PUBLIC Chunk {
- public:
- Chunk() = default;
- Chunk(Chunk const&) = default;
- virtual ~Chunk();
-
- Chunk& operator=(Chunk const&) = default;
-
- /// This method returns a text representation of the tag chunk. Labeled tags
- /// are returned in the form {@code label:tag}, and unlabeled tags are
- /// returned as just the tag name.
- virtual std::string toString() {
- std::string str;
- return str;
- }
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp
deleted file mode 100644
index 4060e9814f..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/ParseTreeMatch.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::pattern;
-
-ParseTreeMatch::ParseTreeMatch(
- ParseTree* tree, const ParseTreePattern& pattern,
- const std::map<std::string, std::vector<ParseTree*>>& labels,
- ParseTree* mismatchedNode)
- : _tree(tree),
- _pattern(pattern),
- _labels(labels),
- _mismatchedNode(mismatchedNode) {
- if (tree == nullptr) {
- throw IllegalArgumentException("tree cannot be nul");
- }
-}
-
-ParseTreeMatch::~ParseTreeMatch() {}
-
-ParseTree* ParseTreeMatch::get(const std::string& label) {
- auto iterator = _labels.find(label);
- if (iterator == _labels.end() || iterator->second.empty()) {
- return nullptr;
- }
-
- return iterator->second.back(); // return last if multiple
-}
-
-std::vector<ParseTree*> ParseTreeMatch::getAll(const std::string& label) {
- auto iterator = _labels.find(label);
- if (iterator == _labels.end()) {
- return {};
- }
-
- return iterator->second;
-}
-
-std::map<std::string, std::vector<ParseTree*>>& ParseTreeMatch::getLabels() {
- return _labels;
-}
-
-ParseTree* ParseTreeMatch::getMismatchedNode() { return _mismatchedNode; }
-
-bool ParseTreeMatch::succeeded() { return _mismatchedNode == nullptr; }
-
-const ParseTreePattern& ParseTreeMatch::getPattern() { return _pattern; }
-
-ParseTree* ParseTreeMatch::getTree() { return _tree; }
-
-std::string ParseTreeMatch::toString() {
- if (succeeded()) {
- return "Match succeeded; found " + std::to_string(_labels.size()) +
- " labels";
- } else {
- return "Match failed; found " + std::to_string(_labels.size()) + " labels";
- }
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h
deleted file mode 100644
index f8463a14aa..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreeMatch.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// Represents the result of matching a ParseTree against a tree pattern.
-class ANTLR4CPP_PUBLIC ParseTreeMatch {
- private:
- /// This is the backing field for getTree().
- ParseTree* _tree;
-
- /// This is the backing field for getPattern().
- const ParseTreePattern& _pattern;
-
- /// This is the backing field for getLabels().
- std::map<std::string, std::vector<ParseTree*>> _labels;
-
- /// This is the backing field for getMismatchedNode().
- ParseTree* _mismatchedNode;
-
- public:
- /// <summary>
- /// Constructs a new instance of <seealso cref="ParseTreeMatch"/> from the
- /// specified parse tree and pattern.
- /// </summary>
- /// <param name="tree"> The parse tree to match against the pattern. </param>
- /// <param name="pattern"> The parse tree pattern. </param>
- /// <param name="labels"> A mapping from label names to collections of
- /// <seealso cref="ParseTree"/> objects located by the tree pattern matching
- /// process. </param> <param name="mismatchedNode"> The first node which
- /// failed to match the tree pattern during the matching process.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code tree} is {@code
- /// null} </exception> <exception cref="IllegalArgumentException"> if {@code
- /// pattern} is {@code null} </exception> <exception
- /// cref="IllegalArgumentException"> if {@code labels} is {@code null}
- /// </exception>
- ParseTreeMatch(ParseTree* tree, ParseTreePattern const& pattern,
- const std::map<std::string, std::vector<ParseTree*>>& labels,
- ParseTree* mismatchedNode);
- ParseTreeMatch(ParseTreeMatch const&) = default;
- virtual ~ParseTreeMatch();
- ParseTreeMatch& operator=(ParseTreeMatch const&) = default;
-
- /// <summary>
- /// Get the last node associated with a specific {@code label}.
- /// <p/>
- /// For example, for pattern {@code <id:ID>}, {@code get("id")} returns the
- /// node matched for that {@code ID}. If more than one node
- /// matched the specified label, only the last is returned. If there is
- /// no node associated with the label, this returns {@code null}.
- /// <p/>
- /// Pattern tags like {@code <ID>} and {@code <expr>} without labels are
- /// considered to be labeled with {@code ID} and {@code expr}, respectively.
- /// </summary>
- /// <param name="labe"> The label to check.
- /// </param>
- /// <returns> The last <seealso cref="ParseTree"/> to match a tag with the
- /// specified label, or {@code null} if no parse tree matched a tag with the
- /// label. </returns>
- virtual ParseTree* get(const std::string& label);
-
- /// <summary>
- /// Return all nodes matching a rule or token tag with the specified label.
- /// <p/>
- /// If the {@code label} is the name of a parser rule or token in the
- /// grammar, the resulting list will contain both the parse trees matching
- /// rule or tags explicitly labeled with the label and the complete set of
- /// parse trees matching the labeled and unlabeled tags in the pattern for
- /// the parser rule or token. For example, if {@code label} is {@code "foo"},
- /// the result will contain <em>all</em> of the following.
- ///
- /// <ul>
- /// <li>Parse tree nodes matching tags of the form {@code <foo:anyRuleName>}
- /// and
- /// {@code <foo:AnyTokenName>}.</li>
- /// <li>Parse tree nodes matching tags of the form {@code
- /// <anyLabel:foo>}.</li> <li>Parse tree nodes matching tags of the form
- /// {@code <foo>}.</li>
- /// </ul>
- /// </summary>
- /// <param name="labe"> The label.
- /// </param>
- /// <returns> A collection of all <seealso cref="ParseTree"/> nodes matching
- /// tags with the specified {@code label}. If no nodes matched the label, an
- /// empty list is returned. </returns>
- virtual std::vector<ParseTree*> getAll(const std::string& label);
-
- /// <summary>
- /// Return a mapping from label &rarr; [list of nodes].
- /// <p/>
- /// The map includes special entries corresponding to the names of rules and
- /// tokens referenced in tags in the original pattern. For additional
- /// information, see the description of <seealso cref="#getAll(String)"/>.
- /// </summary>
- /// <returns> A mapping from labels to parse tree nodes. If the parse tree
- /// pattern did not contain any rule or token tags, this map will be empty.
- /// </returns>
- virtual std::map<std::string, std::vector<ParseTree*>>& getLabels();
-
- /// <summary>
- /// Get the node at which we first detected a mismatch.
- /// </summary>
- /// <returns> the node at which we first detected a mismatch, or {@code null}
- /// if the match was successful. </returns>
- virtual ParseTree* getMismatchedNode();
-
- /// <summary>
- /// Gets a value indicating whether the match operation succeeded.
- /// </summary>
- /// <returns> {@code true} if the match operation succeeded; otherwise,
- /// {@code false}. </returns>
- virtual bool succeeded();
-
- /// <summary>
- /// Get the tree pattern we are matching against.
- /// </summary>
- /// <returns> The tree pattern we are matching against. </returns>
- virtual const ParseTreePattern& getPattern();
-
- /// <summary>
- /// Get the parse tree we are trying to match to a pattern.
- /// </summary>
- /// <returns> The <seealso cref="ParseTree"/> we are trying to match to a
- /// pattern. </returns>
- virtual ParseTree* getTree();
-
- virtual std::string toString();
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
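
Inspecting a match object produced by ParseTreePatternMatcher::match() looks roughly like this sketch; the pattern "<ID> = <expr>;" and its tag names are only an example.

#include "antlr4-runtime.h"
#include <iostream>

using namespace antlr4::tree;
using namespace antlr4::tree::pattern;

void report(ParseTreeMatch& m) {
  if (!m.succeeded()) {
    std::cout << "mismatch at: " << m.getMismatchedNode()->getText() << "\n";
    return;
  }
  // Unlabeled tags are keyed by their token/rule name, so for the pattern
  // "<ID> = <expr>;" the matched nodes are reachable under "ID" and "expr".
  ParseTree* id = m.get("ID");       // last node matched for <ID>
  ParseTree* value = m.get("expr");  // last node matched for <expr>
  std::cout << id->getText() << " = " << value->getText() << "\n";
}
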
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp
deleted file mode 100644
index e4de4b21c3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/pattern/ParseTreeMatch.h"
-#include "tree/pattern/ParseTreePatternMatcher.h"
-
-#include "tree/xpath/XPath.h"
-#include "tree/xpath/XPathElement.h"
-
-#include "tree/pattern/ParseTreePattern.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::pattern;
-
-using namespace antlrcpp;
-
-ParseTreePattern::ParseTreePattern(ParseTreePatternMatcher* matcher,
- const std::string& pattern,
- int patternRuleIndex_,
- ParseTree* patternTree)
- : patternRuleIndex(patternRuleIndex_),
- _pattern(pattern),
- _patternTree(patternTree),
- _matcher(matcher) {}
-
-ParseTreePattern::~ParseTreePattern() {}
-
-ParseTreeMatch ParseTreePattern::match(ParseTree* tree) {
- return _matcher->match(tree, *this);
-}
-
-bool ParseTreePattern::matches(ParseTree* tree) {
- return _matcher->match(tree, *this).succeeded();
-}
-
-std::vector<ParseTreeMatch> ParseTreePattern::findAll(
- ParseTree* tree, const std::string& xpath) {
- xpath::XPath finder(_matcher->getParser(), xpath);
- std::vector<ParseTree*> subtrees = finder.evaluate(tree);
- std::vector<ParseTreeMatch> matches;
- for (auto t : subtrees) {
- ParseTreeMatch aMatch = match(t);
- if (aMatch.succeeded()) {
- matches.push_back(aMatch);
- }
- }
- return matches;
-}
-
-ParseTreePatternMatcher* ParseTreePattern::getMatcher() const {
- return _matcher;
-}
-
-std::string ParseTreePattern::getPattern() const { return _pattern; }
-
-int ParseTreePattern::getPatternRuleIndex() const { return patternRuleIndex; }
-
-ParseTree* ParseTreePattern::getPatternTree() const { return _patternTree; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h
deleted file mode 100644
index 912a540dcd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePattern.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// <summary>
-/// A pattern like {@code <ID> = <expr>;} converted to a <seealso
-/// cref="ParseTree"/> by <seealso cref="ParseTreePatternMatcher#compile(String,
-/// int)"/>.
-/// </summary>
-class ANTLR4CPP_PUBLIC ParseTreePattern {
- public:
- /// <summary>
- /// Construct a new instance of the <seealso cref="ParseTreePattern"/> class.
- /// </summary>
- /// <param name="matcher"> The <seealso cref="ParseTreePatternMatcher"/> which
- /// created this tree pattern. </param> <param name="pattern"> The tree
- /// pattern in concrete syntax form. </param> <param name="patternRuleIndex">
- /// The parser rule which serves as the root of the tree pattern. </param>
- /// <param name="patternTree"> The tree pattern in <seealso cref="ParseTree"/>
- /// form. </param>
- ParseTreePattern(ParseTreePatternMatcher* matcher, const std::string& pattern,
- int patternRuleIndex, ParseTree* patternTree);
- ParseTreePattern(ParseTreePattern const&) = default;
- virtual ~ParseTreePattern();
- ParseTreePattern& operator=(ParseTreePattern const&) = default;
-
- /// <summary>
- /// Match a specific parse tree against this tree pattern.
- /// </summary>
- /// <param name="tree"> The parse tree to match against this tree pattern.
- /// </param> <returns> A <seealso cref="ParseTreeMatch"/> object describing
- /// the result of the match operation. The <seealso
- /// cref="ParseTreeMatch#succeeded()"/> method can be used to determine
- /// whether or not the match was successful. </returns>
- virtual ParseTreeMatch match(ParseTree* tree);
-
- /// <summary>
- /// Determine whether or not a parse tree matches this tree pattern.
- /// </summary>
- /// <param name="tree"> The parse tree to match against this tree pattern.
- /// </param> <returns> {@code true} if {@code tree} is a match for the current
- /// tree pattern; otherwise, {@code false}. </returns>
- virtual bool matches(ParseTree* tree);
-
- /// Find all nodes using XPath and then try to match those subtrees against
- /// this tree pattern.
- /// @param tree The ParseTree to match against this pattern.
- /// @param xpath An expression matching the nodes
- ///
- /// @returns A collection of ParseTreeMatch objects describing the
- /// successful matches. Unsuccessful matches are omitted from the result,
- /// regardless of the reason for the failure.
- virtual std::vector<ParseTreeMatch> findAll(ParseTree* tree,
- const std::string& xpath);
-
- /// <summary>
- /// Get the <seealso cref="ParseTreePatternMatcher"/> which created this tree
- /// pattern.
- /// </summary>
- /// <returns> The <seealso cref="ParseTreePatternMatcher"/> which created this
- /// tree pattern. </returns>
- virtual ParseTreePatternMatcher* getMatcher() const;
-
- /// <summary>
- /// Get the tree pattern in concrete syntax form.
- /// </summary>
- /// <returns> The tree pattern in concrete syntax form. </returns>
- virtual std::string getPattern() const;
-
- /// <summary>
- /// Get the parser rule which serves as the outermost rule for the tree
- /// pattern.
- /// </summary>
- /// <returns> The parser rule which serves as the outermost rule for the tree
- /// pattern. </returns>
- virtual int getPatternRuleIndex() const;
-
- /// <summary>
- /// Get the tree pattern as a <seealso cref="ParseTree"/>. The rule and token
- /// tags from the pattern are present in the parse tree as terminal nodes with
- /// a symbol of type <seealso cref="RuleTagToken"/> or <seealso
- /// cref="TokenTagToken"/>.
- /// </summary>
- /// <returns> The tree pattern as a <seealso cref="ParseTree"/>. </returns>
- virtual ParseTree* getPatternTree() const;
-
- private:
- const int patternRuleIndex;
-
- /// This is the backing field for <seealso cref="#getPattern()"/>.
- const std::string _pattern;
-
- /// This is the backing field for <seealso cref="#getPatternTree()"/>.
- ParseTree* _patternTree;
-
- /// This is the backing field for <seealso cref="#getMatcher()"/>.
- ParseTreePatternMatcher* const _matcher;
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
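
For repeated queries it is cheaper to compile the pattern once and keep the ParseTreePattern around; the sketch below also shows findAll(), which first narrows the search with an XPath expression and then keeps only the successful matches. The lexer, parser, the "statement" rule and the path "//statement" are all hypothetical, grammar-specific names.

#include "antlr4-runtime.h"
#include <iostream>
#include <vector>

using namespace antlr4;
using namespace antlr4::tree;
using namespace antlr4::tree::pattern;

void findAssignments(Lexer& lexer, Parser& parser, ParseTree* tree) {
  ParseTreePatternMatcher matcher(&lexer, &parser);
  ParseTreePattern pattern = matcher.compile(
      "<ID> = <expr>;", static_cast<int>(parser.getRuleIndex("statement")));

  // Locate candidate subtrees via XPath, then keep the successful matches.
  std::vector<ParseTreeMatch> matches = pattern.findAll(tree, "//statement");
  for (ParseTreeMatch& m : matches) {
    std::cout << m.get("ID")->getText() << "\n";
  }
}
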
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp
deleted file mode 100644
index f644f6bf9b..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.cpp
+++ /dev/null
@@ -1,388 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "BailErrorStrategy.h"
-#include "CommonTokenStream.h"
-#include "Lexer.h"
-#include "ParserInterpreter.h"
-#include "ParserRuleContext.h"
-#include "atn/ATN.h"
-#include "tree/TerminalNode.h"
-#include "tree/pattern/ParseTreeMatch.h"
-#include "tree/pattern/ParseTreePattern.h"
-#include "tree/pattern/RuleTagToken.h"
-#include "tree/pattern/TagChunk.h"
-#include "tree/pattern/TokenTagToken.h"
-
-#include "ANTLRInputStream.h"
-#include "Exceptions.h"
-#include "ListTokenSource.h"
-#include "support/Arrays.h"
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-#include "tree/pattern/TextChunk.h"
-
-#include "tree/pattern/ParseTreePatternMatcher.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-using namespace antlr4::tree::pattern;
-using namespace antlrcpp;
-
-ParseTreePatternMatcher::CannotInvokeStartRule::CannotInvokeStartRule(
- const RuntimeException& e)
- : RuntimeException(e.what()) {}
-
-ParseTreePatternMatcher::CannotInvokeStartRule::~CannotInvokeStartRule() {}
-
-ParseTreePatternMatcher::StartRuleDoesNotConsumeFullPattern::
- ~StartRuleDoesNotConsumeFullPattern() {}
-
-ParseTreePatternMatcher::ParseTreePatternMatcher(Lexer* lexer, Parser* parser)
- : _lexer(lexer), _parser(parser) {
- InitializeInstanceFields();
-}
-
-ParseTreePatternMatcher::~ParseTreePatternMatcher() {}
-
-void ParseTreePatternMatcher::setDelimiters(const std::string& start,
- const std::string& stop,
- const std::string& escapeLeft) {
- if (start.empty()) {
- throw IllegalArgumentException("start cannot be null or empty");
- }
-
- if (stop.empty()) {
- throw IllegalArgumentException("stop cannot be null or empty");
- }
-
- _start = start;
- _stop = stop;
- _escape = escapeLeft;
-}
-
-bool ParseTreePatternMatcher::matches(ParseTree* tree,
- const std::string& pattern,
- int patternRuleIndex) {
- ParseTreePattern p = compile(pattern, patternRuleIndex);
- return matches(tree, p);
-}
-
-bool ParseTreePatternMatcher::matches(ParseTree* tree,
- const ParseTreePattern& pattern) {
- std::map<std::string, std::vector<ParseTree*>> labels;
- ParseTree* mismatchedNode = matchImpl(tree, pattern.getPatternTree(), labels);
- return mismatchedNode == nullptr;
-}
-
-ParseTreeMatch ParseTreePatternMatcher::match(ParseTree* tree,
- const std::string& pattern,
- int patternRuleIndex) {
- ParseTreePattern p = compile(pattern, patternRuleIndex);
- return match(tree, p);
-}
-
-ParseTreeMatch ParseTreePatternMatcher::match(ParseTree* tree,
- const ParseTreePattern& pattern) {
- std::map<std::string, std::vector<ParseTree*>> labels;
- tree::ParseTree* mismatchedNode =
- matchImpl(tree, pattern.getPatternTree(), labels);
- return ParseTreeMatch(tree, pattern, labels, mismatchedNode);
-}
-
-ParseTreePattern ParseTreePatternMatcher::compile(const std::string& pattern,
- int patternRuleIndex) {
- ListTokenSource tokenSrc(tokenize(pattern));
- CommonTokenStream tokens(&tokenSrc);
-
- ParserInterpreter parserInterp(
- _parser->getGrammarFileName(), _parser->getVocabulary(),
- _parser->getRuleNames(), _parser->getATNWithBypassAlts(), &tokens);
-
- ParserRuleContext* tree = nullptr;
- try {
- parserInterp.setErrorHandler(std::make_shared<BailErrorStrategy>());
- tree = parserInterp.parse(patternRuleIndex);
- } catch (ParseCancellationException& e) {
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- // rethrow_if_nested is not available before VS 2015.
- throw e;
-#else
- std::rethrow_if_nested(e); // Unwrap the nested exception.
-#endif
- } catch (RecognitionException& re) {
- throw re;
- } catch (std::exception& e) {
- // throw_with_nested is not available before VS 2015.
- throw e;
- }
-
- // Make sure tree pattern compilation checks for a complete parse
- if (tokens.LA(1) != Token::EOF) {
- throw StartRuleDoesNotConsumeFullPattern();
- }
-
- return ParseTreePattern(this, pattern, patternRuleIndex, tree);
-}
-
-Lexer* ParseTreePatternMatcher::getLexer() { return _lexer; }
-
-Parser* ParseTreePatternMatcher::getParser() { return _parser; }
-
-ParseTree* ParseTreePatternMatcher::matchImpl(
- ParseTree* tree, ParseTree* patternTree,
- std::map<std::string, std::vector<ParseTree*>>& labels) {
- if (tree == nullptr) {
- throw IllegalArgumentException("tree cannot be nul");
- }
-
- if (patternTree == nullptr) {
- throw IllegalArgumentException("patternTree cannot be nul");
- }
-
- // x and <ID>, x and y, or x and x; or could be mismatched types
- if (is<TerminalNode*>(tree) && is<TerminalNode*>(patternTree)) {
- TerminalNode* t1 = dynamic_cast<TerminalNode*>(tree);
- TerminalNode* t2 = dynamic_cast<TerminalNode*>(patternTree);
-
- ParseTree* mismatchedNode = nullptr;
- // both are tokens and they have same type
- if (t1->getSymbol()->getType() == t2->getSymbol()->getType()) {
- if (is<TokenTagToken*>(t2->getSymbol())) { // x and <ID>
- TokenTagToken* tokenTagToken =
- dynamic_cast<TokenTagToken*>(t2->getSymbol());
-
- // track label->list-of-nodes for both token name and label (if any)
- labels[tokenTagToken->getTokenName()].push_back(tree);
- if (tokenTagToken->getLabel() != "") {
- labels[tokenTagToken->getLabel()].push_back(tree);
- }
- } else if (t1->getText() == t2->getText()) {
- // x and x
- } else {
- // x and y
- if (mismatchedNode == nullptr) {
- mismatchedNode = t1;
- }
- }
- } else {
- if (mismatchedNode == nullptr) {
- mismatchedNode = t1;
- }
- }
-
- return mismatchedNode;
- }
-
- if (is<ParserRuleContext*>(tree) && is<ParserRuleContext*>(patternTree)) {
- ParserRuleContext* r1 = dynamic_cast<ParserRuleContext*>(tree);
- ParserRuleContext* r2 = dynamic_cast<ParserRuleContext*>(patternTree);
- ParseTree* mismatchedNode = nullptr;
-
- // (expr ...) and <expr>
- RuleTagToken* ruleTagToken = getRuleTagToken(r2);
- if (ruleTagToken != nullptr) {
- // ParseTreeMatch *m = nullptr; // unused?
- if (r1->getRuleIndex() == r2->getRuleIndex()) {
- // track label->list-of-nodes for both rule name and label (if any)
- labels[ruleTagToken->getRuleName()].push_back(tree);
- if (ruleTagToken->getLabel() != "") {
- labels[ruleTagToken->getLabel()].push_back(tree);
- }
- } else {
- if (!mismatchedNode) {
- mismatchedNode = r1;
- }
- }
-
- return mismatchedNode;
- }
-
- // (expr ...) and (expr ...)
- if (r1->children.size() != r2->children.size()) {
- if (mismatchedNode == nullptr) {
- mismatchedNode = r1;
- }
-
- return mismatchedNode;
- }
-
- std::size_t n = r1->children.size();
- for (size_t i = 0; i < n; i++) {
- ParseTree* childMatch =
- matchImpl(r1->children[i], patternTree->children[i], labels);
- if (childMatch) {
- return childMatch;
- }
- }
-
- return mismatchedNode;
- }
-
- // if nodes aren't both tokens or both rule nodes, can't match
- return tree;
-}
-
-RuleTagToken* ParseTreePatternMatcher::getRuleTagToken(ParseTree* t) {
- if (t->children.size() == 1 && is<TerminalNode*>(t->children[0])) {
- TerminalNode* c = dynamic_cast<TerminalNode*>(t->children[0]);
- if (is<RuleTagToken*>(c->getSymbol())) {
- return dynamic_cast<RuleTagToken*>(c->getSymbol());
- }
- }
- return nullptr;
-}
-
-std::vector<std::unique_ptr<Token>> ParseTreePatternMatcher::tokenize(
- const std::string& pattern) {
- // split pattern into chunks: sea (raw input) and islands (<ID>, <expr>)
- std::vector<Chunk> chunks = split(pattern);
-
- // create token stream from text and tags
- std::vector<std::unique_ptr<Token>> tokens;
- for (auto chunk : chunks) {
- if (is<TagChunk*>(&chunk)) {
- TagChunk& tagChunk = (TagChunk&)chunk;
- // add special rule token or conjure up new token from name
- if (isupper(tagChunk.getTag()[0])) {
- size_t ttype = _parser->getTokenType(tagChunk.getTag());
- if (ttype == Token::INVALID_TYPE) {
- throw IllegalArgumentException("Unknown token " + tagChunk.getTag() +
- " in pattern: " + pattern);
- }
- tokens.emplace_back(new TokenTagToken(tagChunk.getTag(), (int)ttype,
- tagChunk.getLabel()));
- } else if (islower(tagChunk.getTag()[0])) {
- size_t ruleIndex = _parser->getRuleIndex(tagChunk.getTag());
- if (ruleIndex == INVALID_INDEX) {
- throw IllegalArgumentException("Unknown rule " + tagChunk.getTag() +
- " in pattern: " + pattern);
- }
- size_t ruleImaginaryTokenType =
- _parser->getATNWithBypassAlts().ruleToTokenType[ruleIndex];
- tokens.emplace_back(new RuleTagToken(
- tagChunk.getTag(), ruleImaginaryTokenType, tagChunk.getLabel()));
- } else {
- throw IllegalArgumentException("invalid tag: " + tagChunk.getTag() +
- " in pattern: " + pattern);
- }
- } else {
- TextChunk& textChunk = (TextChunk&)chunk;
- ANTLRInputStream input(textChunk.getText());
- _lexer->setInputStream(&input);
- std::unique_ptr<Token> t(_lexer->nextToken());
- while (t->getType() != Token::EOF) {
- tokens.push_back(std::move(t));
- t = _lexer->nextToken();
- }
- _lexer->setInputStream(nullptr);
- }
- }
-
- return tokens;
-}
-
-std::vector<Chunk> ParseTreePatternMatcher::split(const std::string& pattern) {
- size_t p = 0;
- size_t n = pattern.length();
- std::vector<Chunk> chunks;
-
- // find all start and stop indexes first, then collect
- std::vector<size_t> starts;
- std::vector<size_t> stops;
- while (p < n) {
- if (p == pattern.find(_escape + _start, p)) {
- p += _escape.length() + _start.length();
- } else if (p == pattern.find(_escape + _stop, p)) {
- p += _escape.length() + _stop.length();
- } else if (p == pattern.find(_start, p)) {
- starts.push_back(p);
- p += _start.length();
- } else if (p == pattern.find(_stop, p)) {
- stops.push_back(p);
- p += _stop.length();
- } else {
- p++;
- }
- }
-
- if (starts.size() > stops.size()) {
- throw IllegalArgumentException("unterminated tag in pattern: " + pattern);
- }
-
- if (starts.size() < stops.size()) {
- throw IllegalArgumentException("missing start tag in pattern: " + pattern);
- }
-
- size_t ntags = starts.size();
- for (size_t i = 0; i < ntags; i++) {
- if (starts[i] >= stops[i]) {
- throw IllegalArgumentException(
- "tag delimiters out of order in pattern: " + pattern);
- }
- }
-
- // collect into chunks now
- if (ntags == 0) {
- std::string text = pattern.substr(0, n);
- chunks.push_back(TextChunk(text));
- }
-
- if (ntags > 0 && starts[0] > 0) { // copy text up to first tag into chunks
- std::string text = pattern.substr(0, starts[0]);
- chunks.push_back(TextChunk(text));
- }
-
- for (size_t i = 0; i < ntags; i++) {
- // copy inside of <tag>
- std::string tag = pattern.substr(starts[i] + _start.length(),
- stops[i] - (starts[i] + _start.length()));
- std::string ruleOrToken = tag;
- std::string label = "";
- size_t colon = tag.find(':');
- if (colon != std::string::npos) {
- label = tag.substr(0, colon);
- ruleOrToken = tag.substr(colon + 1, tag.length() - (colon + 1));
- }
- chunks.push_back(TagChunk(label, ruleOrToken));
- if (i + 1 < ntags) {
- // copy from end of <tag> to start of next
- std::string text =
- pattern.substr(stops[i] + _stop.length(),
- starts[i + 1] - (stops[i] + _stop.length()));
- chunks.push_back(TextChunk(text));
- }
- }
-
- if (ntags > 0) {
- size_t afterLastTag = stops[ntags - 1] + _stop.length();
- if (afterLastTag < n) { // copy text from end of last tag to end
- std::string text = pattern.substr(afterLastTag, n - afterLastTag);
- chunks.push_back(TextChunk(text));
- }
- }
-
- // strip out all backslashes from text chunks but not tags
- for (size_t i = 0; i < chunks.size(); i++) {
- Chunk& c = chunks[i];
- if (is<TextChunk*>(&c)) {
- TextChunk& tc = (TextChunk&)c;
- std::string unescaped = tc.getText();
- unescaped.erase(std::remove(unescaped.begin(), unescaped.end(), '\\'),
- unescaped.end());
- if (unescaped.length() < tc.getText().length()) {
- chunks[i] = TextChunk(unescaped);
- }
- }
- }
-
- return chunks;
-}
-
-void ParseTreePatternMatcher::InitializeInstanceFields() {
- _start = "<";
- _stop = ">";
- _escape = "\\";
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h
deleted file mode 100644
index 56ff76b1ee..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/ParseTreePatternMatcher.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Exceptions.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// <summary>
-/// A tree pattern matching mechanism for ANTLR <seealso cref="ParseTree"/>s.
-/// <p/>
-/// Patterns are strings of source input text with special tags representing
-/// token or rule references such as:
-/// <p/>
-/// {@code <ID> = <expr>;}
-/// <p/>
-/// Given a pattern start rule such as {@code statement}, this object constructs
-/// a <seealso cref="ParseTree"/> with placeholders for the {@code ID} and
-/// {@code expr} subtree. Then the <seealso cref="#match"/> routines can compare
-/// an actual <seealso cref="ParseTree"/> from a parse with this pattern. Tag
-/// {@code <ID>} matches any {@code ID} token and tag {@code <expr>} references
-/// the result of the
-/// {@code expr} rule (generally an instance of {@code ExprContext}.
-/// <p/>
-/// Pattern {@code x = 0;} is a similar pattern that matches the same pattern
-/// except that it requires the identifier to be {@code x} and the expression to
-/// be {@code 0}.
-/// <p/>
-/// The <seealso cref="#matches"/> routines return {@code true} or {@code false}
-/// based upon a match for the tree rooted at the parameter sent in. The
-/// <seealso cref="#match"/> routines return a <seealso cref="ParseTreeMatch"/>
-/// object that contains the parse tree, the parse tree pattern, and a map from
-/// tag name to matched nodes (more below). A subtree that fails to match,
-/// returns with <seealso cref="ParseTreeMatch#mismatchedNode"/> set to the
-/// first tree node that did not match. <p/> For efficiency, you can compile a
-/// tree pattern in string form to a <seealso cref="ParseTreePattern"/> object.
-/// <p/>
-/// See {@code TestParseTreeMatcher} for lots of examples.
-/// <seealso cref="ParseTreePattern"/> has two static helper methods:
-/// <seealso cref="ParseTreePattern#findAll"/> and <seealso
-/// cref="ParseTreePattern#match"/> that are easy to use but not super efficient
-/// because they create new <seealso cref="ParseTreePatternMatcher"/> objects
-/// each time and have to compile the pattern in string form before using it.
-/// <p/>
-/// The lexer and parser that you pass into the <seealso
-/// cref="ParseTreePatternMatcher"/> constructor are used to parse the pattern
-/// in string form. The lexer converts the {@code <ID> = <expr>;} into a
-/// sequence of four tokens (assuming lexer throws out whitespace or puts it on
-/// a hidden channel). Be aware that the input stream is reset for the lexer
-/// (but not the parser; a <seealso cref="ParserInterpreter"/> is created to
-/// parse the input.). Any user-defined fields you have put into the lexer might
-/// get changed when this mechanism asks it to scan the pattern string. <p/>
-/// Normally a parser does not accept token {@code <expr>} as a valid
-/// {@code expr} but, from the parser passed in, we create a special version of
-/// the underlying grammar representation (an <seealso cref="ATN"/>) that allows
-/// imaginary tokens representing rules ({@code <expr>}) to match entire rules.
-/// We call these <em>bypass alternatives</em>. <p/> Delimiters are {@code <}
-/// and {@code >}, with {@code \} as the escape string by default, but you can
-/// set them to whatever you want using <seealso cref="#setDelimiters"/>. You
-/// must escape both start and stop strings
-/// {@code \<} and {@code \>}.
-/// </summary>
-class ANTLR4CPP_PUBLIC ParseTreePatternMatcher {
- public:
- class CannotInvokeStartRule : public RuntimeException {
- public:
- CannotInvokeStartRule(const RuntimeException& e);
- ~CannotInvokeStartRule();
- };
-
- // Fixes https://github.com/antlr/antlr4/issues/413
- // "Tree pattern compilation doesn't check for a complete parse"
- class StartRuleDoesNotConsumeFullPattern : public RuntimeException {
- public:
- StartRuleDoesNotConsumeFullPattern() = default;
- StartRuleDoesNotConsumeFullPattern(
- StartRuleDoesNotConsumeFullPattern const&) = default;
- ~StartRuleDoesNotConsumeFullPattern();
-
- StartRuleDoesNotConsumeFullPattern& operator=(
- StartRuleDoesNotConsumeFullPattern const&) = default;
- };
-
- /// Constructs a <seealso cref="ParseTreePatternMatcher"/> or from a <seealso
- /// cref="Lexer"/> and <seealso cref="Parser"/> object. The lexer input stream
- /// is altered for tokenizing the tree patterns. The parser is used as a
- /// convenient mechanism to get the grammar name, plus token, rule names.
- ParseTreePatternMatcher(Lexer* lexer, Parser* parser);
- virtual ~ParseTreePatternMatcher();
-
- /// <summary>
- /// Set the delimiters used for marking rule and token tags within concrete
- /// syntax used by the tree pattern parser.
- /// </summary>
- /// <param name="start"> The start delimiter. </param>
- /// <param name="stop"> The stop delimiter. </param>
- /// <param name="escapeLeft"> The escape sequence to use for escaping a start
- /// or stop delimiter.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code start} is {@code
- /// null} or empty. </exception> <exception cref="IllegalArgumentException">
- /// if {@code stop} is {@code null} or empty. </exception>
- virtual void setDelimiters(const std::string& start, const std::string& stop,
- const std::string& escapeLeft);
-
- /// <summary>
- /// Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code
- /// tree}? </summary>
- virtual bool matches(ParseTree* tree, const std::string& pattern,
- int patternRuleIndex);
-
- /// <summary>
- /// Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in
- /// a
- /// compiled pattern instead of a string representation of a tree pattern.
- /// </summary>
- virtual bool matches(ParseTree* tree, const ParseTreePattern& pattern);
-
- /// <summary>
- /// Compare {@code pattern} matched as rule {@code patternRuleIndex} against
- /// {@code tree} and return a <seealso cref="ParseTreeMatch"/> object that
- /// contains the matched elements, or the node at which the match failed.
- /// </summary>
- virtual ParseTreeMatch match(ParseTree* tree, const std::string& pattern,
- int patternRuleIndex);
-
- /// <summary>
- /// Compare {@code pattern} matched against {@code tree} and return a
- /// <seealso cref="ParseTreeMatch"/> object that contains the matched
- /// elements, or the node at which the match failed. Pass in a compiled
- /// pattern instead of a string representation of a tree pattern.
- /// </summary>
- virtual ParseTreeMatch match(ParseTree* tree,
- const ParseTreePattern& pattern);
-
- /// <summary>
- /// For repeated use of a tree pattern, compile it to a
- /// <seealso cref="ParseTreePattern"/> using this method.
- /// </summary>
- virtual ParseTreePattern compile(const std::string& pattern,
- int patternRuleIndex);
-
- /// <summary>
- /// Used to convert the tree pattern string into a series of tokens. The
- /// input stream is reset.
- /// </summary>
- virtual Lexer* getLexer();
-
- /// <summary>
- /// Used to get the grammar file name, token names, and rule names used to
- /// parse the pattern into a parse tree.
- /// </summary>
- virtual Parser* getParser();
-
- // ---- SUPPORT CODE ----
-
- virtual std::vector<std::unique_ptr<Token>> tokenize(
- const std::string& pattern);
-
- /// Split "<ID> = <e:expr>;" into 4 chunks for tokenizing by tokenize().
- virtual std::vector<Chunk> split(const std::string& pattern);
-
- protected:
- std::string _start;
- std::string _stop;
- std::string _escape; // e.g., \< and \> must escape BOTH!
-
- /// Recursively walk {@code tree} against {@code patternTree}, filling
- /// {@code match.}<seealso cref="ParseTreeMatch#labels labels"/>.
- ///
- /// <returns> the first node encountered in {@code tree} which does not match
- /// a corresponding node in {@code patternTree}, or {@code null} if the match
- /// was successful. The specific node returned depends on the matching
- /// algorithm used by the implementation, and may be overridden. </returns>
- virtual ParseTree* matchImpl(
- ParseTree* tree, ParseTree* patternTree,
- std::map<std::string, std::vector<ParseTree*>>& labels);
-
- /// Is {@code t} a {@code <expr>} subtree?
- virtual RuleTagToken* getRuleTagToken(ParseTree* t);
-
- private:
- Lexer* _lexer;
- Parser* _parser;
-
- void InitializeInstanceFields();
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
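For reference, a minimal sketch of how the ParseTreePatternMatcher API above is
typically driven (the lexer, parser, and statementRuleIndex are placeholders
for a hypothetical ANTLR-generated grammar; the include path follows the
runtime's src-relative layout):

#include "tree/pattern/ParseTreePatternMatcher.h"

using namespace antlr4;
using namespace antlr4::tree::pattern;

// Returns true if `subtree`, matched as the given parser rule, has the shape
// "<ID> = <expr>;". <ID> and <expr> are token/rule tags; the rest of the
// pattern must appear literally.
bool looksLikeAssignment(Lexer* lexer, Parser* parser,
                         tree::ParseTree* subtree, int statementRuleIndex) {
  // The matcher re-tokenizes the pattern with `lexer` (resetting its input
  // stream) and parses it with a ParserInterpreter derived from `parser`.
  ParseTreePatternMatcher matcher(lexer, parser);
  return matcher.matches(subtree, "<ID> = <expr>;", statementRuleIndex);
}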
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp
deleted file mode 100644
index 002e54ed39..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/RuleTagToken.h"
-
-using namespace antlr4::tree::pattern;
-
-RuleTagToken::RuleTagToken(const std::string& /*ruleName*/,
- int _bypassTokenType)
- : bypassTokenType(_bypassTokenType) {}
-
-RuleTagToken::RuleTagToken(const std::string& ruleName, size_t bypassTokenType,
- const std::string& label)
- : ruleName(ruleName), bypassTokenType(bypassTokenType), label(label) {
- if (ruleName.empty()) {
- throw IllegalArgumentException("ruleName cannot be null or empty.");
- }
-}
-
-std::string RuleTagToken::getRuleName() const { return ruleName; }
-
-std::string RuleTagToken::getLabel() const { return label; }
-
-size_t RuleTagToken::getChannel() const { return DEFAULT_CHANNEL; }
-
-std::string RuleTagToken::getText() const {
- if (label != "") {
- return std::string("<") + label + std::string(":") + ruleName +
- std::string(">");
- }
-
- return std::string("<") + ruleName + std::string(">");
-}
-
-size_t RuleTagToken::getType() const { return bypassTokenType; }
-
-size_t RuleTagToken::getLine() const { return 0; }
-
-size_t RuleTagToken::getCharPositionInLine() const { return INVALID_INDEX; }
-
-size_t RuleTagToken::getTokenIndex() const { return INVALID_INDEX; }
-
-size_t RuleTagToken::getStartIndex() const { return INVALID_INDEX; }
-
-size_t RuleTagToken::getStopIndex() const { return INVALID_INDEX; }
-
-antlr4::TokenSource* RuleTagToken::getTokenSource() const { return nullptr; }
-
-antlr4::CharStream* RuleTagToken::getInputStream() const { return nullptr; }
-
-std::string RuleTagToken::toString() const {
- return ruleName + ":" + std::to_string(bypassTokenType);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h
deleted file mode 100644
index 6a09949539..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/RuleTagToken.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// <summary>
-/// A <seealso cref="Token"/> object representing an entire subtree matched by a
-/// parser rule; e.g., {@code <expr>}. These tokens are created for <seealso
-/// cref="TagChunk"/> chunks where the tag corresponds to a parser rule.
-/// </summary>
-class ANTLR4CPP_PUBLIC RuleTagToken : public Token {
- /// <summary>
- /// This is the backing field for <seealso cref="#getRuleName"/>.
- /// </summary>
- private:
- const std::string ruleName;
-
- /// The token type for the current token. This is the token type assigned to
- /// the bypass alternative for the rule during ATN deserialization.
- const size_t bypassTokenType;
-
- /// This is the backing field for <seealso cref="#getLabel"/>.
- const std::string label;
-
- public:
- /// <summary>
- /// Constructs a new instance of <seealso cref="RuleTagToken"/> with the
- /// specified rule name and bypass token type and no label.
- /// </summary>
- /// <param name="ruleName"> The name of the parser rule this rule tag matches.
- /// </param> <param name="bypassTokenType"> The bypass token type assigned to
- /// the parser rule.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code ruleName} is {@code
- /// null} or empty. </exception>
- RuleTagToken(
- const std::string& ruleName,
- int bypassTokenType); // this(ruleName, bypassTokenType, nullptr);
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="RuleTagToken"/> with the
- /// specified rule name, bypass token type, and label.
- /// </summary>
- /// <param name="ruleName"> The name of the parser rule this rule tag matches.
- /// </param> <param name="bypassTokenType"> The bypass token type assigned to
- /// the parser rule. </param> <param name="label"> The label associated with
- /// the rule tag, or {@code null} if the rule tag is unlabeled.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code ruleName} is {@code
- /// null} or empty. </exception>
- RuleTagToken(const std::string& ruleName, size_t bypassTokenType,
- const std::string& label);
-
- /// <summary>
- /// Gets the name of the rule associated with this rule tag.
- /// </summary>
- /// <returns> The name of the parser rule associated with this rule tag.
- /// </returns>
- std::string getRuleName() const;
-
- /// <summary>
- /// Gets the label associated with the rule tag.
- /// </summary>
- /// <returns> The name of the label associated with the rule tag, or
- /// {@code null} if this is an unlabeled rule tag. </returns>
- std::string getLabel() const;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// Rule tag tokens are always placed on the <seealso
- /// cref="#DEFAULT_CHANNEL"/>.
- /// </summary>
- virtual size_t getChannel() const override;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// This method returns the rule tag formatted with {@code <} and {@code >}
- /// delimiters.
- /// </summary>
- virtual std::string getText() const override;
-
- /// Rule tag tokens have types assigned according to the rule bypass
- /// transitions created during ATN deserialization.
- virtual size_t getType() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns 0.
- virtual size_t getLine() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns
- /// INVALID_INDEX.
- virtual size_t getCharPositionInLine() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns
- /// INVALID_INDEX.
- virtual size_t getTokenIndex() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns
- /// INVALID_INDEX.
- virtual size_t getStartIndex() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns
- /// INVALID_INDEX.
- virtual size_t getStopIndex() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns
- /// {@code null}.
- virtual TokenSource* getTokenSource() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns
- /// {@code null}.
- virtual CharStream* getInputStream() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> returns a string of
- /// the form {@code ruleName:bypassTokenType}.
- virtual std::string toString() const override;
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp
deleted file mode 100644
index 232581d16c..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/TagChunk.h"
-
-using namespace antlr4::tree::pattern;
-
-TagChunk::TagChunk(const std::string& tag) : TagChunk("", tag) {}
-
-TagChunk::TagChunk(const std::string& label, const std::string& tag)
- : _tag(tag), _label(label) {
- if (tag.empty()) {
- throw IllegalArgumentException("tag cannot be null or empty");
- }
-}
-
-TagChunk::~TagChunk() {}
-
-std::string TagChunk::getTag() { return _tag; }
-
-std::string TagChunk::getLabel() { return _label; }
-
-std::string TagChunk::toString() {
- if (!_label.empty()) {
- return _label + ":" + _tag;
- }
-
- return _tag;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h
deleted file mode 100644
index 3099f6cfe0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TagChunk.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Chunk.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// <summary>
-/// Represents a placeholder tag in a tree pattern. A tag can have any of the
-/// following forms.
-///
-/// <ul>
-/// <li>{@code expr}: An unlabeled placeholder for a parser rule {@code
-/// expr}.</li> <li>{@code ID}: An unlabeled placeholder for a token of type
-/// {@code ID}.</li> <li>{@code e:expr}: A labeled placeholder for a parser rule
-/// {@code expr}.</li> <li>{@code id:ID}: A labeled placeholder for a token of
-/// type {@code ID}.</li>
-/// </ul>
-///
-/// This class does not perform any validation on the tag or label names aside
-/// from ensuring that the tag is a non-null, non-empty string.
-/// </summary>
-class ANTLR4CPP_PUBLIC TagChunk : public Chunk {
- public:
- /// <summary>
- /// Construct a new instance of <seealso cref="TagChunk"/> using the specified
- /// tag and no label.
- /// </summary>
- /// <param name="tag"> The tag, which should be the name of a parser rule or
- /// token type.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code tag} is {@code null}
- /// or empty. </exception>
- TagChunk(const std::string& tag);
- virtual ~TagChunk();
-
- /// <summary>
- /// Construct a new instance of <seealso cref="TagChunk"/> using the specified
- /// label and tag.
- /// </summary>
- /// <param name="label"> The label for the tag. If this is {@code null}, the
- /// <seealso cref="TagChunk"/> represents an unlabeled tag. </param>
- /// <param name="tag"> The tag, which should be the name of a parser rule or
- /// token type.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code tag} is {@code null}
- /// or empty. </exception>
- TagChunk(const std::string& label, const std::string& tag);
-
- /// <summary>
- /// Get the tag for this chunk.
- /// </summary>
- /// <returns> The tag for the chunk. </returns>
- std::string getTag();
-
- /// <summary>
- /// Get the label, if any, assigned to this chunk.
- /// </summary>
- /// <returns> The label assigned to this chunk, or {@code null} if no label is
- /// assigned to the chunk. </returns>
- std::string getLabel();
-
- /// <summary>
- /// This method returns a text representation of the tag chunk. Labeled tags
- /// are returned in the form {@code label:tag}, and unlabeled tags are
- /// returned as just the tag name.
- /// </summary>
- virtual std::string toString();
-
- private:
- /// This is the backing field for <seealso cref="#getTag"/>.
- const std::string _tag;
- /// <summary>
- /// This is the backing field for <seealso cref="#getLabel"/>.
- /// </summary>
- const std::string _label;
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp
deleted file mode 100644
index c6c8d278bb..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/TextChunk.h"
-
-using namespace antlr4::tree::pattern;
-
-TextChunk::TextChunk(const std::string& text) : text(text) {
- if (text == "") {
- throw IllegalArgumentException("text cannot be null or empty");
- }
-}
-
-TextChunk::~TextChunk() {}
-
-std::string TextChunk::getText() { return text; }
-
-std::string TextChunk::toString() {
- return std::string("'") + text + std::string("'");
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h
deleted file mode 100644
index 7a9686cb90..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TextChunk.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Chunk.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// <summary>
-/// Represents a span of raw text (concrete syntax) between tags in a tree
-/// pattern string.
-/// </summary>
-class ANTLR4CPP_PUBLIC TextChunk : public Chunk {
- private:
- /// <summary>
- /// This is the backing field for <seealso cref="#getText"/>.
- /// </summary>
- const std::string text;
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="TextChunk"/> with the
- /// specified text.
- /// </summary>
- /// <param name="text"> The text of this chunk. </param>
- /// <exception cref="IllegalArgumentException"> if {@code text} is {@code
- /// null}. </exception>
- public:
- TextChunk(const std::string& text);
- virtual ~TextChunk();
-
- /// <summary>
- /// Gets the raw text of this chunk.
- /// </summary>
- /// <returns> The text of the chunk. </returns>
- std::string getText();
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The implementation for <seealso cref="TextChunk"/> returns the result of
- /// <seealso cref="#getText()"/> in single quotes.
- /// </summary>
- virtual std::string toString();
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
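For reference, a small sketch of the chunk objects that the TagChunk and
TextChunk classes above model when a pattern such as "<ID> = <e:expr>;" is
split into chunks (values are illustrative only; include paths follow the
runtime's src-relative layout):

#include "tree/pattern/TagChunk.h"
#include "tree/pattern/TextChunk.h"

using namespace antlr4::tree::pattern;

void chunkSketch() {
  TagChunk id("ID");           // unlabeled token tag; toString() -> "ID"
  TextChunk equals(" = ");     // literal text; toString() -> "' = '"
  TagChunk expr("e", "expr");  // labeled rule tag; toString() -> "e:expr"
  TextChunk semi(";");         // literal text; toString() -> "';'"
}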
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp
deleted file mode 100644
index 142aeb75b3..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/pattern/TokenTagToken.h"
-
-using namespace antlr4::tree::pattern;
-
-TokenTagToken::TokenTagToken(const std::string& /*tokenName*/, int type)
- : CommonToken(type), tokenName(""), label("") {}
-
-TokenTagToken::TokenTagToken(const std::string& tokenName, int type,
- const std::string& label)
- : CommonToken(type), tokenName(tokenName), label(label) {}
-
-std::string TokenTagToken::getTokenName() const { return tokenName; }
-
-std::string TokenTagToken::getLabel() const { return label; }
-
-std::string TokenTagToken::getText() const {
- if (!label.empty()) {
- return "<" + label + ":" + tokenName + ">";
- }
-
- return "<" + tokenName + ">";
-}
-
-std::string TokenTagToken::toString() const {
- return tokenName + ":" + std::to_string(_type);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h
deleted file mode 100644
index d0b2671cd0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/pattern/TokenTagToken.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CommonToken.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
-/// <summary>
-/// A <seealso cref="Token"/> object representing a token of a particular type;
-/// e.g.,
-/// {@code <ID>}. These tokens are created for <seealso cref="TagChunk"/> chunks
-/// where the tag corresponds to a lexer rule or token type.
-/// </summary>
-class ANTLR4CPP_PUBLIC TokenTagToken : public CommonToken {
- /// <summary>
- /// This is the backing field for <seealso cref="#getTokenName"/>.
- /// </summary>
- private:
- const std::string tokenName;
- /// <summary>
- /// This is the backing field for <seealso cref="#getLabel"/>.
- /// </summary>
- const std::string label;
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="TokenTagToken"/> for an
- /// unlabeled tag with the specified token name and type.
- /// </summary>
- /// <param name="tokenName"> The token name. </param>
- /// <param name="type"> The token type. </param>
- public:
- TokenTagToken(const std::string& tokenName,
- int type); // this(tokenName, type, nullptr);
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="TokenTagToken"/> with the
- /// specified token name, type, and label.
- /// </summary>
- /// <param name="tokenName"> The token name. </param>
- /// <param name="type"> The token type. </param>
- /// <param name="label"> The label associated with the token tag, or {@code
- /// null} if the token tag is unlabeled. </param>
- TokenTagToken(const std::string& tokenName, int type,
- const std::string& label);
-
- /// <summary>
- /// Gets the token name. </summary>
- /// <returns> The token name. </returns>
- std::string getTokenName() const;
-
- /// <summary>
- /// Gets the label associated with the token tag.
- /// </summary>
- /// <returns> The name of the label associated with the token tag, or
- /// {@code null} if this is an unlabeled token tag. </returns>
- std::string getLabel() const;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The implementation for <seealso cref="TokenTagToken"/> returns the token
- /// tag formatted with {@code <} and {@code >} delimiters.
- /// </summary>
- virtual std::string getText() const override;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The implementation for <seealso cref="TokenTagToken"/> returns a string of
- /// the form
- /// {@code tokenName:type}.
- /// </summary>
- virtual std::string toString() const override;
-};
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp
deleted file mode 100644
index 1f779cbc1e..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPathElement.h"
-#include "XPathLexer.h"
-#include "XPathLexerErrorListener.h"
-#include "XPathRuleAnywhereElement.h"
-#include "XPathRuleElement.h"
-#include "XPathTokenAnywhereElement.h"
-#include "XPathTokenElement.h"
-#include "XPathWildcardAnywhereElement.h"
-#include "XPathWildcardElement.h"
-
-#include "XPath.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-const std::string XPath::WILDCARD = "*";
-const std::string XPath::NOT = "!";
-
-XPath::XPath(Parser* parser, const std::string& path) {
- _parser = parser;
- _path = path;
- _elements = split(path);
-}
-
-std::vector<XPathElement> XPath::split(const std::string& path) {
- ANTLRFileStream in(path);
- XPathLexer lexer(&in);
- lexer.removeErrorListeners();
- XPathLexerErrorListener listener;
- lexer.addErrorListener(&listener);
- CommonTokenStream tokenStream(&lexer);
- try {
- tokenStream.fill();
- } catch (LexerNoViableAltException&) {
- size_t pos = lexer.getCharPositionInLine();
- std::string msg = "Invalid tokens or characters at index " +
- std::to_string(pos) + " in path '" + path + "'";
- throw IllegalArgumentException(msg);
- }
-
- std::vector<Token*> tokens = tokenStream.getTokens();
- std::vector<XPathElement> elements;
- size_t n = tokens.size();
- size_t i = 0;
- bool done = false;
- while (!done && i < n) {
- Token* el = tokens[i];
- Token* next = nullptr;
- switch (el->getType()) {
- case XPathLexer::ROOT:
- case XPathLexer::ANYWHERE: {
- bool anywhere = el->getType() == XPathLexer::ANYWHERE;
- i++;
- next = tokens[i];
- bool invert = next->getType() == XPathLexer::BANG;
- if (invert) {
- i++;
- next = tokens[i];
- }
- XPathElement pathElement = getXPathElement(next, anywhere);
- pathElement.setInvert(invert);
- elements.push_back(pathElement);
- i++;
- break;
- }
- case XPathLexer::TOKEN_REF:
- case XPathLexer::RULE_REF:
- case XPathLexer::WILDCARD:
- elements.push_back(getXPathElement(el, false));
- i++;
- break;
-
- case Token::EOF:
- done = true;
- break;
-
- default:
- throw IllegalArgumentException("Unknown path element " + el->toString());
- }
- }
-
- return elements;
-}
-
-XPathElement XPath::getXPathElement(Token* wordToken, bool anywhere) {
- if (wordToken->getType() == Token::EOF) {
- throw IllegalArgumentException("Missing path element at end of path");
- }
- std::string word = wordToken->getText();
- size_t ttype = _parser->getTokenType(word);
- ssize_t ruleIndex = _parser->getRuleIndex(word);
- switch (wordToken->getType()) {
- case XPathLexer::WILDCARD:
- if (anywhere) return XPathWildcardAnywhereElement();
- return XPathWildcardElement();
-
- case XPathLexer::TOKEN_REF:
- case XPathLexer::STRING:
- if (ttype == Token::INVALID_TYPE) {
- throw IllegalArgumentException(
- word + " at index " + std::to_string(wordToken->getStartIndex()) +
- " isn't a valid token name");
- }
- if (anywhere) return XPathTokenAnywhereElement(word, (int)ttype);
- return XPathTokenElement(word, (int)ttype);
-
- default:
- if (ruleIndex == -1) {
- throw IllegalArgumentException(
- word + " at index " + std::to_string(wordToken->getStartIndex()) +
- " isn't a valid rule name");
- }
- if (anywhere) return XPathRuleAnywhereElement(word, (int)ruleIndex);
- return XPathRuleElement(word, (int)ruleIndex);
- }
-}
-
-static ParserRuleContext dummyRoot;
-
-std::vector<ParseTree*> XPath::evaluate(ParseTree* t) {
- dummyRoot.children = {t}; // don't set t's parent.
-
- std::vector<ParseTree*> work = {&dummyRoot};
-
- size_t i = 0;
- while (i < _elements.size()) {
- std::vector<ParseTree*> next;
- for (auto node : work) {
- if (!node->children.empty()) {
- // only try to match next element if it has children
- // e.g., //func/*/stat might have a token node for which
- // we can't go looking for stat nodes.
- auto matching = _elements[i].evaluate(node);
- next.insert(next.end(), matching.begin(), matching.end());
- }
- }
- i++;
- work = next;
- }
-
- return work;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.h
deleted file mode 100644
index 3863181538..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPath.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-/// Represent a subset of XPath XML path syntax for use in identifying nodes in
-/// parse trees.
-///
-/// <para>
-/// Split path into words and separators {@code /} and {@code //} via ANTLR
-/// itself then walk path elements from left to right. At each separator-word
-/// pair, find set of nodes. Next stage uses those as work list.</para>
-///
-/// <para>
-/// The basic interface is
-/// <seealso cref="XPath#findAll ParseTree.findAll"/>{@code (tree, pathString,
-/// parser)}. But that is just shorthand for:</para>
-///
-/// <pre>
-/// <seealso cref="XPath"/> p = new <seealso cref="XPath#XPath XPath"/>(parser,
-/// pathString); return p.<seealso cref="#evaluate evaluate"/>(tree);
-/// </pre>
-///
-/// <para>
-/// See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
-/// allows operators:</para>
-///
-/// <dl>
-/// <dt>/</dt> <dd>root</dd>
-/// <dt>//</dt> <dd>anywhere</dd>
-/// <dt>!</dt> <dd>invert; this must appear directly after root or anywhere
-/// operator</dd>
-/// </dl>
-///
-/// <para>
-/// and path elements:</para>
-///
-/// <dl>
-/// <dt>ID</dt> <dd>token name</dd>
-/// <dt>'string'</dt> <dd>any string literal token from the grammar</dd>
-/// <dt>expr</dt> <dd>rule name</dd>
-/// <dt>*</dt> <dd>wildcard matching any node</dd>
-/// </dl>
-///
-/// <para>
-/// Whitespace is not allowed.</para>
-
-class ANTLR4CPP_PUBLIC XPath {
- public:
- static const std::string WILDCARD; // word not operator/separator
- static const std::string NOT; // word for invert operator
-
- XPath(Parser* parser, const std::string& path);
- virtual ~XPath() {}
-
- // TO_DO: check for invalid token/rule names, bad syntax
- virtual std::vector<XPathElement> split(const std::string& path);
-
- /// Return a list of all nodes starting at {@code t} as root that satisfy the
- /// path. The root {@code /} is relative to the node passed to
- /// <seealso cref="#evaluate"/>.
- virtual std::vector<ParseTree*> evaluate(ParseTree* t);
-
- protected:
- std::string _path;
- std::vector<XPathElement> _elements;
- Parser* _parser;
-
- /// Convert word like {@code *} or {@code ID} or {@code expr} to a path
- /// element. {@code anywhere} is {@code true} if {@code //} precedes the
- /// word.
- virtual XPathElement getXPathElement(Token* wordToken, bool anywhere);
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
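For reference, a minimal sketch of how the XPath helper documented above is
typically used ("func" and "ID" are hypothetical rule and token names from a
generated grammar; the include path follows the runtime's src-relative layout):

#include <vector>

#include "tree/xpath/XPath.h"

using namespace antlr4;
using namespace antlr4::tree;

// Collect every ID token that is a direct child of a `func` rule node,
// anywhere under `tree`. XPath strings may not contain whitespace.
std::vector<ParseTree*> idTokensInFunctions(Parser* parser, ParseTree* tree) {
  xpath::XPath path(parser, "//func/ID");
  return path.evaluate(tree);
}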
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp
deleted file mode 100644
index 43e8193419..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/CPPUtils.h"
-
-#include "XPathElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathElement::XPathElement(const std::string& nodeName) {
- _nodeName = nodeName;
-}
-
-XPathElement::~XPathElement() {}
-
-std::vector<ParseTree*> XPathElement::evaluate(ParseTree* /*t*/) { return {}; }
-
-std::string XPathElement::toString() const {
- std::string inv = _invert ? "!" : "";
- return antlrcpp::toString(*this) + "[" + inv + _nodeName + "]";
-}
-
-void XPathElement::setInvert(bool value) { _invert = value; }
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h
deleted file mode 100644
index bf3ccb5fc7..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathElement.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-class ParseTree;
-
-namespace xpath {
-
-class ANTLR4CPP_PUBLIC XPathElement {
- public:
- /// Construct an element like {@code /ID}, {@code ID}, or {@code /*}.
- /// The operator is null if the element is just a node name.
- XPathElement(const std::string& nodeName);
- XPathElement(XPathElement const&) = default;
- virtual ~XPathElement();
-
- XPathElement& operator=(XPathElement const&) = default;
-
- /// Given tree rooted at {@code t} return all nodes matched by this path
- /// element.
- virtual std::vector<ParseTree*> evaluate(ParseTree* t);
- virtual std::string toString() const;
-
- void setInvert(bool value);
-
- protected:
- std::string _nodeName;
- bool _invert = false;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp
deleted file mode 100644
index 8c8ce18cad..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.cpp
+++ /dev/null
@@ -1,177 +0,0 @@
-#include "XPathLexer.h"
-
-using namespace antlr4;
-
-XPathLexer::XPathLexer(CharStream* input) : Lexer(input) {
- _interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA,
- _sharedContextCache);
-}
-
-XPathLexer::~XPathLexer() { delete _interpreter; }
-
-std::string XPathLexer::getGrammarFileName() const { return "XPathLexer.g4"; }
-
-const std::vector<std::string>& XPathLexer::getRuleNames() const {
- return _ruleNames;
-}
-
-const std::vector<std::string>& XPathLexer::getChannelNames() const {
- return _channelNames;
-}
-
-const std::vector<std::string>& XPathLexer::getModeNames() const {
- return _modeNames;
-}
-
-const std::vector<std::string>& XPathLexer::getTokenNames() const {
- return _tokenNames;
-}
-
-dfa::Vocabulary& XPathLexer::getVocabulary() const { return _vocabulary; }
-
-const std::vector<uint16_t> XPathLexer::getSerializedATN() const {
- return _serializedATN;
-}
-
-const atn::ATN& XPathLexer::getATN() const { return _atn; }
-
-void XPathLexer::action(RuleContext* context, size_t ruleIndex,
- size_t actionIndex) {
- switch (ruleIndex) {
- case 4:
- IDAction(dynamic_cast<antlr4::RuleContext*>(context), actionIndex);
- break;
-
- default:
- break;
- }
-}
-
-void XPathLexer::IDAction(antlr4::RuleContext* /*context*/,
- size_t actionIndex) {
- switch (actionIndex) {
- case 0:
- if (isupper(getText()[0]))
- setType(TOKEN_REF);
- else
- setType(RULE_REF);
- break;
-
- default:
- break;
- }
-}
-
-// Static vars and initialization.
-std::vector<dfa::DFA> XPathLexer::_decisionToDFA;
-atn::PredictionContextCache XPathLexer::_sharedContextCache;
-
-// We own the ATN which in turn owns the ATN states.
-atn::ATN XPathLexer::_atn;
-std::vector<uint16_t> XPathLexer::_serializedATN;
-
-std::vector<std::string> XPathLexer::_ruleNames = {
- "ANYWHERE", "ROOT", "WILDCARD", "BANG",
- "ID", "NameChar", "NameStartChar", "STRING"};
-
-std::vector<std::string> XPathLexer::_channelNames = {"DEFAULT_TOKEN_CHANNEL",
- "HIDDEN"};
-
-std::vector<std::string> XPathLexer::_modeNames = {"DEFAULT_MODE"};
-
-std::vector<std::string> XPathLexer::_literalNames = {
- "", "", "", "'//'", "'/'", "'*'", "'!'"};
-
-std::vector<std::string> XPathLexer::_symbolicNames = {
- "", "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT",
- "WILDCARD", "BANG", "ID", "STRING"};
-
-dfa::Vocabulary XPathLexer::_vocabulary(_literalNames, _symbolicNames);
-
-std::vector<std::string> XPathLexer::_tokenNames;
-
-XPathLexer::Initializer::Initializer() {
- // This code could be in a static initializer lambda, but VS doesn't allow
- // access to private class members from there.
- for (size_t i = 0; i < _symbolicNames.size(); ++i) {
- std::string name = _vocabulary.getLiteralName(i);
- if (name.empty()) {
- name = _vocabulary.getSymbolicName(i);
- }
-
- if (name.empty()) {
- _tokenNames.push_back("<INVALID>");
- } else {
- _tokenNames.push_back(name);
- }
- }
-
- _serializedATN = {
- 0x3, 0x430, 0xd6d1, 0x8206, 0xad2d, 0x4417, 0xaef1, 0x8d80, 0xaadd,
- 0x2, 0xa, 0x34, 0x8, 0x1, 0x4, 0x2, 0x9, 0x2,
- 0x4, 0x3, 0x9, 0x3, 0x4, 0x4, 0x9, 0x4, 0x4,
- 0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7,
- 0x9, 0x7, 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9,
- 0x9, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x3,
- 0x3, 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 0x5, 0x3,
- 0x5, 0x3, 0x6, 0x3, 0x6, 0x7, 0x6, 0x1f, 0xa,
- 0x6, 0xc, 0x6, 0xe, 0x6, 0x22, 0xb, 0x6, 0x3,
- 0x6, 0x3, 0x6, 0x3, 0x7, 0x3, 0x7, 0x5, 0x7,
- 0x28, 0xa, 0x7, 0x3, 0x8, 0x3, 0x8, 0x3, 0x9,
- 0x3, 0x9, 0x7, 0x9, 0x2e, 0xa, 0x9, 0xc, 0x9,
- 0xe, 0x9, 0x31, 0xb, 0x9, 0x3, 0x9, 0x3, 0x9,
- 0x3, 0x2f, 0x2, 0xa, 0x3, 0x5, 0x5, 0x6, 0x7,
- 0x7, 0x9, 0x8, 0xb, 0x9, 0xd, 0x2, 0xf, 0x2,
- 0x11, 0xa, 0x3, 0x2, 0x4, 0x7, 0x2, 0x32, 0x3b,
- 0x61, 0x61, 0xb9, 0xb9, 0x302, 0x371, 0x2041, 0x2042, 0xf,
- 0x2, 0x43, 0x5c, 0x63, 0x7c, 0xc2, 0xd8, 0xda, 0xf8,
- 0xfa, 0x301, 0x372, 0x37f, 0x381, 0x2001, 0x200e, 0x200f, 0x2072,
- 0x2191, 0x2c02, 0x2ff1, 0x3003, 0xd801, 0xf902, 0xfdd1, 0xfdf2, 0x1,
- 0x34, 0x2, 0x3, 0x3, 0x2, 0x2, 0x2, 0x2, 0x5,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x7, 0x3, 0x2, 0x2,
- 0x2, 0x2, 0x9, 0x3, 0x2, 0x2, 0x2, 0x2, 0xb,
- 0x3, 0x2, 0x2, 0x2, 0x2, 0x11, 0x3, 0x2, 0x2,
- 0x2, 0x3, 0x13, 0x3, 0x2, 0x2, 0x2, 0x5, 0x16,
- 0x3, 0x2, 0x2, 0x2, 0x7, 0x18, 0x3, 0x2, 0x2,
- 0x2, 0x9, 0x1a, 0x3, 0x2, 0x2, 0x2, 0xb, 0x1c,
- 0x3, 0x2, 0x2, 0x2, 0xd, 0x27, 0x3, 0x2, 0x2,
- 0x2, 0xf, 0x29, 0x3, 0x2, 0x2, 0x2, 0x11, 0x2b,
- 0x3, 0x2, 0x2, 0x2, 0x13, 0x14, 0x7, 0x31, 0x2,
- 0x2, 0x14, 0x15, 0x7, 0x31, 0x2, 0x2, 0x15, 0x4,
- 0x3, 0x2, 0x2, 0x2, 0x16, 0x17, 0x7, 0x31, 0x2,
- 0x2, 0x17, 0x6, 0x3, 0x2, 0x2, 0x2, 0x18, 0x19,
- 0x7, 0x2c, 0x2, 0x2, 0x19, 0x8, 0x3, 0x2, 0x2,
- 0x2, 0x1a, 0x1b, 0x7, 0x23, 0x2, 0x2, 0x1b, 0xa,
- 0x3, 0x2, 0x2, 0x2, 0x1c, 0x20, 0x5, 0xf, 0x8,
- 0x2, 0x1d, 0x1f, 0x5, 0xd, 0x7, 0x2, 0x1e, 0x1d,
- 0x3, 0x2, 0x2, 0x2, 0x1f, 0x22, 0x3, 0x2, 0x2,
- 0x2, 0x20, 0x1e, 0x3, 0x2, 0x2, 0x2, 0x20, 0x21,
- 0x3, 0x2, 0x2, 0x2, 0x21, 0x23, 0x3, 0x2, 0x2,
- 0x2, 0x22, 0x20, 0x3, 0x2, 0x2, 0x2, 0x23, 0x24,
- 0x8, 0x6, 0x2, 0x2, 0x24, 0xc, 0x3, 0x2, 0x2,
- 0x2, 0x25, 0x28, 0x5, 0xf, 0x8, 0x2, 0x26, 0x28,
- 0x9, 0x2, 0x2, 0x2, 0x27, 0x25, 0x3, 0x2, 0x2,
- 0x2, 0x27, 0x26, 0x3, 0x2, 0x2, 0x2, 0x28, 0xe,
- 0x3, 0x2, 0x2, 0x2, 0x29, 0x2a, 0x9, 0x3, 0x2,
- 0x2, 0x2a, 0x10, 0x3, 0x2, 0x2, 0x2, 0x2b, 0x2f,
- 0x7, 0x29, 0x2, 0x2, 0x2c, 0x2e, 0xb, 0x2, 0x2,
- 0x2, 0x2d, 0x2c, 0x3, 0x2, 0x2, 0x2, 0x2e, 0x31,
- 0x3, 0x2, 0x2, 0x2, 0x2f, 0x30, 0x3, 0x2, 0x2,
- 0x2, 0x2f, 0x2d, 0x3, 0x2, 0x2, 0x2, 0x30, 0x32,
- 0x3, 0x2, 0x2, 0x2, 0x31, 0x2f, 0x3, 0x2, 0x2,
- 0x2, 0x32, 0x33, 0x7, 0x29, 0x2, 0x2, 0x33, 0x12,
- 0x3, 0x2, 0x2, 0x2, 0x6, 0x2, 0x20, 0x27, 0x2f,
- 0x3, 0x3, 0x6, 0x2,
- };
-
- atn::ATNDeserializer deserializer;
- _atn = deserializer.deserialize(_serializedATN);
-
- size_t count = _atn.getNumberOfDecisions();
- _decisionToDFA.reserve(count);
- for (size_t i = 0; i < count; i++) {
- _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
- }
-}
-
-XPathLexer::Initializer XPathLexer::_init;
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.g4 b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.g4
deleted file mode 100644
index 14bcf5ab73..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.g4
+++ /dev/null
@@ -1,64 +0,0 @@
-lexer grammar XPathLexer;
-
-tokens { TOKEN_REF, RULE_REF }
-
-/*
-path : separator? word (separator word)* EOF ;
-
-separator
- : '/' '!'
- | '//' '!'
- | '/'
- | '//'
- ;
-
-word: TOKEN_REF
- | RULE_REF
- | STRING
- | '*'
- ;
-*/
-
-ANYWHERE : '//' ;
-ROOT : '/' ;
-WILDCARD : '*' ;
-BANG : '!' ;
-
-ID : NameStartChar NameChar*
- {
- if (isupper(getText()[0]))
- setType(TOKEN_REF);
- else
- setType(RULE_REF);
- }
- ;
-
-fragment
-NameChar : NameStartChar
- | '0'..'9'
- | '_'
- | '\u00B7'
- | '\u0300'..'\u036F'
- | '\u203F'..'\u2040'
- ;
-
-fragment
-NameStartChar
- : 'A'..'Z' | 'a'..'z'
- | '\u00C0'..'\u00D6'
- | '\u00D8'..'\u00F6'
- | '\u00F8'..'\u02FF'
- | '\u0370'..'\u037D'
- | '\u037F'..'\u1FFF'
- | '\u200C'..'\u200D'
- | '\u2070'..'\u218F'
- | '\u2C00'..'\u2FEF'
- | '\u3001'..'\uD7FF'
- | '\uF900'..'\uFDCF'
- | '\uFDF0'..'\uFFFF' // implicitly includes ['\u10000-'\uEFFFF]
- ;
-
-STRING : '\'' .*? '\'';
-
-//WS : [ \t\r\n]+ -> skip ;
-
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h
deleted file mode 100644
index ef62001f28..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#pragma once
-
-#include "antlr4-runtime.h"
-
-class XPathLexer : public antlr4::Lexer {
- public:
- enum {
- TOKEN_REF = 1,
- RULE_REF = 2,
- ANYWHERE = 3,
- ROOT = 4,
- WILDCARD = 5,
- BANG = 6,
- ID = 7,
- STRING = 8
- };
-
- XPathLexer(antlr4::CharStream* input);
- ~XPathLexer();
-
- virtual std::string getGrammarFileName() const override;
- virtual const std::vector<std::string>& getRuleNames() const override;
-
- virtual const std::vector<std::string>& getChannelNames() const override;
- virtual const std::vector<std::string>& getModeNames() const override;
- virtual const std::vector<std::string>& getTokenNames()
- const override; // deprecated, use vocabulary instead
- virtual antlr4::dfa::Vocabulary& getVocabulary() const override;
-
- virtual const std::vector<uint16_t> getSerializedATN() const override;
- virtual const antlr4::atn::ATN& getATN() const override;
-
- virtual void action(antlr4::RuleContext* context, size_t ruleIndex,
- size_t actionIndex) override;
-
- private:
- static std::vector<antlr4::dfa::DFA> _decisionToDFA;
- static antlr4::atn::PredictionContextCache _sharedContextCache;
- static std::vector<std::string> _ruleNames;
- static std::vector<std::string> _tokenNames;
- static std::vector<std::string> _channelNames;
- static std::vector<std::string> _modeNames;
-
- static std::vector<std::string> _literalNames;
- static std::vector<std::string> _symbolicNames;
- static antlr4::dfa::Vocabulary _vocabulary;
- static antlr4::atn::ATN _atn;
- static std::vector<uint16_t> _serializedATN;
-
- // Individual action functions triggered by action() above.
- void IDAction(antlr4::RuleContext* context, size_t actionIndex);
-
- // Individual semantic predicate functions triggered by sempred() above.
-
- struct Initializer {
- Initializer();
- };
- static Initializer _init;
-};
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.tokens b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.tokens
deleted file mode 100644
index 5bf699ec93..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexer.tokens
+++ /dev/null
@@ -1,12 +0,0 @@
-TOKEN_REF=1
-RULE_REF=2
-ANYWHERE=3
-ROOT=4
-WILDCARD=5
-BANG=6
-ID=7
-STRING=8
-'//'=3
-'/'=4
-'*'=5
-'!'=6
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp
deleted file mode 100644
index 1bee8150cd..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPathLexerErrorListener.h"
-
-using namespace antlr4;
-using namespace antlr4::tree::xpath;
-
-void XPathLexerErrorListener::syntaxError(Recognizer* /*recognizer*/,
- Token* /*offendingSymbol*/,
- size_t /*line*/,
- size_t /*charPositionInLine*/,
- const std::string& /*msg*/,
- std::exception_ptr /*e*/) {}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h
deleted file mode 100644
index 921929dd62..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathLexerErrorListener.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BaseErrorListener.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-class ANTLR4CPP_PUBLIC XPathLexerErrorListener : public BaseErrorListener {
- public:
- virtual void syntaxError(Recognizer* recognizer, Token* offendingSymbol,
- size_t line, size_t charPositionInLine,
- const std::string& msg,
- std::exception_ptr e) override;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp
deleted file mode 100644
index 199ff6e8a0..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "tree/xpath/XPathRuleAnywhereElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathRuleAnywhereElement::XPathRuleAnywhereElement(const std::string& ruleName,
- int ruleIndex)
- : XPathElement(ruleName) {
- _ruleIndex = ruleIndex;
-}
-
-std::vector<ParseTree*> XPathRuleAnywhereElement::evaluate(ParseTree* t) {
- return Trees::findAllRuleNodes(t, _ruleIndex);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h
deleted file mode 100644
index 3f5bbb40f2..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleAnywhereElement.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-/// Either {@code ID} at start of path or {@code ...//ID} in middle of path.
-class ANTLR4CPP_PUBLIC XPathRuleAnywhereElement : public XPathElement {
- public:
- XPathRuleAnywhereElement(const std::string& ruleName, int ruleIndex);
-
- virtual std::vector<ParseTree*> evaluate(ParseTree* t) override;
-
- protected:
- int _ruleIndex = 0;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp
deleted file mode 100644
index 498c901708..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathRuleElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathRuleElement::XPathRuleElement(const std::string& ruleName,
- size_t ruleIndex)
- : XPathElement(ruleName) {
- _ruleIndex = ruleIndex;
-}
-
-std::vector<ParseTree*> XPathRuleElement::evaluate(ParseTree* t) {
- // return all children of t that match nodeName
- std::vector<ParseTree*> nodes;
- for (auto c : t->children) {
- if (antlrcpp::is<ParserRuleContext*>(c)) {
- ParserRuleContext* ctx = dynamic_cast<ParserRuleContext*>(c);
- if ((ctx->getRuleIndex() == _ruleIndex && !_invert) ||
- (ctx->getRuleIndex() != _ruleIndex && _invert)) {
- nodes.push_back(ctx);
- }
- }
- }
- return nodes;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h
deleted file mode 100644
index 94e448e3ad..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathRuleElement.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-class ANTLR4CPP_PUBLIC XPathRuleElement : public XPathElement {
- public:
- XPathRuleElement(const std::string& ruleName, size_t ruleIndex);
-
- virtual std::vector<ParseTree*> evaluate(ParseTree* t) override;
-
- protected:
- size_t _ruleIndex = 0;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp
deleted file mode 100644
index 9b62f0b532..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathTokenAnywhereElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathTokenAnywhereElement::XPathTokenAnywhereElement(
- const std::string& tokenName, int tokenType)
- : XPathElement(tokenName) {
- this->tokenType = tokenType;
-}
-
-std::vector<ParseTree*> XPathTokenAnywhereElement::evaluate(ParseTree* t) {
- return Trees::findAllTokenNodes(t, tokenType);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h
deleted file mode 100644
index ec2ecd2c64..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenAnywhereElement.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-class ANTLR4CPP_PUBLIC XPathTokenAnywhereElement : public XPathElement {
- protected:
- int tokenType = 0;
-
- public:
- XPathTokenAnywhereElement(const std::string& tokenName, int tokenType);
-
- virtual std::vector<ParseTree*> evaluate(ParseTree* t) override;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp
deleted file mode 100644
index e132982db2..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-#include "support/CPPUtils.h"
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathTokenElement.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathTokenElement::XPathTokenElement(const std::string& tokenName,
- size_t tokenType)
- : XPathElement(tokenName) {
- _tokenType = tokenType;
-}
-
-std::vector<ParseTree*> XPathTokenElement::evaluate(ParseTree* t) {
- // return all children of t that match nodeName
- std::vector<ParseTree*> nodes;
- for (auto c : t->children) {
- if (antlrcpp::is<TerminalNode*>(c)) {
- TerminalNode* tnode = dynamic_cast<TerminalNode*>(c);
- if ((tnode->getSymbol()->getType() == _tokenType && !_invert) ||
- (tnode->getSymbol()->getType() != _tokenType && _invert)) {
- nodes.push_back(tnode);
- }
- }
- }
- return nodes;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h
deleted file mode 100644
index 6ae0665c58..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathTokenElement.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-class ANTLR4CPP_PUBLIC XPathTokenElement : public XPathElement {
- public:
- XPathTokenElement(const std::string& tokenName, size_t tokenType);
-
- virtual std::vector<ParseTree*> evaluate(ParseTree* t) override;
-
- protected:
- size_t _tokenType = 0;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp
deleted file mode 100644
index 34adb17113..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPath.h"
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathWildcardAnywhereElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathWildcardAnywhereElement::XPathWildcardAnywhereElement()
- : XPathElement(XPath::WILDCARD) {}
-
-std::vector<ParseTree*> XPathWildcardAnywhereElement::evaluate(ParseTree* t) {
- if (_invert) {
- return {}; // !* is weird but valid (empty)
- }
- return Trees::getDescendants(t);
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h
deleted file mode 100644
index 6217abd886..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardAnywhereElement.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-class ANTLR4CPP_PUBLIC XPathWildcardAnywhereElement : public XPathElement {
- public:
- XPathWildcardAnywhereElement();
-
- virtual std::vector<ParseTree*> evaluate(ParseTree* t) override;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp
deleted file mode 100644
index e288a1032a..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPath.h"
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathWildcardElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathWildcardElement::XPathWildcardElement() : XPathElement(XPath::WILDCARD) {}
-
-std::vector<ParseTree*> XPathWildcardElement::evaluate(ParseTree* t) {
- if (_invert) {
- return {}; // !* is weird but valid (empty)
- }
-
- return t->children;
-}
diff --git a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h b/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h
deleted file mode 100644
index 05f1af21bf..0000000000
--- a/deps/v8/third_party/antlr4/runtime/Cpp/runtime/src/tree/xpath/XPathWildcardElement.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
-class ANTLR4CPP_PUBLIC XPathWildcardElement : public XPathElement {
- public:
- XPathWildcardElement();
-
- virtual std::vector<ParseTree*> evaluate(ParseTree* t) override;
-};
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
index 89314f4058..71377f87d4 100644
--- a/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
+++ b/deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -30,6 +30,7 @@
// Author: wan@google.com (Zhanyong Wan)
//
// Google C++ Testing and Mocking Framework definitions useful in production code.
+// GOOGLETEST_CM0003 DO NOT DELETE
#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index 1ae51d18a2..79fc685c88 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 0d4255502019144a5dec5669d7992165ae8924e7
+Revision: 5768a449acc0407bf55aef535b18d710df2a14f2
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
index 5403035cc4..9ad3eba91a 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
@@ -68,10 +68,11 @@ DispatcherBase::WeakPtr::~WeakPtr()
m_dispatcher->m_weakPtrs.erase(this);
}
-DispatcherBase::Callback::Callback(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, int callbackId)
+DispatcherBase::Callback::Callback(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, const String& message)
: m_backendImpl(std::move(backendImpl))
, m_callId(callId)
- , m_callbackId(callbackId) { }
+ , m_method(method)
+ , m_message(message) { }
DispatcherBase::Callback::~Callback() = default;
@@ -92,32 +93,18 @@ void DispatcherBase::Callback::fallThroughIfActive()
{
if (!m_backendImpl || !m_backendImpl->get())
return;
- m_backendImpl->get()->markFallThrough(m_callbackId);
+ m_backendImpl->get()->channel()->fallThrough(m_callId, m_method, m_message);
m_backendImpl = nullptr;
}
DispatcherBase::DispatcherBase(FrontendChannel* frontendChannel)
- : m_frontendChannel(frontendChannel)
- , m_lastCallbackId(0)
- , m_lastCallbackFallThrough(false) { }
+ : m_frontendChannel(frontendChannel) { }
DispatcherBase::~DispatcherBase()
{
clearFrontend();
}
-int DispatcherBase::nextCallbackId()
-{
- m_lastCallbackFallThrough = false;
- return ++m_lastCallbackId;
-}
-
-void DispatcherBase::markFallThrough(int callbackId)
-{
- DCHECK(callbackId == m_lastCallbackId);
- m_lastCallbackFallThrough = true;
-}
-
void DispatcherBase::sendResponse(int callId, const DispatchResponse& response, std::unique_ptr<protocol::DictionaryValue> result)
{
if (!m_frontendChannel)
@@ -218,13 +205,7 @@ std::unique_ptr<DispatcherBase::WeakPtr> DispatcherBase::weakPtr()
}
UberDispatcher::UberDispatcher(FrontendChannel* frontendChannel)
- : m_frontendChannel(frontendChannel)
- , m_fallThroughForNotFound(false) { }
-
-void UberDispatcher::setFallThroughForNotFound(bool fallThroughForNotFound)
-{
- m_fallThroughForNotFound = fallThroughForNotFound;
-}
+ : m_frontendChannel(frontendChannel) { }
void UberDispatcher::registerBackend(const String& name, std::unique_ptr<protocol::DispatcherBase> dispatcher)
{
@@ -237,81 +218,70 @@ void UberDispatcher::setupRedirects(const std::unordered_map<String, String>& re
m_redirects[pair.first] = pair.second;
}
-DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedMessage, int* outCallId, String* outMethod)
-{
+bool UberDispatcher::parseCommand(Value* parsedMessage, int* outCallId, String* outMethod) {
if (!parsedMessage) {
reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kParseError, "Message must be a valid JSON");
- return DispatchResponse::kError;
+ return false;
}
- std::unique_ptr<protocol::DictionaryValue> messageObject = DictionaryValue::cast(std::move(parsedMessage));
+ protocol::DictionaryValue* messageObject = DictionaryValue::cast(parsedMessage);
if (!messageObject) {
reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must be an object");
- return DispatchResponse::kError;
+ return false;
}
int callId = 0;
protocol::Value* callIdValue = messageObject->get("id");
bool success = callIdValue && callIdValue->asInteger(&callId);
- if (outCallId)
- *outCallId = callId;
if (!success) {
reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have integer 'id' property");
- return DispatchResponse::kError;
+ return false;
}
+ if (outCallId)
+ *outCallId = callId;
protocol::Value* methodValue = messageObject->get("method");
String method;
success = methodValue && methodValue->asString(&method);
- if (outMethod)
- *outMethod = method;
if (!success) {
reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kInvalidRequest, "Message must have string 'method' property", nullptr);
- return DispatchResponse::kError;
+ return false;
}
std::unordered_map<String, String>::iterator redirectIt = m_redirects.find(method);
if (redirectIt != m_redirects.end())
method = redirectIt->second;
+ if (outMethod)
+ *outMethod = method;
+ return true;
+}
+protocol::DispatcherBase* UberDispatcher::findDispatcher(const String& method) {
size_t dotIndex = StringUtil::find(method, ".");
- if (dotIndex == StringUtil::kNotFound) {
- if (m_fallThroughForNotFound)
- return DispatchResponse::kFallThrough;
- reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
- return DispatchResponse::kError;
- }
+ if (dotIndex == StringUtil::kNotFound)
+ return nullptr;
String domain = StringUtil::substring(method, 0, dotIndex);
auto it = m_dispatchers.find(domain);
- if (it == m_dispatchers.end()) {
- if (m_fallThroughForNotFound)
- return DispatchResponse::kFallThrough;
- reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
- return DispatchResponse::kError;
- }
- return it->second->dispatch(callId, method, std::move(messageObject));
+ if (it == m_dispatchers.end())
+ return nullptr;
+ if (!it->second->canDispatch(method))
+ return nullptr;
+ return it->second.get();
}
-bool UberDispatcher::getCommandName(const String& message, String* method, std::unique_ptr<protocol::DictionaryValue>* parsedMessage)
+bool UberDispatcher::canDispatch(const String& method)
{
- std::unique_ptr<protocol::Value> value = StringUtil::parseJSON(message);
- if (!value) {
- reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kParseError, "Message must be a valid JSON");
- return false;
- }
-
- protocol::DictionaryValue* object = DictionaryValue::cast(value.get());
- if (!object) {
- reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must be an object");
- return false;
- }
+ return !!findDispatcher(method);
+}
- if (!object->getString("method", method)) {
- reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have string 'method' property");
- return false;
+void UberDispatcher::dispatch(int callId, const String& method, std::unique_ptr<Value> parsedMessage, const String& rawMessage)
+{
+ protocol::DispatcherBase* dispatcher = findDispatcher(method);
+ if (!dispatcher) {
+ reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
+ return;
}
-
- parsedMessage->reset(DictionaryValue::cast(value.release()));
- return true;
+ std::unique_ptr<protocol::DictionaryValue> messageObject = DictionaryValue::cast(std::move(parsedMessage));
+ dispatcher->dispatch(callId, method, rawMessage, std::move(messageObject));
}
UberDispatcher::~UberDispatcher() = default;
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
index 6c618cf0f5..4708e032d8 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
@@ -21,7 +21,6 @@ public:
kSuccess = 0,
kError = 1,
kFallThrough = 2,
- kAsync = 3
};
enum ErrorCode {
@@ -67,7 +66,7 @@ public:
class {{config.lib.export_macro}} Callback {
public:
- Callback(std::unique_ptr<WeakPtr> backendImpl, int callId, int callbackId);
+ Callback(std::unique_ptr<WeakPtr> backendImpl, int callId, const String& method, const String& message);
virtual ~Callback();
void dispose();
@@ -78,13 +77,16 @@ public:
private:
std::unique_ptr<WeakPtr> m_backendImpl;
int m_callId;
- int m_callbackId;
+ String m_method;
+ String m_message;
};
explicit DispatcherBase(FrontendChannel*);
virtual ~DispatcherBase();
- virtual DispatchResponse::Status dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject) = 0;
+ virtual bool canDispatch(const String& method) = 0;
+ virtual void dispatch(int callId, const String& method, const String& rawMessage, std::unique_ptr<protocol::DictionaryValue> messageObject) = 0;
+ FrontendChannel* channel() { return m_frontendChannel; }
void sendResponse(int callId, const DispatchResponse&, std::unique_ptr<protocol::DictionaryValue> result);
void sendResponse(int callId, const DispatchResponse&);
@@ -94,15 +96,9 @@ public:
std::unique_ptr<WeakPtr> weakPtr();
- int nextCallbackId();
- void markFallThrough(int callbackId);
- bool lastCallbackFallThrough() { return m_lastCallbackFallThrough; }
-
private:
FrontendChannel* m_frontendChannel;
std::unordered_set<WeakPtr*> m_weakPtrs;
- int m_lastCallbackId;
- bool m_lastCallbackFallThrough;
};
class {{config.lib.export_macro}} UberDispatcher {
@@ -111,16 +107,15 @@ public:
explicit UberDispatcher(FrontendChannel*);
void registerBackend(const String& name, std::unique_ptr<protocol::DispatcherBase>);
void setupRedirects(const std::unordered_map<String, String>&);
- DispatchResponse::Status dispatch(std::unique_ptr<Value> message, int* callId = nullptr, String* method = nullptr);
+ bool parseCommand(Value* message, int* callId, String* method);
+ bool canDispatch(const String& method);
+ void dispatch(int callId, const String& method, std::unique_ptr<Value> message, const String& rawMessage);
FrontendChannel* channel() { return m_frontendChannel; }
- bool fallThroughForNotFound() { return m_fallThroughForNotFound; }
- void setFallThroughForNotFound(bool);
- bool getCommandName(const String& message, String* method, std::unique_ptr<protocol::DictionaryValue>* parsedMessage);
virtual ~UberDispatcher();
private:
+ protocol::DispatcherBase* findDispatcher(const String& method);
FrontendChannel* m_frontendChannel;
- bool m_fallThroughForNotFound;
std::unordered_map<String, String> m_redirects;
std::unordered_map<String, std::unique_ptr<protocol::DispatcherBase>> m_dispatchers;
};
diff --git a/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template b/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
index 0454978b0c..4fba5be314 100644
--- a/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
@@ -20,6 +20,7 @@ public:
virtual ~FrontendChannel() { }
virtual void sendProtocolResponse(int callId, std::unique_ptr<Serializable> message) = 0;
virtual void sendProtocolNotification(std::unique_ptr<Serializable> message) = 0;
+ virtual void fallThrough(int callId, const String& method, const String& message) = 0;
virtual void flushProtocolNotifications() = 0;
};
diff --git a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
index 71593acd0e..5af6960a7b 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
@@ -5,6 +5,41 @@
#ifndef {{"_".join(config.protocol.namespace)}}_Maybe_h
#define {{"_".join(config.protocol.namespace)}}_Maybe_h
+// This macro allows testing for the version of the GNU C++ compiler.
+// Note that this also applies to compilers that masquerade as GCC,
+// for example clang and the Intel C++ compiler for Linux.
+// Use like:
+// #if IP_GNUC_PREREQ(4, 3, 1)
+// ...
+// #endif
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#define IP_GNUC_PREREQ(major, minor, patchlevel) \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
+ ((major)*10000 + (minor)*100 + (patchlevel)))
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define IP_GNUC_PREREQ(major, minor, patchlevel) \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= \
+ ((major)*10000 + (minor)*100 + (patchlevel)))
+#else
+#define IP_GNUC_PREREQ(major, minor, patchlevel) 0
+#endif
+
+#if defined(__mips64)
+#define IP_TARGET_ARCH_MIPS64 1
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
+#define IP_TARGET_ARCH_MIPS 1
+#endif
+
+// Allow the use of noexcept: on older compilers that do not support adding
+// noexcept to defaulted members, IP_NOEXCEPT expands to nothing.
+#if ((IP_GNUC_PREREQ(4, 9, 0) && !defined(IP_TARGET_ARCH_MIPS) && \
+ !defined(IP_TARGET_ARCH_MIPS64)) || \
+ (defined(__clang__) && __cplusplus > 201300L))
+#define IP_NOEXCEPT noexcept
+#else
+#define IP_NOEXCEPT
+#endif
+
//#include "Forward.h"
{% for namespace in config.protocol.namespace %}
@@ -16,7 +51,7 @@ class Maybe {
public:
Maybe() : m_value() { }
Maybe(std::unique_ptr<T> value) : m_value(std::move(value)) { }
- Maybe(Maybe&& other) : m_value(std::move(other.m_value)) { }
+ Maybe(Maybe&& other) IP_NOEXCEPT : m_value(std::move(other.m_value)) {}
void operator=(std::unique_ptr<T> value) { m_value = std::move(value); }
T* fromJust() const { DCHECK(m_value); return m_value.get(); }
T* fromMaybe(T* defaultValue) const { return m_value ? m_value.get() : defaultValue; }
@@ -31,7 +66,9 @@ class MaybeBase {
public:
MaybeBase() : m_isJust(false) { }
MaybeBase(T value) : m_isJust(true), m_value(value) { }
- MaybeBase(MaybeBase&& other) : m_isJust(other.m_isJust), m_value(std::move(other.m_value)) { }
+ MaybeBase(MaybeBase&& other) IP_NOEXCEPT
+ : m_isJust(other.m_isJust),
+ m_value(std::move(other.m_value)) {}
void operator=(T value) { m_value = value; m_isJust = true; }
T fromJust() const { DCHECK(m_isJust); return m_value; }
T fromMaybe(const T& defaultValue) const { return m_isJust ? m_value : defaultValue; }
@@ -48,7 +85,7 @@ class Maybe<bool> : public MaybeBase<bool> {
public:
Maybe() { }
Maybe(bool value) : MaybeBase(value) { }
- Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
using MaybeBase::operator=;
};
@@ -57,7 +94,7 @@ class Maybe<int> : public MaybeBase<int> {
public:
Maybe() { }
Maybe(int value) : MaybeBase(value) { }
- Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
using MaybeBase::operator=;
};
@@ -66,7 +103,7 @@ class Maybe<double> : public MaybeBase<double> {
public:
Maybe() { }
Maybe(double value) : MaybeBase(value) { }
- Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
using MaybeBase::operator=;
};
@@ -75,7 +112,7 @@ class Maybe<String> : public MaybeBase<String> {
public:
Maybe() { }
Maybe(const String& value) : MaybeBase(value) { }
- Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
using MaybeBase::operator=;
};
@@ -83,4 +120,9 @@ public:
} // namespace {{namespace}}
{% endfor %}
+#undef IP_GNUC_PREREQ
+#undef IP_TARGET_ARCH_MIPS64
+#undef IP_TARGET_ARCH_MIPS
+#undef IP_NOEXCEPT
+
#endif // !defined({{"_".join(config.protocol.namespace)}}_Maybe_h)
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
index aa448d2a19..f99ce5f0d9 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
@@ -196,10 +196,9 @@ void Frontend::sendRawNotification(const String& notification)
class DispatcherImpl : public protocol::DispatcherBase {
public:
- DispatcherImpl(FrontendChannel* frontendChannel, Backend* backend, bool fallThroughForNotFound)
+ DispatcherImpl(FrontendChannel* frontendChannel, Backend* backend)
: DispatcherBase(frontendChannel)
- , m_backend(backend)
- , m_fallThroughForNotFound(fallThroughForNotFound) {
+ , m_backend(backend) {
{% for command in domain.commands %}
{% if "redirect" in command %}
m_redirects["{{domain.domain}}.{{command.name}}"] = "{{command.redirect}}.{{command.name}}";
@@ -210,11 +209,12 @@ public:
{% endfor %}
}
~DispatcherImpl() override { }
- DispatchResponse::Status dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject) override;
+ bool canDispatch(const String& method) override;
+ void dispatch(int callId, const String& method, const String& message, std::unique_ptr<protocol::DictionaryValue> messageObject) override;
std::unordered_map<String, String>& redirects() { return m_redirects; }
protected:
- using CallHandler = DispatchResponse::Status (DispatcherImpl::*)(int callId, std::unique_ptr<DictionaryValue> messageObject, ErrorSupport* errors);
+ using CallHandler = void (DispatcherImpl::*)(int callId, const String& method, const String& message, std::unique_ptr<DictionaryValue> messageObject, ErrorSupport* errors);
using DispatchMap = std::unordered_map<String, CallHandler>;
DispatchMap m_dispatchMap;
std::unordered_map<String, String> m_redirects;
@@ -222,25 +222,22 @@ protected:
{% for command in domain.commands %}
{% if "redirect" in command %}{% continue %}{% endif %}
{% if not protocol.generate_command(domain.domain, command.name) %}{% continue %}{% endif %}
- DispatchResponse::Status {{command.name}}(int callId, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport*);
+ void {{command.name}}(int callId, const String& method, const String& message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport*);
{% endfor %}
Backend* m_backend;
- bool m_fallThroughForNotFound;
};
-DispatchResponse::Status DispatcherImpl::dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject)
+bool DispatcherImpl::canDispatch(const String& method) {
+ return m_dispatchMap.find(method) != m_dispatchMap.end();
+}
+
+void DispatcherImpl::dispatch(int callId, const String& method, const String& message, std::unique_ptr<protocol::DictionaryValue> messageObject)
{
std::unordered_map<String, CallHandler>::iterator it = m_dispatchMap.find(method);
- if (it == m_dispatchMap.end()) {
- if (m_fallThroughForNotFound)
- return DispatchResponse::kFallThrough;
- reportProtocolError(callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
- return DispatchResponse::kError;
- }
-
+ DCHECK(it != m_dispatchMap.end());
protocol::ErrorSupport errors;
- return (this->*(it->second))(callId, std::move(messageObject), &errors);
+ (this->*(it->second))(callId, method, message, std::move(messageObject), &errors);
}
{% for command in domain.commands %}
@@ -251,8 +248,8 @@ DispatchResponse::Status DispatcherImpl::dispatch(int callId, const String& meth
class {{command_name_title}}CallbackImpl : public Backend::{{command_name_title}}Callback, public DispatcherBase::Callback {
public:
- {{command_name_title}}CallbackImpl(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, int callbackId)
- : DispatcherBase::Callback(std::move(backendImpl), callId, callbackId) { }
+ {{command_name_title}}CallbackImpl(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId, const String& method, const String& message)
+ : DispatcherBase::Callback(std::move(backendImpl), callId, method, message) { }
void sendSuccess(
{%- for parameter in command.returns -%}
@@ -289,7 +286,7 @@ public:
};
{% endif %}
-DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport* errors)
+void DispatcherImpl::{{command.name}}(int callId, const String& method, const String& message, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport* errors)
{
{% if "parameters" in command %}
// Prepare input parameters.
@@ -312,7 +309,7 @@ DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::uniqu
errors->pop();
if (errors->hasErrors()) {
reportProtocolError(callId, DispatchResponse::kInvalidParams, kInvalidParamsString, errors);
- return DispatchResponse::kError;
+ return;
}
{% endif %}
{% if "returns" in command and not protocol.is_async_command(domain.domain, command.name) %}
@@ -343,8 +340,10 @@ DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::uniqu
&out_{{parameter.name}}
{%- endfor %}
{% endif %});
- if (response.status() == DispatchResponse::kFallThrough)
- return response.status();
+ if (response.status() == DispatchResponse::kFallThrough) {
+ channel()->fallThrough(callId, method, message);
+ return;
+ }
{% if "returns" in command %}
std::unique_ptr<protocol::DictionaryValue> result = DictionaryValue::create();
if (response.status() == DispatchResponse::kSuccess) {
@@ -363,10 +362,10 @@ DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::uniqu
if (weak->get())
weak->get()->sendResponse(callId, response);
{% endif %}
- return response.status();
+ return;
{% else %}
std::unique_ptr<DispatcherBase::WeakPtr> weak = weakPtr();
- std::unique_ptr<{{command_name_title}}CallbackImpl> callback(new {{command.name | to_title_case}}CallbackImpl(weakPtr(), callId, nextCallbackId()));
+ std::unique_ptr<{{command_name_title}}CallbackImpl> callback(new {{command.name | to_title_case}}CallbackImpl(weakPtr(), callId, method, message));
m_backend->{{command.name | to_method_case}}(
{%- for property in command.parameters -%}
{%- if not loop.first -%}, {% endif -%}
@@ -378,7 +377,7 @@ DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::uniqu
{%- endfor -%}
{%- if command.parameters -%}, {% endif -%}
std::move(callback));
- return (weak->get() && weak->get()->lastCallbackFallThrough()) ? DispatchResponse::kFallThrough : DispatchResponse::kAsync;
+ return;
{% endif %}
}
{% endfor %}
@@ -386,7 +385,7 @@ DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::uniqu
// static
void Dispatcher::wire(UberDispatcher* uber, Backend* backend)
{
- std::unique_ptr<DispatcherImpl> dispatcher(new DispatcherImpl(uber->channel(), backend, uber->fallThroughForNotFound()));
+ std::unique_ptr<DispatcherImpl> dispatcher(new DispatcherImpl(uber->channel(), backend));
uber->setupRedirects(dispatcher->redirects());
uber->registerBackend("{{domain.domain}}", std::move(dispatcher));
}
diff --git a/deps/v8/third_party/v8/builtins/LICENSE b/deps/v8/third_party/v8/builtins/LICENSE
new file mode 100644
index 0000000000..1afbedba92
--- /dev/null
+++ b/deps/v8/third_party/v8/builtins/LICENSE
@@ -0,0 +1,254 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC. Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
+year, the PythonLabs team moved to Digital Creations, which became
+Zope Corporation. In 2001, the Python Software Foundation (PSF, see
+https://www.python.org/psf/) was formed, a non-profit organization
+created specifically to own Python-related Intellectual Property.
+Zope Corporation was a sponsoring member of the PSF.
+
+All Python releases are Open Source (see http://www.opensource.org for
+the Open Source Definition). Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+ Release Derived Year Owner GPL-
+ from compatible? (1)
+
+ 0.9.0 thru 1.2 1991-1995 CWI yes
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
+ 1.6 1.5.2 2000 CNRI no
+ 2.0 1.6 2000 BeOpen.com no
+ 1.6.1 1.6 2001 CNRI yes (2)
+ 2.1 2.0+1.6.1 2001 PSF no
+ 2.0.1 2.0+1.6.1 2001 PSF yes
+ 2.1.1 2.1+2.0.1 2001 PSF yes
+ 2.1.2 2.1.1 2002 PSF yes
+ 2.1.3 2.1.2 2002 PSF yes
+ 2.2 and above 2.1.1 2001-now PSF yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+ the GPL. All Python licenses, unlike the GPL, let you distribute
+ a modified version without making your changes open source. The
+ GPL-compatible licenses make it possible to combine Python with
+ other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+ because its license has a choice of law clause. According to
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+ is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation; All
+Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions. Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee. This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party. As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee. Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement. This Agreement together with
+Python 1.6.1 may be located on the Internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013. This
+Agreement may also be obtained from a proxy server on the Internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee. This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+ ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands. All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
new file mode 100644
index 0000000000..a94b432935
--- /dev/null
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -0,0 +1,1808 @@
+// Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+// 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation;
+// All Rights Reserved
+
+// This file implements a stable, adaptive merge sort variant called TimSort.
+//
+// It was first implemented in Python, and this Torque implementation
+// is based on the current version:
+//
+// https://github.com/python/cpython/blob/master/Objects/listobject.c
+//
+// Detailed analysis and a description of the algorithm can be found at:
+//
+// https://github.com/python/cpython/blob/master/Objects/listsort.txt
+
+module array {
+ // All accessors bail to the GenericElementsAccessor if assumptions checked
+ // by "CanUseSameAccessor<>" are violated:
+ // Generic <- FastPackedSmi
+ // <- FastSmiOrObject
+ // <- FastDouble
+ // <- Dictionary
+ //
+ // The only exception is TempArrayElements, since it does not describe the
+ // "elements" of the receiver, but instead is used as an "adaptor" so
+ // GallopLeft/GallopRight can be reused with the temporary array.
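+  // For example, if a user-supplied comparison function changes the
+  // receiver's map or length, the "CanUseSameAccessor<>" check fails and
+  // sorting falls back to the GenericElementsAccessor path.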
+ const kGenericElementsAccessorId: Smi = 0;
+ const kFastElementsAccessorId: Smi = 1;
+
+ // This is a special type, used to access the temporary array which is always
+  // PACKED_ELEMENTS. As a result, it does not need a sanity check; running
+  // one could wrongly bail to the slow path.
+ type TempArrayElements;
+
+ // The following index constants describe the layout of the sortState.
+ // The sortState is currently implemented as a FixedArray of
+ // size kSortStateSize.
+
+ // The receiver of the Array.p.sort call.
+ const kReceiverIdx: constexpr int31 = 0;
+
+ // The initial map and length of the receiver. After calling into JS, these
+ // are reloaded and checked. If they changed we bail to the baseline
+ // GenericElementsAccessor.
+ const kInitialReceiverMapIdx: constexpr int31 = 1;
+ const kInitialReceiverLengthIdx: constexpr int31 = 2;
+
+ // If the user provided a comparison function, it is stored here.
+ const kUserCmpFnIdx: constexpr int31 = 3;
+
+ // Function pointer to the comparison function. This can either be a builtin
+  // that calls the user-provided comparison function, or "SortCompareDefault",
+  // which uses ToString and a lexicographical compare.
+ const kSortComparePtrIdx: constexpr int31 = 4;
+
+  // The following three function pointers represent an Accessor/Path.
+ // These are used to Load/Store elements and to check whether to bail to the
+ // baseline GenericElementsAccessor.
+ const kLoadFnIdx: constexpr int31 = 5;
+ const kStoreFnIdx: constexpr int31 = 6;
+ const kCanUseSameAccessorFnIdx: constexpr int31 = 7;
+
+ // If this field has the value kFailure, we need to bail to the baseline
+ // GenericElementsAccessor.
+ const kBailoutStatusIdx: constexpr int31 = 8;
+
+ // This controls when we get *into* galloping mode. It's initialized to
+ // kMinGallop. mergeLow and mergeHigh tend to nudge it higher for random data,
+ // and lower for highly structured data.
+ const kMinGallopIdx: constexpr int31 = 9;
+
+ // A stack of sortState[kPendingRunsSizeIdx] pending runs yet to be merged.
+ // Run #i starts at sortState[kPendingRunsIdx][2 * i] and extends for
+ // sortState[kPendingRunsIdx][2 * i + 1] elements:
+ //
+ // [..., base (i-1), length (i-1), base i, length i]
+ //
+ // It's always true (so long as the indices are in bounds) that
+ //
+ // base of run #i + length of run #i == base of run #i + 1
+ //
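+  // For example, with two pending runs the stack array holds
+  //   [base0, length0, base1, length1]
+  // in its first four slots, and while the runs are adjacent
+  // base0 + length0 == base1.
+  //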
+ const kPendingRunsSizeIdx: constexpr int31 = 10;
+ const kPendingRunsIdx: constexpr int31 = 11;
+
+ // The current size of the temporary array.
+ const kTempArraySizeIdx: constexpr int31 = 12;
+
+ // Pointer to the temporary array.
+ const kTempArrayIdx: constexpr int31 = 13;
+
+ // Contains a Smi constant describing which accessors to use. This is used
+ // for reloading the right elements and for a sanity check.
+ const kAccessorIdx: constexpr int31 = 14;
+
+ const kSortStateSize: intptr = 15;
+
+ const kFailure: Smi = -1;
+ const kSuccess: Smi = 0;
+
+ // The maximum number of entries in a SortState's pending-runs stack.
+ // This is enough to sort arrays of size up to about
+ // 32 * phi ** kMaxMergePending
+ // where phi ~= 1.618. 85 is ridiculously large enough, good for an array with
+ // 2 ** 64 elements.
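+  // (For reference: 32 * 1.618 ** 85 is roughly 1.9e19, i.e. about 2 ** 64.)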
+ const kMaxMergePending: constexpr int31 = 85;
+
+ // When we get into galloping mode, we stay there until both runs win less
+  // often than kMinGallop consecutive times. See listsort.txt for more info.
+ const kMinGallopWins: constexpr int31 = 7;
+
+ // Default size of the temporary array. The temporary array is allocated when
+  // it is first requested, but it always has at least this size.
+ const kSortStateTempSize: Smi = 32;
+
+ type LoadFn = builtin(Context, FixedArray, HeapObject, Smi) => Object;
+ type StoreFn = builtin(Context, FixedArray, HeapObject, Smi, Object) => Smi;
+ type CanUseSameAccessorFn = builtin(Context, JSReceiver, Object, Number) =>
+ Boolean;
+ type CompareBuiltinFn = builtin(Context, Object, Object, Object) => Number;
+
+ // The following builtins implement Load/Store for all the Accessors.
+ // The most generic baseline version uses Get-/SetProperty. We do not need
+ // to worry about the prototype chain, because the pre-processing step has
+ // copied values from the prototype chain to the receiver if they were visible
+ // through a hole.
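+  // For example, if the receiver has a hole at index 1 and Array.prototype[1]
+  // is set, the pre-processing step already copied that value into index 1 of
+  // the receiver, so the Load builtins never consult the prototype chain.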
+
+ builtin Load<ElementsAccessor : type>(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ index: Smi): Object {
+ return GetProperty(context, elements, index);
+ }
+
+ Load<FastPackedSmiElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ index: Smi): Object {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ return elems[index];
+ }
+
+ Load<FastSmiOrObjectElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ index: Smi): Object {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ const result: Object = elems[index];
+ if (IsTheHole(result)) {
+ // The pre-processing step removed all holes by compacting all elements
+ // at the start of the array. Finding a hole means the cmp function or
+ // ToString changes the array.
+ return Failure(sortState);
+ }
+ return result;
+ }
+
+ Load<FastDoubleElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ index: Smi): Object {
+ try {
+ const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
+ const value: float64 =
+ LoadDoubleWithHoleCheck(elems, index) otherwise Bailout;
+ return AllocateHeapNumberWithValue(value);
+ }
+ label Bailout {
+ // The pre-processing step removed all holes by compacting all elements
+ // at the start of the array. Finding a hole means the cmp function or
+ // ToString changes the array.
+ return Failure(sortState);
+ }
+ }
+
+ Load<DictionaryElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ index: Smi): Object {
+ try {
+ const dictionary: NumberDictionary =
+ unsafe_cast<NumberDictionary>(elements);
+ const intptr_index: intptr = convert<intptr>(index);
+ const value: Object =
+ BasicLoadNumberDictionaryElement(dictionary, intptr_index)
+ otherwise Bailout, Bailout;
+ return value;
+ }
+ label Bailout {
+ return Failure(sortState);
+ }
+ }
+
+ Load<TempArrayElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ index: Smi): Object {
+ assert(IsFixedArray(elements));
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ return elems[index];
+ }
+
+ builtin Store<ElementsAccessor : type>(
+ context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
+ value: Object): Smi {
+ SetProperty(context, elements, index, value);
+ return kSuccess;
+ }
+
+ Store<FastPackedSmiElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
+ value: Object): Smi {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
+ return kSuccess;
+ }
+
+ Store<FastSmiOrObjectElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
+ value: Object): Smi {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ elems[index] = value;
+ return kSuccess;
+ }
+
+ Store<FastDoubleElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
+ value: Object): Smi {
+ const elems: FixedDoubleArray = unsafe_cast<FixedDoubleArray>(elements);
+ const heap_val: HeapNumber = unsafe_cast<HeapNumber>(value);
+ // Make sure we do not store signalling NaNs into double arrays.
+ const val: float64 = Float64SilenceNaN(convert<float64>(heap_val));
+ StoreFixedDoubleArrayElementWithSmiIndex(elems, index, val);
+ return kSuccess;
+ }
+
+ Store<DictionaryElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
+ value: Object): Smi {
+ const dictionary: NumberDictionary =
+ unsafe_cast<NumberDictionary>(elements);
+ const intptr_index: intptr = convert<intptr>(index);
+ try {
+ BasicStoreNumberDictionaryElement(dictionary, intptr_index, value)
+ otherwise Fail, Fail, ReadOnly;
+ return kSuccess;
+ }
+ label ReadOnly {
+ // We cannot write to read-only data properties. Throw the same TypeError
+ // as SetProperty would.
+ const receiver: JSReceiver = GetReceiver(sortState);
+ ThrowTypeError(
+ context, kStrictReadOnlyProperty, index, Typeof(receiver), receiver);
+ }
+ label Fail {
+ return Failure(sortState);
+ }
+ }
+
+ Store<TempArrayElements>(
+ context: Context, sortState: FixedArray, elements: HeapObject, index: Smi,
+ value: Object): Smi {
+ const elems: FixedArray = unsafe_cast<FixedArray>(elements);
+ elems[index] = value;
+ return kSuccess;
+ }
+
+ extern macro UnsafeCastObjectToCompareBuiltinFn(Object): CompareBuiltinFn;
+ unsafe_cast<CompareBuiltinFn>(o: Object): CompareBuiltinFn {
+ return UnsafeCastObjectToCompareBuiltinFn(o);
+ }
+
+ extern macro UnsafeCastObjectToLoadFn(Object): LoadFn;
+ unsafe_cast<LoadFn>(o: Object): LoadFn {
+ return UnsafeCastObjectToLoadFn(o);
+ }
+
+ extern macro UnsafeCastObjectToStoreFn(Object): StoreFn;
+ unsafe_cast<StoreFn>(o: Object): StoreFn {
+ return UnsafeCastObjectToStoreFn(o);
+ }
+
+ extern macro UnsafeCastObjectToCanUseSameAccessorFn(Object):
+ CanUseSameAccessorFn;
+ unsafe_cast<CanUseSameAccessorFn>(o: Object): CanUseSameAccessorFn {
+ return UnsafeCastObjectToCanUseSameAccessorFn(o);
+ }
+
+ builtin SortCompareDefault(
+ context: Context, comparefn: Object, x: Object, y: Object): Number {
+ assert(comparefn == Undefined);
+
+ if (TaggedIsSmi(x) && TaggedIsSmi(y)) {
+ // TODO(szuend): Replace with a fast CallCFunction call.
+ return SmiLexicographicCompare(context, x, y);
+ }
+
+ // 5. Let xString be ? ToString(x).
+ const xString: String = ToString_Inline(context, x);
+
+ // 6. Let yString be ? ToString(y).
+ const yString: String = ToString_Inline(context, y);
+
+ // 7. Let xSmaller be the result of performing
+ // Abstract Relational Comparison xString < yString.
+ // 8. If xSmaller is true, return -1.
+ if (StringLessThan(context, xString, yString) == True) return -1;
+
+ // 9. Let ySmaller be the result of performing
+ // Abstract Relational Comparison yString < xString.
+ // 10. If ySmaller is true, return 1.
+ if (StringLessThan(context, yString, xString) == True) return 1;
+
+ // 11. Return +0.
+ return 0;
+ }
+
+ builtin SortCompareUserFn(
+ context: Context, comparefn: Object, x: Object, y: Object): Number {
+ assert(comparefn != Undefined);
+ const cmpfn: Callable = unsafe_cast<Callable>(comparefn);
+
+ // a. Let v be ? ToNumber(? Call(comparefn, undefined, x, y)).
+ const v: Number =
+ ToNumber_Inline(context, Call(context, cmpfn, Undefined, x, y));
+
+ // b. If v is NaN, return +0.
+ if (NumberIsNaN(v)) return 0;
+
+ // c. return v.
+ return v;
+ }
+
+ builtin CanUseSameAccessor<ElementsAccessor : type>(
+ context: Context, receiver: JSReceiver, initialReceiverMap: Object,
+ initialReceiverLength: Number): Boolean {
+ assert(IsJSArray(receiver));
+
+ let a: JSArray = unsafe_cast<JSArray>(receiver);
+ if (a.map != initialReceiverMap) return False;
+
+ assert(TaggedIsSmi(initialReceiverLength));
+ let originalLength: Smi = unsafe_cast<Smi>(initialReceiverLength);
+ if (a.length_fast != originalLength) return False;
+
+ return True;
+ }
+
+ CanUseSameAccessor<GenericElementsAccessor>(
+ context: Context, receiver: JSReceiver, initialReceiverMap: Object,
+ initialReceiverLength: Number): Boolean {
+ // Do nothing. We are already on the slow path.
+ return True;
+ }
+
+ CanUseSameAccessor<DictionaryElements>(
+ context: Context, receiver: JSReceiver, initialReceiverMap: Object,
+ initialReceiverLength: Number): Boolean {
+ let obj: JSReceiver = unsafe_cast<JSReceiver>(receiver);
+ return SelectBooleanConstant(obj.map == initialReceiverMap);
+ }
+
+ macro CallCompareFn(
+ context: Context, sortState: FixedArray, x: Object, y: Object): Number
+ labels Bailout {
+ const userCmpFn: Object = sortState[kUserCmpFnIdx];
+ const sortCompare: CompareBuiltinFn =
+ unsafe_cast<CompareBuiltinFn>(sortState[kSortComparePtrIdx]);
+
+ const result: Number = sortCompare(context, userCmpFn, x, y);
+
+ const receiver: JSReceiver = GetReceiver(sortState);
+ const initialReceiverMap: Object = sortState[kInitialReceiverMapIdx];
+ const initialReceiverLength: Number =
+ unsafe_cast<Number>(sortState[kInitialReceiverLengthIdx]);
+ const CanUseSameAccessor: CanUseSameAccessorFn =
+ GetCanUseSameAccessorFn(sortState);
+
+ if (!CanUseSameAccessor(
+ context, receiver, initialReceiverMap, initialReceiverLength)) {
+ goto Bailout;
+ }
+ return result;
+ }
+
+ // Reloading elements after returning from JS is needed since left-trimming
+ // might have occurred. This means we cannot leave any pointer to the elements
+ // backing store on the stack (since it would point to the filler object).
+ // TODO(v8:7995): Remove reloading once left-trimming is removed.
+ macro ReloadElements(sortState: FixedArray): HeapObject {
+ const receiver: JSReceiver = GetReceiver(sortState);
+ if (sortState[kAccessorIdx] == kGenericElementsAccessorId) return receiver;
+
+ const object: JSObject = unsafe_cast<JSObject>(receiver);
+ return object.elements;
+ }
+
+ macro GetLoadFn(sortState: FixedArray): LoadFn {
+ return unsafe_cast<LoadFn>(sortState[kLoadFnIdx]);
+ }
+
+ macro GetStoreFn(sortState: FixedArray): StoreFn {
+ return unsafe_cast<StoreFn>(sortState[kStoreFnIdx]);
+ }
+
+ macro GetCanUseSameAccessorFn(sortState: FixedArray): CanUseSameAccessorFn {
+ return unsafe_cast<CanUseSameAccessorFn>(
+ sortState[kCanUseSameAccessorFnIdx]);
+ }
+
+ macro GetReceiver(sortState: FixedArray): JSReceiver {
+ return unsafe_cast<JSReceiver>(sortState[kReceiverIdx]);
+ }
+
+ // Returns the temporary array without changing its size.
+ macro GetTempArray(sortState: FixedArray): FixedArray {
+ return unsafe_cast<FixedArray>(sortState[kTempArrayIdx]);
+ }
+
+ // Re-loading the stack-size is done in a few places. The small macro allows
+ // for easier invariant checks at all use sites.
+ macro GetPendingRunsSize(sortState: FixedArray): Smi {
+ assert(TaggedIsSmi(sortState[kPendingRunsSizeIdx]));
+ const stack_size: Smi = unsafe_cast<Smi>(sortState[kPendingRunsSizeIdx]);
+
+ assert(stack_size >= 0);
+ return stack_size;
+ }
+
+ macro SetPendingRunsSize(sortState: FixedArray, value: Smi) {
+ sortState[kPendingRunsSizeIdx] = value;
+ }
+
+ macro GetPendingRunBase(pendingRuns: FixedArray, run: Smi): Smi {
+ return unsafe_cast<Smi>(pendingRuns[run << 1]);
+ }
+
+ macro SetPendingRunBase(pendingRuns: FixedArray, run: Smi, value: Smi) {
+ pendingRuns[run << 1] = value;
+ }
+
+ macro GetPendingRunLength(pendingRuns: FixedArray, run: Smi): Smi {
+ return unsafe_cast<Smi>(pendingRuns[(run << 1) + 1]);
+ }
+
+ macro SetPendingRunLength(pendingRuns: FixedArray, run: Smi, value: Smi) {
+ pendingRuns[(run << 1) + 1] = value;
+ }
+
+ macro PushRun(sortState: FixedArray, base: Smi, length: Smi) {
+ assert(GetPendingRunsSize(sortState) < kMaxMergePending);
+
+ const stack_size: Smi = GetPendingRunsSize(sortState);
+ const pending_runs: FixedArray =
+ unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
+
+ SetPendingRunBase(pending_runs, stack_size, base);
+ SetPendingRunLength(pending_runs, stack_size, length);
+
+ SetPendingRunsSize(sortState, stack_size + 1);
+ }
+
+ // Returns the temporary array and makes sure that it is big enough.
+ // TODO(szuend): Implement a better re-size strategy.
+ macro GetTempArray(sortState: FixedArray, requestedSize: Smi): FixedArray {
+ const min_size: Smi = SmiMax(kSortStateTempSize, requestedSize);
+
+ const current_size: Smi = unsafe_cast<Smi>(sortState[kTempArraySizeIdx]);
+ if (current_size >= min_size) {
+ return GetTempArray(sortState);
+ }
+
+ const temp_array: FixedArray =
+ AllocateZeroedFixedArray(convert<intptr>(min_size));
+ FillFixedArrayWithSmiZero(temp_array, min_size);
+
+ sortState[kTempArraySizeIdx] = min_size;
+ sortState[kTempArrayIdx] = temp_array;
+ return temp_array;
+ }
+
+ // This macro jumps to the Bailout label iff kBailoutStatus is kFailure.
+ macro EnsureSuccess(sortState: FixedArray) labels Bailout {
+ const status: Smi = unsafe_cast<Smi>(sortState[kBailoutStatusIdx]);
+ if (status == kFailure) goto Bailout;
+ }
+
+ // Sets kBailoutStatus to kFailure and returns kFailure.
+ macro Failure(sortState: FixedArray): Smi {
+ sortState[kBailoutStatusIdx] = kFailure;
+ return kFailure;
+ }
+
+ // The following Call* macros wrap builtin calls, making call sites more
+ // readable since we can use labels and do not have to check kBailoutStatus
+ // or the return value.
+
+ macro CallLoad(
+ context: Context, sortState: FixedArray, Load: LoadFn,
+ elements: HeapObject, index: Smi): Object labels Bailout {
+ const result: Object = Load(context, sortState, elements, index);
+ EnsureSuccess(sortState) otherwise Bailout;
+ return result;
+ }
+
+ macro CallStore(
+ context: Context, sortState: FixedArray, Store: StoreFn,
+ elements: HeapObject, index: Smi, value: Object) labels Bailout {
+ Store(context, sortState, elements, index, value);
+ EnsureSuccess(sortState) otherwise Bailout;
+ }
+
+ macro CallCopyFromTempArray(
+ context: Context, sortState: FixedArray, dstElements: HeapObject,
+ dstPos: Smi, tempArray: FixedArray, srcPos: Smi, length: Smi)
+ labels Bailout {
+ CopyFromTempArray(
+ context, sortState, dstElements, dstPos, tempArray, srcPos, length);
+ EnsureSuccess(sortState) otherwise Bailout;
+ }
+
+ macro CallCopyWithinSortArray(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ srcPos: Smi, dstPos: Smi, length: Smi)
+ labels Bailout {
+ CopyWithinSortArray(context, sortState, elements, srcPos, dstPos, length);
+ EnsureSuccess(sortState) otherwise Bailout;
+ }
+
+ macro CallGallopRight(
+ context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi
+ labels Bailout {
+ const result: Smi = GallopRight(
+ context, sortState, Load, key, base, length, hint, useTempArray);
+ EnsureSuccess(sortState) otherwise Bailout;
+ return result;
+ }
+
+ macro CallGallopLeft(
+ context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi
+ labels Bailout {
+ const result: Smi = GallopLeft(
+ context, sortState, Load, key, base, length, hint, useTempArray);
+ EnsureSuccess(sortState) otherwise Bailout;
+ return result;
+ }
+
+ macro CallMergeAt(context: Context, sortState: FixedArray, i: Smi)
+ labels Bailout {
+ MergeAt(context, sortState, i);
+ EnsureSuccess(sortState) otherwise Bailout;
+ }
+
+ // Used for OOB asserts in Copy* builtins.
+ macro GetReceiverLengthProperty(
+ context: Context, sortState: FixedArray): Smi {
+ const receiver: JSReceiver = GetReceiver(sortState);
+
+ if (IsJSArray(receiver)) return unsafe_cast<JSArray>(receiver).length_fast;
+
+ const len: Number =
+ ToLength_Inline(context, GetProperty(context, receiver, 'length'));
+ return unsafe_cast<Smi>(len);
+ }
+
+ macro CopyToTempArray(
+ context: Context, sortState: FixedArray, Load: LoadFn,
+ srcElements: HeapObject, srcPos: Smi, tempArray: FixedArray, dstPos: Smi,
+ length: Smi)
+ labels Bailout {
+ assert(srcPos >= 0);
+ assert(dstPos >= 0);
+ assert(srcPos <= GetReceiverLengthProperty(context, sortState) - length);
+ assert(dstPos <= tempArray.length - length);
+
+ let src_idx: Smi = srcPos;
+ let dst_idx: Smi = dstPos;
+ let to: Smi = srcPos + length;
+
+ while (src_idx < to) {
+ let element: Object =
+ CallLoad(context, sortState, Load, srcElements, src_idx++)
+ otherwise Bailout;
+ tempArray[dst_idx++] = element;
+ }
+ }
+
+ builtin CopyFromTempArray(
+ context: Context, sortState: FixedArray, dstElements: HeapObject,
+ dstPos: Smi, tempArray: FixedArray, srcPos: Smi, length: Smi): Smi {
+ assert(srcPos >= 0);
+ assert(dstPos >= 0);
+ assert(srcPos <= tempArray.length - length);
+ assert(dstPos <= GetReceiverLengthProperty(context, sortState) - length);
+
+ let Store: StoreFn = GetStoreFn(sortState);
+
+ let src_idx: Smi = srcPos;
+ let dst_idx: Smi = dstPos;
+ let to: Smi = srcPos + length;
+ try {
+ while (src_idx < to) {
+ CallStore(
+ context, sortState, Store, dstElements, dst_idx++,
+ tempArray[src_idx++])
+ otherwise Bailout;
+ }
+ return kSuccess;
+ }
+ label Bailout {
+ return Failure(sortState);
+ }
+ }
+
+ builtin CopyWithinSortArray(
+ context: Context, sortState: FixedArray, elements: HeapObject,
+ srcPos: Smi, dstPos: Smi, length: Smi): Smi {
+ assert(srcPos >= 0);
+ assert(dstPos >= 0);
+ assert(srcPos <= GetReceiverLengthProperty(context, sortState) - length);
+ assert(dstPos <= GetReceiverLengthProperty(context, sortState) - length);
+
+ try {
+ let Load: LoadFn = GetLoadFn(sortState);
+ let Store: StoreFn = GetStoreFn(sortState);
+
+ if (srcPos < dstPos) {
+ let src_idx: Smi = srcPos + length - 1;
+ let dst_idx: Smi = dstPos + length - 1;
+ while (src_idx >= srcPos) {
+ CopyElement(
+ context, sortState, Load, Store, elements, src_idx--, dst_idx--)
+ otherwise Bailout;
+ }
+ } else {
+ let src_idx: Smi = srcPos;
+ let dst_idx: Smi = dstPos;
+ let to: Smi = srcPos + length;
+ while (src_idx < to) {
+ CopyElement(
+ context, sortState, Load, Store, elements, src_idx++, dst_idx++)
+ otherwise Bailout;
+ }
+ }
+ return kSuccess;
+ }
+ label Bailout {
+ return Failure(sortState);
+ }
+ }
+
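
The branch on srcPos < dstPos in CopyWithinSortArray is the usual memmove-style overlap handling: when the destination range starts to the right of the source range, the copy has to run backwards so that no element is overwritten before it has been read. A minimal Python sketch of the same decision (plain list, no accessor or bailout handling; copy_within is an illustrative name, not part of this patch):

    def copy_within(a, src_pos, dst_pos, length):
        # Copy a[src_pos:src_pos+length] onto a[dst_pos:dst_pos+length],
        # choosing the direction so overlapping ranges stay correct.
        if src_pos < dst_pos:
            # Destination is to the right: walk backwards.
            for i in reversed(range(length)):
                a[dst_pos + i] = a[src_pos + i]
        else:
            # Destination is to the left (or identical): walk forwards.
            for i in range(length):
                a[dst_pos + i] = a[src_pos + i]
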
+  // BinaryInsertionSort is the best method for sorting small arrays: it does
+  // few compares, but can do data movement quadratic in the number of
+  // elements. Doing few compares is an advantage here because every
+  // comparison is expensive due to calling into JS.
+ //
+  // [low, high) is a contiguous range of an array, and is sorted via
+ // binary insertion. This sort is stable.
+ //
+ // On entry, must have low <= start <= high, and that [low, start) is already
+  // sorted. Pass start == low if you do not know!
+ builtin BinaryInsertionSort(
+ context: Context, sortState: FixedArray, low: Smi, startArg: Smi,
+ high: Smi): Smi {
+ assert(low <= startArg && startArg <= high);
+
+ try {
+ let elements: HeapObject = ReloadElements(sortState);
+
+ const Load: LoadFn = GetLoadFn(sortState);
+ const Store: StoreFn = GetStoreFn(sortState);
+
+ let start: Smi = low == startArg ? (startArg + 1) : startArg;
+
+ for (; start < high; ++start) {
+ // Set left to where a[start] belongs.
+ let left: Smi = low;
+ let right: Smi = start;
+
+ const pivot: Object =
+ CallLoad(context, sortState, Load, elements, right)
+ otherwise Bailout;
+
+ // Invariants:
+ // pivot >= all in [low, left).
+ // pivot < all in [right, start).
+ assert(left < right);
+
+ // Find pivot insertion point.
+ while (left < right) {
+ const mid: Smi = left + ((right - left) >>> 1);
+ const mid_element: Object =
+ CallLoad(context, sortState, Load, elements, mid)
+ otherwise Bailout;
+ const order: Number =
+ CallCompareFn(context, sortState, pivot, mid_element)
+ otherwise Bailout;
+ elements = ReloadElements(sortState);
+
+ if (order < 0) {
+ right = mid;
+ } else {
+ left = mid + 1;
+ }
+ }
+ assert(left == right);
+
+ // The invariants still hold, so:
+ // pivot >= all in [low, left) and
+ // pivot < all in [left, start),
+ //
+ // so pivot belongs at left. Note that if there are elements equal to
+ // pivot, left points to the first slot after them -- that's why this
+ // sort is stable.
+ // Slide over to make room.
+ for (let p: Smi = start; p > left; --p) {
+ CopyElement(context, sortState, Load, Store, elements, p - 1, p)
+ otherwise Bailout;
+ }
+ CallStore(context, sortState, Store, elements, left, pivot)
+ otherwise Bailout;
+ }
+ return kSuccess;
+ }
+ label Bailout {
+ return Failure(sortState);
+ }
+ }
+
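
Stripped of the Load/Store indirection and the bailout plumbing, the builtin above is an ordinary binary insertion sort. A rough Python equivalent, assuming a comparison cmp(x, y) that returns a negative number iff x sorts before y (binary_insertion_sort is an illustrative helper, not part of the patch):

    def binary_insertion_sort(a, low, start, high, cmp):
        # [low, start) is already sorted; grow the sorted prefix to [low, high).
        if start == low:
            start += 1
        for i in range(start, high):
            pivot = a[i]
            left, right = low, i
            # Invariant: pivot >= everything in [low, left),
            #            pivot <  everything in [right, i).
            while left < right:
                mid = (left + right) >> 1
                if cmp(pivot, a[mid]) < 0:
                    right = mid
                else:
                    left = mid + 1
            # Equal elements end up left of the pivot, which keeps the sort stable.
            a[left + 1:i + 1] = a[left:i]
            a[left] = pivot
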
+ // Return the length of the run beginning at low, in the range [low, high),
+ // low < high is required on entry.
+ // "A run" is the longest ascending sequence, with
+ //
+ // a[low] <= a[low + 1] <= a[low + 2] <= ...
+ //
+ // or the longest descending sequence, with
+ //
+ // a[low] > a[low + 1] > a[low + 2] > ...
+ //
+ // For its intended use in stable mergesort, the strictness of the definition
+ // of "descending" is needed so that the range can safely be reversed
+ // without violating stability (strict ">" ensures there are no equal
+ // elements to get out of order).
+ //
+  // In addition, if the run is "descending", it is reversed, so the counted
+  // range is always ascending by the time this macro returns.
+ macro CountAndMakeRun(
+ context: Context, sortState: FixedArray, lowArg: Smi, high: Smi): Smi
+ labels Bailout {
+ assert(lowArg < high);
+
+ let elements: HeapObject = ReloadElements(sortState);
+ const Load: LoadFn = GetLoadFn(sortState);
+ const Store: StoreFn = GetStoreFn(sortState);
+
+ let low: Smi = lowArg + 1;
+ if (low == high) return 1;
+
+ let run_length: Smi = 2;
+
+ const element_low: Object =
+ CallLoad(context, sortState, Load, elements, low) otherwise Bailout;
+ const element_low_pred: Object =
+ CallLoad(context, sortState, Load, elements, low - 1) otherwise Bailout;
+ let order: Number =
+ CallCompareFn(context, sortState, element_low, element_low_pred)
+ otherwise Bailout;
+ elements = ReloadElements(sortState);
+
+ // TODO(szuend): Replace with "order < 0" once Torque supports it.
+ // Currently the operator<(Number, Number) has return type
+ // 'never' and uses two labels to branch.
+ const is_descending: bool = order < 0 ? true : false;
+
+ let previous_element: Object = element_low;
+ for (let idx: Smi = low + 1; idx < high; ++idx) {
+ const current_element: Object =
+ CallLoad(context, sortState, Load, elements, idx) otherwise Bailout;
+ order =
+ CallCompareFn(context, sortState, current_element, previous_element)
+ otherwise Bailout;
+ elements = ReloadElements(sortState);
+
+ if (is_descending) {
+ if (order >= 0) break;
+ } else {
+ if (order < 0) break;
+ }
+
+ previous_element = current_element;
+ ++run_length;
+ }
+
+ if (is_descending) {
+ ReverseRange(
+ context, sortState, Load, Store, elements, lowArg,
+ lowArg + run_length)
+ otherwise Bailout;
+ }
+
+ return run_length;
+ }
+
+ macro ReverseRange(
+ context: Context, sortState: FixedArray, Load: LoadFn, Store: StoreFn,
+ elements: HeapObject, from: Smi, to: Smi)
+ labels Bailout {
+ let low: Smi = from;
+ let high: Smi = to - 1;
+
+ while (low < high) {
+ const element_low: Object =
+ CallLoad(context, sortState, Load, elements, low) otherwise Bailout;
+ const element_high: Object =
+ CallLoad(context, sortState, Load, elements, high) otherwise Bailout;
+ CallStore(context, sortState, Store, elements, low++, element_high)
+ otherwise Bailout;
+ CallStore(context, sortState, Store, elements, high--, element_low)
+ otherwise Bailout;
+ }
+ }
+
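
Run detection and the strictly-descending rule can be summarized in a few lines of Python (same cmp convention as above; count_and_make_run is an illustrative name):

    def count_and_make_run(a, low, high, cmp):
        # Returns the length of the run starting at low; a descending run is
        # reversed in place, so the counted range is ascending afterwards.
        if low + 1 == high:
            return 1
        run_length = 2
        descending = cmp(a[low + 1], a[low]) < 0
        for i in range(low + 2, high):
            order = cmp(a[i], a[i - 1])
            if descending:
                if order >= 0:      # no longer strictly descending
                    break
            else:
                if order < 0:       # no longer ascending
                    break
            run_length += 1
        if descending:
            a[low:low + run_length] = a[low:low + run_length][::-1]
        return run_length
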
+ // Merges the two runs at stack indices i and i + 1.
+ // Returns kFailure if we need to bailout, kSuccess otherwise.
+ builtin MergeAt(context: Context, sortState: FixedArray, i: Smi): Smi {
+ const stack_size: Smi = GetPendingRunsSize(sortState);
+
+ // We are only allowed to either merge the two top-most runs, or leave
+    // the topmost run alone and merge the two next runs.
+ assert(stack_size >= 2);
+ assert(i >= 0);
+ assert(i == stack_size - 2 || i == stack_size - 3);
+
+ const elements: HeapObject = ReloadElements(sortState);
+ const Load: LoadFn = GetLoadFn(sortState);
+
+ const pending_runs: FixedArray =
+ unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
+ let base_a: Smi = GetPendingRunBase(pending_runs, i);
+ let length_a: Smi = GetPendingRunLength(pending_runs, i);
+ let base_b: Smi = GetPendingRunBase(pending_runs, i + 1);
+ let length_b: Smi = GetPendingRunLength(pending_runs, i + 1);
+ assert(length_a > 0 && length_b > 0);
+ assert(base_a + length_a == base_b);
+
+ // Record the length of the combined runs; if i is the 3rd-last run now,
+ // also slide over the last run (which isn't involved in this merge).
+ // The current run i + 1 goes away in any case.
+ SetPendingRunLength(pending_runs, i, length_a + length_b);
+ if (i == stack_size - 3) {
+ const base: Smi = GetPendingRunBase(pending_runs, i + 2);
+ const length: Smi = GetPendingRunLength(pending_runs, i + 2);
+ SetPendingRunBase(pending_runs, i + 1, base);
+ SetPendingRunLength(pending_runs, i + 1, length);
+ }
+ SetPendingRunsSize(sortState, stack_size - 1);
+
+ try {
+ // Where does b start in a? Elements in a before that can be ignored,
+ // because they are already in place.
+ const key_right: Object =
+ CallLoad(context, sortState, Load, elements, base_b)
+ otherwise Bailout;
+ const k: Smi = CallGallopRight(
+ context, sortState, Load, key_right, base_a, length_a, 0, False)
+ otherwise Bailout;
+ assert(k >= 0);
+
+ base_a = base_a + k;
+ length_a = length_a - k;
+ if (length_a == 0) return kSuccess;
+ assert(length_a > 0);
+
+ // Where does a end in b? Elements in b after that can be ignored,
+ // because they are already in place.
+ let key_left: Object =
+ CallLoad(context, sortState, Load, elements, base_a + length_a - 1)
+ otherwise Bailout;
+ length_b = CallGallopLeft(
+ context, sortState, Load, key_left, base_b, length_b, length_b - 1,
+ False) otherwise Bailout;
+ assert(length_b >= 0);
+ if (length_b == 0) return kSuccess;
+
+ // Merge what remains of the runs, using a temp array with
+ // min(length_a, length_b) elements.
+ if (length_a <= length_b) {
+ MergeLow(context, sortState, base_a, length_a, base_b, length_b)
+ otherwise Bailout;
+ } else {
+ MergeHigh(context, sortState, base_a, length_a, base_b, length_b)
+ otherwise Bailout;
+ }
+ return kSuccess;
+ }
+ label Bailout {
+ return Failure(sortState);
+ }
+ }
+
+ // Locates the proper position of key in a sorted array; if the array contains
+ // an element equal to key, return the position immediately to the left of
+ // the leftmost equal element. (GallopRight does the same except returns the
+ // position to the right of the rightmost equal element (if any)).
+ //
+ // The array is sorted with "length" elements, starting at "base".
+ // "length" must be > 0.
+ //
+ // "hint" is an index at which to begin the search, 0 <= hint < n. The closer
+ // hint is to the final result, the faster this runs.
+ //
+ // The return value is the int offset in 0..length such that
+ //
+  // array[base + offset - 1] < key <= array[base + offset]
+ //
+ // pretending that array[base - 1] is minus infinity and array[base + len]
+  // is plus infinity. In other words, key belongs at index base + offset.
+ builtin GallopLeft(
+ context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi {
+ assert(length > 0 && base >= 0);
+ assert(0 <= hint && hint < length);
+
+ // We cannot leave a pointer to elements on the stack (see comment at
+    // ReloadElements). For this reason we pass a flag indicating whether to
+    // reload and which array to use.
+ let elements: HeapObject = useTempArray == True ? GetTempArray(sortState) :
+ ReloadElements(sortState);
+
+ let last_ofs: Smi = 0;
+ let offset: Smi = 1;
+
+ try {
+ const base_hint_element: Object =
+ CallLoad(context, sortState, Load, elements, base + hint)
+ otherwise Bailout;
+ let order: Number =
+ CallCompareFn(context, sortState, base_hint_element, key)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ if (order < 0) {
+ // a[base + hint] < key: gallop right, until
+ // a[base + hint + last_ofs] < key <= a[base + hint + offset].
+
+ // a[base + length - 1] is highest.
+ let max_ofs: Smi = length - hint;
+ while (offset < max_ofs) {
+ const offset_element: Object =
+ CallLoad(context, sortState, Load, elements, base + hint + offset)
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, offset_element, key)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ // a[base + hint + offset] >= key? Break.
+ if (order >= 0) break;
+
+ last_ofs = offset;
+ offset = (offset << 1) + 1;
+
+ // Integer overflow.
+ if (offset <= 0) offset = max_ofs;
+ }
+
+ if (offset > max_ofs) offset = max_ofs;
+
+ // Translate back to positive offsets relative to base.
+ last_ofs = last_ofs + hint;
+ offset = offset + hint;
+ } else {
+ // key <= a[base + hint]: gallop left, until
+ // a[base + hint - offset] < key <= a[base + hint - last_ofs].
+ assert(order >= 0);
+
+ // a[base + hint] is lowest.
+ let max_ofs: Smi = hint + 1;
+ while (offset < max_ofs) {
+ const offset_element: Object =
+ CallLoad(context, sortState, Load, elements, base + hint - offset)
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, offset_element, key)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ if (order < 0) break;
+
+ last_ofs = offset;
+ offset = (offset << 1) + 1;
+
+ // Integer overflow.
+ if (offset <= 0) offset = max_ofs;
+ }
+
+ if (offset > max_ofs) offset = max_ofs;
+
+ // Translate back to positive offsets relative to base.
+ const tmp: Smi = last_ofs;
+ last_ofs = hint - offset;
+ offset = hint - tmp;
+ }
+
+ assert(-1 <= last_ofs && last_ofs < offset && offset <= length);
+
+ // Now a[base+last_ofs] < key <= a[base+offset], so key belongs somewhere
+ // to the right of last_ofs but no farther right than offset. Do a binary
+ // search, with invariant:
+ // a[base + last_ofs - 1] < key <= a[base + offset].
+ last_ofs++;
+ while (last_ofs < offset) {
+ const m: Smi = last_ofs + ((offset - last_ofs) >>> 1);
+
+ const base_m_element: Object =
+ CallLoad(context, sortState, Load, elements, base + m)
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, base_m_element, key)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ if (order < 0) {
+ last_ofs = m + 1; // a[base + m] < key.
+ } else {
+ offset = m; // key <= a[base + m].
+ }
+ }
+ // so a[base + offset - 1] < key <= a[base + offset].
+ assert(last_ofs == offset);
+ assert(0 <= offset && offset <= length);
+ return offset;
+ }
+ label Bailout {
+ return Failure(sortState);
+ }
+ }
+
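
GallopLeft (and its mirror GallopRight below) is TimSort's exponential-then-binary search starting from a hint. Without the temp-array flag and the elements reloading, it reduces to roughly the following Python (gallop_left is an illustrative helper; cmp as before, and it assumes length > 0 and 0 <= hint < length):

    def gallop_left(key, a, base, length, hint, cmp):
        # Returns offset in [0, length] with a[base+offset-1] < key <= a[base+offset].
        last_ofs, ofs = 0, 1
        if cmp(a[base + hint], key) < 0:
            # Gallop right until a[base+hint+last_ofs] < key <= a[base+hint+ofs].
            max_ofs = length - hint
            while ofs < max_ofs and cmp(a[base + hint + ofs], key) < 0:
                last_ofs, ofs = ofs, (ofs << 1) + 1   # no Smi overflow to handle here
            ofs = min(ofs, max_ofs)
            last_ofs, ofs = last_ofs + hint, ofs + hint
        else:
            # key <= a[base + hint]: gallop left instead.
            max_ofs = hint + 1
            while ofs < max_ofs and cmp(a[base + hint - ofs], key) >= 0:
                last_ofs, ofs = ofs, (ofs << 1) + 1
            ofs = min(ofs, max_ofs)
            last_ofs, ofs = hint - ofs, hint - last_ofs
        # Finish with a binary search over (last_ofs, ofs].
        last_ofs += 1
        while last_ofs < ofs:
            mid = last_ofs + ((ofs - last_ofs) >> 1)
            if cmp(a[base + mid], key) < 0:
                last_ofs = mid + 1
            else:
                ofs = mid
        return ofs
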
+ // Exactly like GallopLeft, except that if key already exists in
+ // [base, base + length), finds the position immediately to the right of the
+ // rightmost equal value.
+ //
+ // The return value is the int offset in 0..length such that
+ //
+ // array[base + offset - 1] <= key < array[base + offset]
+ //
+ // or kFailure on error.
+ builtin GallopRight(
+ context: Context, sortState: FixedArray, Load: LoadFn, key: Object,
+ base: Smi, length: Smi, hint: Smi, useTempArray: Boolean): Smi {
+ assert(length > 0 && base >= 0);
+ assert(0 <= hint && hint < length);
+
+ // We cannot leave a pointer to elements on the stack (see comment at
+    // ReloadElements). For this reason we pass a flag indicating whether to
+    // reload and which array to use.
+ let elements: HeapObject = useTempArray == True ? GetTempArray(sortState) :
+ ReloadElements(sortState);
+
+ let last_ofs: Smi = 0;
+ let offset: Smi = 1;
+
+ try {
+ const base_hint_element: Object =
+ CallLoad(context, sortState, Load, elements, base + hint)
+ otherwise Bailout;
+ let order: Number =
+ CallCompareFn(context, sortState, key, base_hint_element)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ if (order < 0) {
+ // key < a[base + hint]: gallop left, until
+ // a[base + hint - offset] <= key < a[base + hint - last_ofs].
+
+ // a[base + hint] is lowest.
+ let max_ofs: Smi = hint + 1;
+ while (offset < max_ofs) {
+ const offset_element: Object =
+ CallLoad(context, sortState, Load, elements, base + hint - offset)
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, key, offset_element)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ if (order >= 0) break;
+
+ last_ofs = offset;
+ offset = (offset << 1) + 1;
+
+ // Integer overflow.
+ if (offset <= 0) offset = max_ofs;
+ }
+
+ if (offset > max_ofs) offset = max_ofs;
+
+ // Translate back to positive offsets relative to base.
+ const tmp: Smi = last_ofs;
+ last_ofs = hint - offset;
+ offset = hint - tmp;
+ } else {
+ // a[base + hint] <= key: gallop right, until
+ // a[base + hint + last_ofs] <= key < a[base + hint + offset].
+
+ // a[base + length - 1] is highest.
+ let max_ofs: Smi = length - hint;
+ while (offset < max_ofs) {
+ const offset_element: Object =
+ CallLoad(context, sortState, Load, elements, base + hint + offset)
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, key, offset_element)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ // a[base + hint + ofs] <= key.
+ if (order < 0) break;
+
+ last_ofs = offset;
+ offset = (offset << 1) + 1;
+
+ // Integer overflow.
+ if (offset <= 0) offset = max_ofs;
+ }
+
+ if (offset > max_ofs) offset = max_ofs;
+
+        // Translate back to positive offsets relative to base.
+ last_ofs = last_ofs + hint;
+ offset = offset + hint;
+ }
+ assert(-1 <= last_ofs && last_ofs < offset && offset <= length);
+
+ // Now a[base + last_ofs] <= key < a[base + ofs], so key belongs
+ // somewhere to the right of last_ofs but no farther right than ofs.
+ // Do a binary search, with invariant
+      // a[base + last_ofs - 1] <= key < a[base + ofs].
+ last_ofs++;
+ while (last_ofs < offset) {
+ const m: Smi = last_ofs + ((offset - last_ofs) >>> 1);
+
+ const base_m_element: Object =
+ CallLoad(context, sortState, Load, elements, base + m)
+ otherwise Bailout;
+ order = CallCompareFn(context, sortState, key, base_m_element)
+ otherwise Bailout;
+ if (useTempArray == False) {
+ elements = ReloadElements(sortState);
+ }
+
+ if (order < 0) {
+ offset = m; // key < a[base + m].
+ } else {
+ last_ofs = m + 1; // a[base + m] <= key.
+ }
+ }
+ // so a[base + offset - 1] <= key < a[base + offset].
+ assert(last_ofs == offset);
+ assert(0 <= offset && offset <= length);
+ return offset;
+ }
+ label Bailout {
+ return Failure(sortState);
+ }
+ }
+
+ // Copies a single element inside the array/object (NOT the temp_array).
+ macro CopyElement(
+ context: Context, sortState: FixedArray, Load: LoadFn, Store: StoreFn,
+ elements: HeapObject, from: Smi, to: Smi)
+ labels Bailout {
+ const element: Object = CallLoad(context, sortState, Load, elements, from)
+ otherwise Bailout;
+ CallStore(context, sortState, Store, elements, to, element)
+ otherwise Bailout;
+ }
+
+ // Merge the length_a elements starting at base_a with the length_b elements
+ // starting at base_b in a stable way, in-place. length_a and length_b must
+ // be > 0, and base_a + length_a == base_b. Must also have that
+ // array[base_b] < array[base_a],
+ // that array[base_a + length_a - 1] belongs at the end of the merge,
+ // and should have length_a <= length_b.
+ macro MergeLow(
+ context: Context, sortState: FixedArray, baseA: Smi, lengthA: Smi,
+ baseB: Smi, lengthB: Smi)
+ labels Bailout {
+ assert(0 < lengthA && 0 < lengthB);
+ assert(0 <= baseA && 0 < baseB);
+ assert(baseA + lengthA == baseB);
+
+ let length_a: Smi = lengthA;
+ let length_b: Smi = lengthB;
+
+ let elements: HeapObject = ReloadElements(sortState);
+ const LoadF: LoadFn = GetLoadFn(sortState);
+ const Store: StoreFn = GetStoreFn(sortState);
+
+ const temp_array: FixedArray = GetTempArray(sortState, length_a);
+ CopyToTempArray(
+ context, sortState, LoadF, elements, baseA, temp_array, 0, length_a)
+ otherwise Bailout;
+
+ let dest: Smi = baseA;
+ let cursor_temp: Smi = 0;
+ let cursor_b: Smi = baseB;
+
+ CopyElement(context, sortState, LoadF, Store, elements, cursor_b++, dest++)
+ otherwise Bailout;
+
+ try {
+ if (--length_b == 0) goto Succeed;
+ if (length_a == 1) goto CopyB;
+
+ let min_gallop: Smi = unsafe_cast<Smi>(sortState[kMinGallopIdx]);
+ // TODO(szuend): Replace with something that does not have a runtime
+      // overhead as soon as it's available in Torque.
+ while (Int32TrueConstant()) {
+ let nof_wins_a: Smi = 0; // # of times A won in a row.
+ let nof_wins_b: Smi = 0; // # of times B won in a row.
+
+ // Do the straightforward thing until (if ever) one run appears to
+ // win consistently.
+ // TODO(szuend): Replace with something that does not have a runtime
+        // overhead as soon as it's available in Torque.
+ while (Int32TrueConstant()) {
+ assert(length_a > 1 && length_b > 0);
+
+ let element_b: Object =
+ CallLoad(context, sortState, LoadF, elements, cursor_b)
+ otherwise Bailout;
+ let order: Number = CallCompareFn(
+ context, sortState, element_b, temp_array[cursor_temp])
+ otherwise Bailout;
+ elements = ReloadElements(sortState);
+
+ if (order < 0) {
+ CopyElement(
+ context, sortState, LoadF, Store, elements, cursor_b, dest)
+ otherwise Bailout;
+
+ ++cursor_b;
+ ++dest;
+ ++nof_wins_b;
+ --length_b;
+ nof_wins_a = 0;
+
+ if (length_b == 0) goto Succeed;
+ if (nof_wins_b >= min_gallop) break;
+ } else {
+ CallStore(
+ context, sortState, Store, elements, dest,
+ temp_array[cursor_temp])
+ otherwise Bailout;
+
+ ++cursor_temp;
+ ++dest;
+ ++nof_wins_a;
+ --length_a;
+ nof_wins_b = 0;
+
+ if (length_a == 1) goto CopyB;
+ if (nof_wins_a >= min_gallop) break;
+ }
+ }
+
+ // One run is winning so consistently that galloping may be a huge win.
+ // So try that, and continue galloping until (if ever) neither run
+ // appears to be winning consistently anymore.
+ ++min_gallop;
+ let first_iteration: bool = true;
+ while (nof_wins_a >= kMinGallopWins || nof_wins_b >= kMinGallopWins ||
+ first_iteration) {
+ first_iteration = false;
+ assert(length_a > 1 && length_b > 0);
+
+ min_gallop = SmiMax(1, min_gallop - 1);
+ sortState[kMinGallopIdx] = min_gallop;
+
+ let key_right: Object =
+ CallLoad(context, sortState, LoadF, elements, cursor_b)
+ otherwise Bailout;
+ nof_wins_a = CallGallopRight(
+ context, sortState, Load<TempArrayElements>, key_right,
+ cursor_temp, length_a, 0, True) otherwise Bailout;
+ assert(nof_wins_a >= 0);
+
+ if (nof_wins_a > 0) {
+ CallCopyFromTempArray(
+ context, sortState, elements, dest, temp_array, cursor_temp,
+ nof_wins_a) otherwise Bailout;
+ dest = dest + nof_wins_a;
+ cursor_temp = cursor_temp + nof_wins_a;
+ length_a = length_a - nof_wins_a;
+
+ if (length_a == 1) goto CopyB;
+
+ // length_a == 0 is impossible now if the comparison function is
+ // consistent, but we can't assume that it is.
+ if (length_a == 0) goto Succeed;
+ }
+ CopyElement(
+ context, sortState, LoadF, Store, elements, cursor_b++, dest++)
+ otherwise Bailout;
+ if (--length_b == 0) goto Succeed;
+
+ nof_wins_b = CallGallopLeft(
+ context, sortState, LoadF, temp_array[cursor_temp], cursor_b,
+ length_b, 0, False)
+ otherwise Bailout;
+ assert(nof_wins_b >= 0);
+ if (nof_wins_b > 0) {
+ CallCopyWithinSortArray(
+ context, sortState, elements, cursor_b, dest, nof_wins_b)
+ otherwise Bailout;
+
+ dest = dest + nof_wins_b;
+ cursor_b = cursor_b + nof_wins_b;
+ length_b = length_b - nof_wins_b;
+
+ if (length_b == 0) goto Succeed;
+ }
+ CallStore(
+ context, sortState, Store, elements, dest++,
+ temp_array[cursor_temp++])
+ otherwise Bailout;
+ if (--length_a == 1) goto CopyB;
+ }
+ ++min_gallop; // Penalize it for leaving galloping mode
+ sortState[kMinGallopIdx] = min_gallop;
+ }
+ }
+ label Succeed {
+ if (length_a > 0) {
+ CallCopyFromTempArray(
+ context, sortState, elements, dest, temp_array, cursor_temp,
+ length_a) otherwise Bailout;
+ }
+ }
+ label CopyB {
+ assert(length_a == 1 && length_b > 0);
+ // The last element of run A belongs at the end of the merge.
+ CallCopyWithinSortArray(
+ context, sortState, elements, cursor_b, dest, length_b)
+ otherwise Bailout;
+ CallStore(
+ context, sortState, Store, elements, dest + length_b,
+ temp_array[cursor_temp])
+ otherwise Bailout;
+ }
+ }
+
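
Galloping aside, the data movement in MergeLow is a textbook stable forward merge that first copies the left run into a temporary buffer. A stripped-down Python version (merge_lo is an illustrative name; it ignores the adaptive min_gallop machinery entirely and assumes the runs are adjacent, base_a + len_a == base_b):

    def merge_lo(a, base_a, len_a, base_b, len_b, cmp):
        # Merge a[base_a:base_a+len_a] with a[base_b:base_b+len_b] in place.
        tmp = a[base_a:base_a + len_a]            # run A, copied out of the way
        i, j, dest = 0, base_b, base_a
        while i < len_a and j < base_b + len_b:
            if cmp(a[j], tmp[i]) < 0:             # strictly smaller: B wins
                a[dest] = a[j]
                j += 1
            else:                                 # ties go to A, keeping stability
                a[dest] = tmp[i]
                i += 1
            dest += 1
        a[dest:dest + (len_a - i)] = tmp[i:]      # leftovers of A; B is already in place
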
+ // Merge the length_a elements starting at base_a with the length_b elements
+ // starting at base_b in a stable way, in-place. length_a and length_b must
+ // be > 0. Must also have that array[base_a + length_a - 1] belongs at the
+ // end of the merge and should have length_a >= length_b.
+ macro MergeHigh(
+ context: Context, sortState: FixedArray, baseA: Smi, lengthA: Smi,
+ baseB: Smi, lengthB: Smi)
+ labels Bailout {
+ assert(0 < lengthA && 0 < lengthB);
+ assert(0 <= baseA && 0 < baseB);
+ assert(baseA + lengthA == baseB);
+
+ let length_a: Smi = lengthA;
+ let length_b: Smi = lengthB;
+
+ let elements: HeapObject = ReloadElements(sortState);
+ const LoadF: LoadFn = GetLoadFn(sortState);
+ const Store: StoreFn = GetStoreFn(sortState);
+
+ const temp_array: FixedArray = GetTempArray(sortState, length_b);
+ CopyToTempArray(
+ context, sortState, LoadF, elements, baseB, temp_array, 0, length_b)
+ otherwise Bailout;
+
+ // MergeHigh merges the two runs backwards.
+ let dest: Smi = baseB + length_b - 1;
+ let cursor_temp: Smi = length_b - 1;
+ let cursor_a: Smi = baseA + length_a - 1;
+
+ CopyElement(context, sortState, LoadF, Store, elements, cursor_a--, dest--)
+ otherwise Bailout;
+
+ try {
+ if (--length_a == 0) goto Succeed;
+ if (length_b == 1) goto CopyA;
+
+ let min_gallop: Smi = unsafe_cast<Smi>(sortState[kMinGallopIdx]);
+ // TODO(szuend): Replace with something that does not have a runtime
+      // overhead as soon as it's available in Torque.
+ while (Int32TrueConstant()) {
+ let nof_wins_a: Smi = 0; // # of times A won in a row.
+ let nof_wins_b: Smi = 0; // # of times B won in a row.
+
+ // Do the straightforward thing until (if ever) one run appears to
+ // win consistently.
+ // TODO(szuend): Replace with something that does not have a runtime
+        // overhead as soon as it's available in Torque.
+ while (Int32TrueConstant()) {
+ assert(length_a > 0 && length_b > 1);
+
+ let element_a: Object =
+ CallLoad(context, sortState, LoadF, elements, cursor_a)
+ otherwise Bailout;
+ let order: Number = CallCompareFn(
+ context, sortState, temp_array[cursor_temp], element_a)
+ otherwise Bailout;
+ elements = ReloadElements(sortState);
+
+ if (order < 0) {
+ CopyElement(
+ context, sortState, LoadF, Store, elements, cursor_a, dest)
+ otherwise Bailout;
+
+ --cursor_a;
+ --dest;
+ ++nof_wins_a;
+ --length_a;
+ nof_wins_b = 0;
+
+ if (length_a == 0) goto Succeed;
+ if (nof_wins_a >= min_gallop) break;
+ } else {
+ CallStore(
+ context, sortState, Store, elements, dest,
+ temp_array[cursor_temp])
+ otherwise Bailout;
+
+ --cursor_temp;
+ --dest;
+ ++nof_wins_b;
+ --length_b;
+ nof_wins_a = 0;
+
+ if (length_b == 1) goto CopyA;
+ if (nof_wins_b >= min_gallop) break;
+ }
+ }
+
+ // One run is winning so consistently that galloping may be a huge win.
+ // So try that, and continue galloping until (if ever) neither run
+ // appears to be winning consistently anymore.
+ ++min_gallop;
+ let first_iteration: bool = true;
+ while (nof_wins_a >= kMinGallopWins || nof_wins_b >= kMinGallopWins ||
+ first_iteration) {
+ first_iteration = false;
+
+ assert(length_a > 0 && length_b > 1);
+
+ min_gallop = SmiMax(1, min_gallop - 1);
+ sortState[kMinGallopIdx] = min_gallop;
+
+ let k: Smi = CallGallopRight(
+ context, sortState, LoadF, temp_array[cursor_temp], baseA,
+ length_a, length_a - 1, False)
+ otherwise Bailout;
+ assert(k >= 0);
+ nof_wins_a = length_a - k;
+
+ if (nof_wins_a > 0) {
+ dest = dest - nof_wins_a;
+ cursor_a = cursor_a - nof_wins_a;
+ CallCopyWithinSortArray(
+ context, sortState, elements, cursor_a + 1, dest + 1,
+ nof_wins_a)
+ otherwise Bailout;
+
+ length_a = length_a - nof_wins_a;
+ if (length_a == 0) goto Succeed;
+ }
+ CallStore(
+ context, sortState, Store, elements, dest--,
+ temp_array[cursor_temp--])
+ otherwise Bailout;
+ if (--length_b == 1) goto CopyA;
+
+ let key: Object =
+ CallLoad(context, sortState, LoadF, elements, cursor_a)
+ otherwise Bailout;
+ k = CallGallopLeft(
+ context, sortState, Load<TempArrayElements>, key, 0, length_b,
+ length_b - 1, True) otherwise Bailout;
+ assert(k >= 0);
+ nof_wins_b = length_b - k;
+
+ if (nof_wins_b > 0) {
+ dest = dest - nof_wins_b;
+ cursor_temp = cursor_temp - nof_wins_b;
+ CallCopyFromTempArray(
+ context, sortState, elements, dest + 1, temp_array,
+ cursor_temp + 1, nof_wins_b) otherwise Bailout;
+
+ length_b = length_b - nof_wins_b;
+ if (length_b == 1) goto CopyA;
+
+ // length_b == 0 is impossible now if the comparison function is
+ // consistent, but we can't assume that it is.
+ if (length_b == 0) goto Succeed;
+ }
+ CopyElement(
+ context, sortState, LoadF, Store, elements, cursor_a--, dest--)
+ otherwise Bailout;
+ if (--length_a == 0) goto Succeed;
+ }
+ ++min_gallop;
+ sortState[kMinGallopIdx] = min_gallop;
+ }
+ }
+ label Succeed {
+ if (length_b > 0) {
+ assert(length_a == 0);
+ CallCopyFromTempArray(
+ context, sortState, elements, dest - (length_b - 1), temp_array, 0,
+ length_b) otherwise Bailout;
+ }
+ }
+ label CopyA {
+ assert(length_b == 1 && length_a > 0);
+
+ // The first element of run B belongs at the front of the merge.
+ dest = dest - length_a;
+ cursor_a = cursor_a - length_a;
+ CallCopyWithinSortArray(
+ context, sortState, elements, cursor_a + 1, dest + 1, length_a)
+ otherwise Bailout;
+ CallStore(
+ context, sortState, Store, elements, dest, temp_array[cursor_temp])
+ otherwise Bailout;
+ }
+ }
+
+ // Compute a good value for the minimum run length; natural runs shorter than
+ // this are boosted artificially via binary insertion sort.
+ //
+ // If n < 64, return n (it's too small to bother with fancy stuff).
+ // Else if n is an exact power of 2, return 32.
+ // Else return an int k, 32 <= k <= 64, such that n/k is close to, but
+ // strictly less than, an exact power of 2.
+ //
+ // See listsort.txt for more info.
+ macro ComputeMinRunLength(nArg: Smi): Smi {
+ let n: Smi = nArg;
+ let r: Smi = 0; // Becomes 1 if any 1 bits are shifted off.
+
+ assert(n >= 0);
+ while (n >= 64) {
+ r = r | (n & 1);
+ n = n >>> 1;
+ }
+
+ const min_run_length: Smi = n + r;
+ assert(nArg < 64 || (32 <= min_run_length && min_run_length <= 64));
+ return min_run_length;
+ }
+
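
The same computation in Python: keep the six most significant bits of n and add one if any lower bit was set, which gives exactly n for n < 64, 32 for exact powers of two, and otherwise a value in [32, 64] (compute_min_run_length is an illustrative name):

    def compute_min_run_length(n):
        # Natural runs shorter than the result are extended with
        # binary insertion sort before being pushed on the run stack.
        r = 0                    # becomes 1 if any shifted-out bit was 1
        while n >= 64:
            r |= n & 1
            n >>= 1
        return n + r

For example, compute_min_run_length(63) == 63, while compute_min_run_length(1 << 20) == 32 because 2**20 is an exact power of two.
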
+  // Returns true iff run_length(n - 2) > run_length(n - 1) + run_length(n)
+  // (trivially true when there are fewer than three runs on the stack).
+ macro RunInvariantEstablished(pendingRuns: FixedArray, n: Smi): bool {
+ if (n < 2) return true;
+
+ const run_length_n: Smi = GetPendingRunLength(pendingRuns, n);
+ const run_length_nm: Smi = GetPendingRunLength(pendingRuns, n - 1);
+ const run_length_nmm: Smi = GetPendingRunLength(pendingRuns, n - 2);
+
+ return run_length_nmm > run_length_nm + run_length_n;
+ }
+
+ // Examines the stack of runs waiting to be merged, merging adjacent runs
+ // until the stack invariants are re-established:
+ //
+ // 1. run_length(i - 3) > run_length(i - 2) + run_length(i - 1)
+ // 2. run_length(i - 2) > run_length(i - 1)
+ //
+ // TODO(szuend): Remove unnecessary loads. This macro was refactored to
+ // improve readability, introducing unnecessary loads in the
+ // process. Determine if all these extra loads are ok.
+ macro MergeCollapse(context: Context, sortState: FixedArray)
+ labels Bailout {
+ const pending_runs: FixedArray =
+ unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
+
+ // Reload the stack size because MergeAt might change it.
+ while (GetPendingRunsSize(sortState) > 1) {
+ let n: Smi = GetPendingRunsSize(sortState) - 2;
+
+ if (!RunInvariantEstablished(pending_runs, n + 1) ||
+ !RunInvariantEstablished(pending_runs, n)) {
+ if (GetPendingRunLength(pending_runs, n - 1) <
+ GetPendingRunLength(pending_runs, n + 1)) {
+ --n;
+ }
+
+ CallMergeAt(context, sortState, n) otherwise Bailout;
+ } else if (
+ GetPendingRunLength(pending_runs, n) <=
+ GetPendingRunLength(pending_runs, n + 1)) {
+ CallMergeAt(context, sortState, n) otherwise Bailout;
+ } else {
+ break;
+ }
+ }
+ }
+
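
Written against a plain Python list of (base, length) pairs, the invariant check plus the merge loop above look like this (merge_collapse is an illustrative helper; merge_at is whatever callable merges runs[i] and runs[i + 1] and shrinks the list). Checking the invariant one level deeper (the n > 1 clause) mirrors the two RunInvariantEstablished calls in the macro above:

    def merge_collapse(runs, merge_at):
        # Keep merging until both stack invariants hold for the top three runs:
        #   1. run_length(i - 3) > run_length(i - 2) + run_length(i - 1)
        #   2. run_length(i - 2) > run_length(i - 1)
        while len(runs) > 1:
            n = len(runs) - 2
            if (n > 0 and runs[n - 1][1] <= runs[n][1] + runs[n + 1][1]) or \
               (n > 1 and runs[n - 2][1] <= runs[n - 1][1] + runs[n][1]):
                if runs[n - 1][1] < runs[n + 1][1]:
                    n -= 1
                merge_at(n)
            elif runs[n][1] <= runs[n + 1][1]:
                merge_at(n)
            else:
                break
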
+ // Regardless of invariants, merge all runs on the stack until only one
+ // remains. This is used at the end of the mergesort.
+ macro MergeForceCollapse(context: Context, sortState: FixedArray)
+ labels Bailout {
+ let pending_runs: FixedArray =
+ unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]);
+
+    // Reload the stack size because MergeAt might change it.
+ while (GetPendingRunsSize(sortState) > 1) {
+ let n: Smi = GetPendingRunsSize(sortState) - 2;
+
+ if (n > 0 &&
+ GetPendingRunLength(pending_runs, n - 1) <
+ GetPendingRunLength(pending_runs, n + 1)) {
+ --n;
+ }
+ CallMergeAt(context, sortState, n) otherwise Bailout;
+ }
+ }
+
+ macro InitializeSortState(sortState: FixedArray) {
+ sortState[kMinGallopIdx] = SmiConstant(kMinGallopWins);
+ sortState[kTempArraySizeIdx] = SmiConstant(0);
+
+ SetPendingRunsSize(sortState, 0);
+ let pending_runs: FixedArray =
+ AllocateZeroedFixedArray(convert<intptr>(kMaxMergePending));
+ FillFixedArrayWithSmiZero(pending_runs, kMaxMergePending);
+ sortState[kPendingRunsIdx] = pending_runs;
+ }
+
+ macro InitializeSortStateAccessor<Accessor : type>(sortState: FixedArray) {
+ sortState[kAccessorIdx] = kFastElementsAccessorId;
+ sortState[kLoadFnIdx] = Load<Accessor>;
+ sortState[kStoreFnIdx] = Store<Accessor>;
+ sortState[kCanUseSameAccessorFnIdx] = CanUseSameAccessor<Accessor>;
+ }
+
+ InitializeSortStateAccessor<GenericElementsAccessor>(sortState: FixedArray) {
+ sortState[kAccessorIdx] = kGenericElementsAccessorId;
+ sortState[kLoadFnIdx] = Load<GenericElementsAccessor>;
+ sortState[kStoreFnIdx] = Store<GenericElementsAccessor>;
+ sortState[kCanUseSameAccessorFnIdx] =
+ CanUseSameAccessor<GenericElementsAccessor>;
+ }
+
+ macro ArrayTimSortImpl(context: Context, sortState: FixedArray, length: Smi)
+ labels Bailout {
+ InitializeSortState(sortState);
+
+ if (length < 2) return;
+ let remaining: Smi = length;
+
+ // March over the array once, left to right, finding natural runs,
+ // and extending short natural runs to minrun elements.
+ let low: Smi = 0;
+ const min_run_length: Smi = ComputeMinRunLength(remaining);
+ while (remaining != 0) {
+ let current_run_length: Smi =
+ CountAndMakeRun(context, sortState, low, low + remaining)
+ otherwise Bailout;
+
+ // If the run is short, extend it to min(min_run_length, remaining).
+ if (current_run_length < min_run_length) {
+ const forced_run_length: Smi = SmiMin(min_run_length, remaining);
+ BinaryInsertionSort(
+ context, sortState, low, low + current_run_length,
+ low + forced_run_length);
+ EnsureSuccess(sortState) otherwise Bailout;
+ current_run_length = forced_run_length;
+ }
+
+ // Push run onto pending-runs stack, and maybe merge.
+ PushRun(sortState, low, current_run_length);
+
+ MergeCollapse(context, sortState) otherwise Bailout;
+
+ // Advance to find next run.
+ low = low + current_run_length;
+ remaining = remaining - current_run_length;
+ }
+
+ MergeForceCollapse(context, sortState) otherwise Bailout;
+ assert(GetPendingRunsSize(sortState) == 1);
+ assert(
+ GetPendingRunLength(
+ unsafe_cast<FixedArray>(sortState[kPendingRunsIdx]), 0) == length);
+ }
+
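
Chained together with the helpers sketched earlier in this section (compute_min_run_length, count_and_make_run, binary_insertion_sort, merge_collapse and merge_lo, all illustrative Python rather than part of the patch), the driver loop above corresponds to:

    def timsort(a, cmp):
        n = len(a)
        if n < 2:
            return
        runs = []                       # pending runs as (base, length) pairs

        def merge_at(i):
            # The real code picks MergeLow or MergeHigh depending on which run
            # is shorter and trims both runs with galloping first; this sketch
            # always merges via merge_lo.
            base_a, len_a = runs[i]
            base_b, len_b = runs[i + 1]
            merge_lo(a, base_a, len_a, base_b, len_b, cmp)
            runs[i] = (base_a, len_a + len_b)
            del runs[i + 1]

        min_run = compute_min_run_length(n)
        low, remaining = 0, n
        while remaining:
            run_len = count_and_make_run(a, low, low + remaining, cmp)
            if run_len < min_run:                       # boost short natural runs
                forced = min(min_run, remaining)
                binary_insertion_sort(a, low, low + run_len, low + forced, cmp)
                run_len = forced
            runs.append((low, run_len))
            merge_collapse(runs, merge_at)              # restore stack invariants
            low += run_len
            remaining -= run_len
        while len(runs) > 1:                            # force-collapse at the end
            k = len(runs) - 2
            if k > 0 and runs[k - 1][1] < runs[k + 1][1]:
                k -= 1
            merge_at(k)
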
+ builtin ArrayTimSort(
+ context: Context, sortState: FixedArray, length: Smi): Object {
+ try {
+ ArrayTimSortImpl(context, sortState, length)
+ otherwise Slow;
+ }
+ label Slow {
+ if (sortState[kAccessorIdx] == kGenericElementsAccessorId) {
+ // We were already on the slow path. This must not happen.
+ unreachable;
+ }
+ sortState[kBailoutStatusIdx] = kSuccess;
+
+ InitializeSortStateAccessor<GenericElementsAccessor>(sortState);
+ ArrayTimSort(context, sortState, length);
+ }
+ return kSuccess;
+ }
+
+ // For compatibility with JSC, we also sort elements inherited from
+ // the prototype chain on non-Array objects.
+ // We do this by copying them to this object and sorting only
+ // own elements. This is not very efficient, but sorting with
+ // inherited elements happens very, very rarely, if at all.
+ // The specification allows "implementation dependent" behavior
+  // if an object on the prototype chain has an element that
+ // might interact with sorting.
+ //
+ // We also move all non-undefined elements to the front of the
+ // array and move the undefineds after that. Holes are removed.
+ // This happens for Array as well as non-Array objects.
+ extern runtime PrepareElementsForSort(Context, Object, Number): Smi;
+ extern macro FillFixedArrayWithSmiZero(FixedArray, Smi);
+
+ // https://tc39.github.io/ecma262/#sec-array.prototype.sort
+ javascript builtin ArrayPrototypeSort(
+ context: Context, receiver: Object, ...arguments): Object {
+ // 1. If comparefn is not undefined and IsCallable(comparefn) is false,
+ // throw a TypeError exception.
+ const comparefnObj: Object = arguments[0];
+ if (comparefnObj != Undefined && !TaggedIsCallable(comparefnObj)) {
+ ThrowTypeError(context, kBadSortComparisonFunction, comparefnObj);
+ }
+
+ // 2. Let obj be ? ToObject(this value).
+ const obj: JSReceiver = ToObject(context, receiver);
+ let map: Map = obj.map;
+
+ const sort_state: FixedArray =
+ AllocateZeroedFixedArray(kSortStateSize);
+ FillFixedArrayWithSmiZero(sort_state, SmiTag(kSortStateSize));
+
+ sort_state[kReceiverIdx] = obj;
+ sort_state[kUserCmpFnIdx] = comparefnObj;
+ sort_state[kSortComparePtrIdx] =
+ comparefnObj != Undefined ? SortCompareUserFn : SortCompareDefault;
+ sort_state[kInitialReceiverMapIdx] = map;
+ sort_state[kBailoutStatusIdx] = kSuccess;
+
+ try {
+ const a: JSArray = cast<JSArray>(obj) otherwise slow;
+ const elementsKind: ElementsKind = map.elements_kind;
+ if (!IsFastElementsKind(elementsKind)) goto slow;
+
+ // 3. Let len be ? ToLength(? Get(obj, "length")).
+ const len: Smi = a.length_fast;
+ if (len < 2) return receiver;
+
+ // TODO(szuend): Investigate performance tradeoff of skipping this step
+ // for PACKED_* and handling Undefineds during sorting.
+ const nofNonUndefined: Smi = PrepareElementsForSort(context, obj, len);
+ assert(a.map == map);
+
+ sort_state[kInitialReceiverLengthIdx] = len;
+
+ if (IsDoubleElementsKind(elementsKind)) {
+ InitializeSortStateAccessor<FastDoubleElements>(sort_state);
+ } else if (elementsKind == PACKED_SMI_ELEMENTS) {
+ InitializeSortStateAccessor<FastPackedSmiElements>(sort_state);
+ } else {
+ InitializeSortStateAccessor<FastSmiOrObjectElements>(sort_state);
+ }
+ ArrayTimSort(context, sort_state, nofNonUndefined);
+ }
+ label slow {
+ // 3. Let len be ? ToLength(? Get(obj, "length")).
+ const len: Number =
+ ToLength_Inline(context, GetProperty(context, obj, 'length'));
+
+ if (len < 2) return receiver;
+ const nofNonUndefined: Smi = PrepareElementsForSort(context, obj, len);
+
+ sort_state[kInitialReceiverLengthIdx] = len;
+
+ // Reload the map, PrepareElementsForSort might have changed the
+ // elements kind.
+ map = obj.map;
+
+ if (map.elements_kind == DICTIONARY_ELEMENTS && IsExtensibleMap(map) &&
+ !IsCustomElementsReceiverInstanceType(map.instance_type)) {
+ InitializeSortStateAccessor<DictionaryElements>(sort_state);
+ } else {
+ InitializeSortStateAccessor<GenericElementsAccessor>(sort_state);
+ }
+ ArrayTimSort(context, sort_state, nofNonUndefined);
+ }
+
+ return receiver;
+ }
+}
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 29d7b273f0..05c382e2f6 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -25,9 +25,26 @@ group("v8_check_static_initializers") {
]
}
+group("v8_android_test_runner_deps") {
+ testonly = true
+
+ if (is_android && !build_with_chromium) {
+ data_deps = [
+ "../build/android:test_runner_py",
+ ]
+ data = [
+ # This is used by android.py, but not included by test_runner_py above.
+ "../third_party/catapult/devil/devil/android/perf/",
+ ]
+ }
+}
+
group("v8_testrunner") {
+ testonly = true
+
data_deps = [
"..:v8_dump_build_config",
+ ":v8_android_test_runner_deps",
]
data = [
diff --git a/deps/v8/tools/callstats.py.vpython b/deps/v8/tools/callstats.py.vpython
new file mode 100644
index 0000000000..11e3f34ef6
--- /dev/null
+++ b/deps/v8/tools/callstats.py.vpython
@@ -0,0 +1,43 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for the python wheel dependencies of callstats.py,
+# particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+# vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
+
+python_version: "2.7"
+
+wheel: <
+ name: "infra/python/wheels/numpy/${vpython_platform}"
+ version: "version:1.11.3"
+>
+
+wheel: <
+ name: "infra/python/wheels/scipy/${vpython_platform}"
+ version: "version:0.19.0"
+ match_tag: <
+ abi: "cp27mu"
+ platform: "manylinux1_i686"
+ >
+ match_tag: <
+ abi: "cp27mu"
+ platform: "manylinux1_x86_64"
+ >
+>
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index 5eacc59f32..49431aec30 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-num-compilation-tasks=0 --random-seed 12345 --turbo-filter=~ --noopt --suppress-asm-messages
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-num-compilation-tasks=0 --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-num-compilation-tasks=0 --random-seed 12345 --suppress-asm-messages --stress-scavenge=100
+--abort_on_stack_or_string_length_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --wasm-num-compilation-tasks=0 --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 28685845bb..6fcd49f209 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -21,63 +21,49 @@ import v8_commands
import v8_suppressions
CONFIGS = dict(
- default=[
- '--suppress-asm-messages',
- ],
+ default=[],
ignition=[
'--turbo-filter=~',
'--noopt',
- '--suppress-asm-messages',
+ '--liftoff',
+ '--no-wasm-tier-up',
],
ignition_asm=[
'--turbo-filter=~',
'--noopt',
'--validate-asm',
'--stress-validate-asm',
- '--suppress-asm-messages',
],
ignition_eager=[
'--turbo-filter=~',
'--noopt',
'--no-lazy',
'--no-lazy-inner-functions',
- '--suppress-asm-messages',
- ],
- ignition_turbo=[
- '--suppress-asm-messages',
],
+ ignition_turbo=[],
ignition_turbo_opt=[
'--always-opt',
- '--suppress-asm-messages',
+ '--no-liftoff',
+ '--no-wasm-tier-up',
],
ignition_turbo_opt_eager=[
'--always-opt',
'--no-lazy',
'--no-lazy-inner-functions',
- '--suppress-asm-messages',
- ],
- liftoff=[
- '--liftoff',
- '--no-wasm-tier-up',
- '--suppress-asm-messages',
],
slow_path=[
'--force-slow-path',
- '--suppress-asm-messages',
],
slow_path_opt=[
'--always-opt',
'--force-slow-path',
- '--suppress-asm-messages',
],
trusted=[
'--no-untrusted-code-mitigations',
- '--suppress-asm-messages',
],
trusted_opt=[
'--always-opt',
'--no-untrusted-code-mitigations',
- '--suppress-asm-messages',
],
)
@@ -107,7 +93,8 @@ ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
FLAGS = ['--abort_on_stack_or_string_length_overflow', '--expose-gc',
'--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
- '--es-staging', '--wasm-num-compilation-tasks=0']
+ '--es-staging', '--wasm-num-compilation-tasks=0',
+ '--suppress-asm-messages']
SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index 9f6dd33c54..3b95111271 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -39,7 +39,7 @@ class ConfigTest(unittest.TestCase):
self.assertEqual(
[
'--first-config=ignition',
- '--second-config=ignition_turbo_opt',
+ '--second-config=ignition_turbo',
'--second-d8=d8',
],
v8_fuzz_config.Config('foo', Rng()).choose_foozzie_flags(),
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
index 24bbf8f43d..8cc1939e38 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -13,15 +13,13 @@ FOOZZIE_EXPERIMENTS = [
[5, 'ignition', 'trusted_opt', 'd8'],
[10, 'ignition', 'slow_path', 'd8'],
[5, 'ignition', 'slow_path_opt', 'd8'],
- [20, 'ignition', 'ignition_turbo', 'd8'],
+ [25, 'ignition', 'ignition_turbo', 'd8'],
[20, 'ignition', 'ignition_turbo_opt', 'd8'],
- [4, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
- [4, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
- [4, 'ignition', 'ignition', 'clang_x86/d8'],
+ [5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
+ [5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
+ [5, 'ignition', 'ignition', 'clang_x86/d8'],
[5, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
[5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
- [5, 'ignition', 'liftoff', 'd8'],
- [3, 'liftoff', 'liftoff', 'clang_x86/d8'],
]
class Config(object):
diff --git a/deps/v8/tools/csvparser.js b/deps/v8/tools/csvparser.js
index f0f8680cf8..bd106a7a41 100644
--- a/deps/v8/tools/csvparser.js
+++ b/deps/v8/tools/csvparser.js
@@ -49,6 +49,9 @@ class CsvParser {
if (escapeIdentifier == 'n') {
result += '\n';
nextPos = pos;
+ } else if (escapeIdentifier == '\\') {
+ result += '\\';
+ nextPos = pos;
} else {
if (escapeIdentifier == 'x') {
// \x00 ascii range escapes consume 2 chars.
diff --git a/deps/v8/tools/eval_gc_time.sh b/deps/v8/tools/eval_gc_time.sh
index 9abc93a95f..f809c3579f 100755
--- a/deps/v8/tools/eval_gc_time.sh
+++ b/deps/v8/tools/eval_gc_time.sh
@@ -94,7 +94,6 @@ INTERESTING_OLD_GEN_KEYS="\
clear.slots_buffer \
clear.store_buffer \
clear.string_table \
- clear.weak_cells \
clear.weak_collections \
clear.weak_lists \
evacuate.candidates \
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index 99b5c0aea7..f10667e6c2 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -20,6 +20,7 @@ group("v8_run_gcmole") {
"../../include/",
"../../src/",
"../../test/cctest/",
+ "../../test/common/",
"../../testing/gtest/include/gtest/gtest_prod.h",
"../../third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
"../../third_party/icu/source/",
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index ced0a82096..cea0f07b1e 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -122,3 +122,23 @@ end
set disassembly-flavor intel
set disable-randomization off
+
+# Install a handler whenever the debugger stops due to a signal. It walks up the
+# stack looking for V8_Dcheck and moves the frame to the one above it so it's
+# immediately at the line of code that triggered the DCHECK.
+python
+def dcheck_stop_handler(event):
+ orig_frame = gdb.selected_frame()
+ frame = orig_frame
+ select_frame = None
+ while frame is not None:
+ if frame.name() in ('V8_Dcheck', 'V8_Fatal'):
+ select_frame = frame.older()
+ frame = frame.older()
+
+ if select_frame is not None:
+ select_frame.select()
+ gdb.execute('frame')
+
+gdb.events.stop.connect(dcheck_stop_handler)
+end
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
new file mode 100755
index 0000000000..ca3b0079ca
--- /dev/null
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+# vim:fenc=utf-8:shiftwidth=2
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Check that each header can be included in isolation.
+
+For each header we generate one .cc file which only includes this one header.
+All these .cc files are then added to a sources.gni file which is included in
+BUILD.gn. Just compile to check whether there are any violations of the rule
+that each header must be includable in isolation.
+"""
+
+import argparse
+import os
+import os.path
+import re
+import sys
+
+# TODO(clemensh): Extend to tests.
+DEFAULT_INPUT = ['base', 'src']
+DEFAULT_GN_FILE = 'BUILD.gn'
+MY_DIR = os.path.dirname(os.path.realpath(__file__))
+V8_DIR = os.path.dirname(MY_DIR)
+OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
+AUTO_EXCLUDE = [
+ # flag-definitions.h needs a mode set for being included.
+ 'src/flag-definitions.h',
+ # blacklist of headers we need to fix (https://crbug.com/v8/7965).
+ 'src/allocation-site-scopes.h',
+ 'src/compiler/allocation-builder.h',
+ 'src/compiler/js-context-specialization.h',
+ 'src/compiler/raw-machine-assembler.h',
+ 'src/dateparser-inl.h',
+ 'src/heap/incremental-marking.h',
+ 'src/ic/ic.h',
+ 'src/lookup.h',
+ 'src/parsing/parser.h',
+ 'src/parsing/preparser.h',
+ 'src/regexp/jsregexp.h',
+ 'src/snapshot/object-deserializer.h',
+ 'src/transitions.h',
+]
+AUTO_EXCLUDE_PATTERNS = [
+ 'src/base/atomicops_internals_.*',
+] + [
+ # platform-specific headers
+ '\\b{}\\b'.format(p) for p in
+ ('win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390', 'ppc')]
+
+args = None
+def parse_args():
+ global args
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-i', '--input', type=str, action='append',
+ help='Headers or directories to check (directories '
+ 'are scanned for headers recursively); default: ' +
+ ','.join(DEFAULT_INPUT))
+ parser.add_argument('-x', '--exclude', type=str, action='append',
+ help='Add an exclude pattern (regex)')
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help='Be verbose')
+ args = parser.parse_args()
+ args.exclude = (args.exclude or []) + AUTO_EXCLUDE_PATTERNS
+ args.exclude += ['^' + re.escape(x) + '$' for x in AUTO_EXCLUDE]
+ if not args.input:
+ args.input=DEFAULT_INPUT
+
+
+def printv(line):
+ if args.verbose:
+ print line
+
+
+def find_all_headers():
+ printv('Searching for headers...')
+ header_files = []
+ exclude_patterns = [re.compile(x) for x in args.exclude]
+ def add_recursively(filename):
+ full_name = os.path.join(V8_DIR, filename)
+ if not os.path.exists(full_name):
+ sys.exit('File does not exist: {}'.format(full_name))
+ if os.path.isdir(full_name):
+ for subfile in os.listdir(full_name):
+ full_name = os.path.join(filename, subfile)
+ printv('Scanning {}'.format(full_name))
+ add_recursively(full_name)
+ elif filename.endswith('.h'):
+ printv('--> Found header file {}'.format(filename))
+ for p in exclude_patterns:
+ if p.search(filename):
+ printv('--> EXCLUDED (matches {})'.format(p.pattern))
+ return
+ header_files.append(filename)
+
+ for filename in args.input:
+ add_recursively(filename)
+
+ return header_files
+
+
+def get_cc_file_name(header):
+ split = os.path.split(header)
+ header_dir = os.path.relpath(split[0], V8_DIR)
+ # Prefix with the directory name, to avoid collisions in the object files.
+ prefix = header_dir.replace(os.path.sep, '-')
+ cc_file_name = 'test-include-' + prefix + '-' + split[1][:-1] + 'cc'
+ return os.path.join(OUT_DIR, cc_file_name)
+
+
+def create_including_cc_files(header_files):
+ comment = 'check including this header in isolation'
+ for header in header_files:
+ cc_file_name = get_cc_file_name(header)
+ rel_cc_file_name = os.path.relpath(cc_file_name, V8_DIR)
+ content = '#include "{}" // {}\n'.format(header, comment)
+ if os.path.exists(cc_file_name):
+ with open(cc_file_name) as cc_file:
+ if cc_file.read() == content:
+ printv('File {} is up to date'.format(rel_cc_file_name))
+ continue
+ printv('Creating file {}'.format(rel_cc_file_name))
+ with open(cc_file_name, 'w') as cc_file:
+ cc_file.write(content)
+
+
+def generate_gni(header_files):
+ gni_file = os.path.join(OUT_DIR, 'sources.gni')
+ printv('Generating file "{}"'.format(os.path.relpath(gni_file, V8_DIR)))
+ with open(gni_file, 'w') as gn:
+ gn.write("""\
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This list is filled automatically by tools/check_header_includes.py.
+check_header_includes_sources = [
+""");
+ for header in header_files:
+ cc_file_name = get_cc_file_name(header)
+ gn.write(' "{}",\n'.format(os.path.relpath(cc_file_name, V8_DIR)))
+ gn.write(']\n')
+
+
+def main():
+ parse_args()
+ header_files = find_all_headers()
+ if not os.path.exists(OUT_DIR):
+ os.mkdir(OUT_DIR)
+ create_including_cc_files(header_files)
+ generate_gni(header_files)
+
+if __name__ == '__main__':
+ main()
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 75e80fef90..60215857c0 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -761,7 +761,7 @@ class MinidumpReader(object):
def IsValidExceptionStackAddress(self, address):
if not self.IsValidAddress(address): return False
- return self.isExceptionStackAddress(address)
+ return self.IsExceptionStackAddress(address)
def IsModuleAddress(self, address):
return self.GetModuleForAddress(address) != None
@@ -1231,9 +1231,6 @@ class Map(HeapObject):
def DependentCodeOffset(self):
return self.CodeCacheOffset() + self.heap.PointerSize()
- def WeakCellCacheOffset(self):
- return self.DependentCodeOffset() + self.heap.PointerSize()
-
def ReadByte(self, offset):
return self.heap.reader.ReadU8(self.address + offset)
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index fd3e5f1497..4ede5e9a45 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -76,6 +76,8 @@ const CATEGORIES = new Map([
'SHORT_EXTERNAL_STRING_TYPE',
'SLICED_ONE_BYTE_STRING_TYPE',
'SLICED_STRING_TYPE',
+ 'STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE',
+ 'STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE',
'STRING_TYPE',
'SYMBOL_TYPE',
'THIN_ONE_BYTE_STRING_TYPE',
@@ -134,20 +136,16 @@ const CATEGORIES = new Map([
'STRING_SPLIT_CACHE_TYPE',
'STRING_TABLE_TYPE',
'TRANSITION_ARRAY_TYPE',
- 'WEAK_CELL_TYPE',
'WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE',
])
],
[
'code', new Set([
'BUILTIN',
- 'BYTECODE_HANDLER',
- 'OPTIMIZED_FUNCTION',
- 'REGEXP',
- 'STUB',
'BYTECODE_ARRAY_CONSTANT_POOL_TYPE',
'BYTECODE_ARRAY_HANDLER_TABLE_TYPE',
'BYTECODE_ARRAY_TYPE',
+ 'BYTECODE_HANDLER',
'CODE_DATA_CONTAINER_TYPE',
'DEOPTIMIZATION_DATA_TYPE',
'EMBEDDED_OBJECT_TYPE',
@@ -167,15 +165,24 @@ const CATEGORIES = new Map([
'LOAD_HANDLER_TYPE',
'NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE',
'OPTIMIZED_CODE_LITERALS_TYPE',
+ 'OPTIMIZED_FUNCTION',
+ 'PRE_PARSED_SCOPE_DATA_TYPE',
+ 'REGEXP',
'RELOC_INFO_TYPE',
+ 'SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE',
+ 'SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE',
'SCRIPT_SOURCE_EXTERNAL_TYPE',
- 'SCRIPT_SOURCE_NON_EXTERNAL_TYPE',
+ 'SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE',
+ 'SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE',
'SCRIPT_TYPE',
'SHARED_FUNCTION_INFO_TYPE',
'SOURCE_POSITION_TABLE_TYPE',
'STORE_HANDLER_TYPE',
+ 'STUB',
+ 'UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE',
+ 'UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE',
'UNCOMPILED_JS_FUNCTION_TYPE',
- 'UNCOMPILED_SHARED_FUNCTION_INFO_TYPE',
+ 'UNCOMPILED_SHARED_FUNCTION_INFO_TYPE'
])
],
['unclassified', new Set()],
diff --git a/deps/v8/tools/heap-stats/trace-file-reader.js b/deps/v8/tools/heap-stats/trace-file-reader.js
index 03684978de..5c244a5e92 100644
--- a/deps/v8/tools/heap-stats/trace-file-reader.js
+++ b/deps/v8/tools/heap-stats/trace-file-reader.js
@@ -80,7 +80,16 @@ class TraceFileReader extends HTMLElement {
// Delay the loading a bit to allow for CSS animations to happen.
setTimeout(() => reader.readAsArrayBuffer(file), 10);
} else {
- reader.onload = (e) => this.processRawText(file, e.target.result);
+ reader.onload = (e) => {
+ try {
+ this.processRawText(file, e.target.result);
+ this.section.className = 'success';
+ this.$('#fileReader').classList.add('done');
+ } catch (err) {
+ console.error(err);
+ this.section.className = 'failure';
+ }
+ };
setTimeout(() => reader.readAsText(file), 10);
}
}
@@ -240,7 +249,7 @@ class TraceFileReader extends HTMLElement {
line = line.replace(/^I\/v8\s*\(\d+\):\s+/g, '');
return JSON.parse(line);
} catch (e) {
- console.log('Unable to parse line: \'' + line + '\'\' (' + e + ')');
+ console.log('Unable to parse line: \'' + line + '\' (' + e + ')');
}
return null;
});
diff --git a/deps/v8/tools/ic-processor.js b/deps/v8/tools/ic-processor.js
index 14f875bfdc..db1eef4295 100644
--- a/deps/v8/tools/ic-processor.js
+++ b/deps/v8/tools/ic-processor.js
@@ -61,7 +61,6 @@ function IcProcessor() {
parsers : propertyICParser,
processor: this.processPropertyIC.bind(this, "StoreInArrayLiteralIC") },
});
- this.deserializedEntriesNames_ = [];
this.profile_ = new Profile();
this.LoadIC = 0;
@@ -118,10 +117,6 @@ IcProcessor.prototype.addEntry = function(entry) {
IcProcessor.prototype.processCodeCreation = function(
type, kind, timestamp, start, size, name, maybe_func) {
- name = this.deserializedEntriesNames_[start] || name;
- if (name.startsWith("onComplete")) {
- console.log(name);
- }
if (maybe_func.length) {
var funcAddr = parseInt(maybe_func[0]);
var state = parseState(maybe_func[1]);
diff --git a/deps/v8/tools/map-processor.js b/deps/v8/tools/map-processor.js
index d7385cb6e9..c0731e8555 100644
--- a/deps/v8/tools/map-processor.js
+++ b/deps/v8/tools/map-processor.js
@@ -39,7 +39,6 @@ class MapProcessor extends LogReader {
processor: this.processMapDetails
}
};
- this.deserializedEntriesNames_ = [];
this.profile_ = new Profile();
this.timeline_ = new Timeline();
}
@@ -66,7 +65,7 @@ class MapProcessor extends LogReader {
this.processLogLine(line);
}
} catch(e) {
- console.log("Error occurred during parsing, trying to continue: " + e);
+ console.error("Error occurred during parsing, trying to continue: " + e);
}
return this.finalize();
}
@@ -108,10 +107,6 @@ class MapProcessor extends LogReader {
processCodeCreation(
type, kind, timestamp, start, size, name, maybe_func) {
- name = this.deserializedEntriesNames_[start] || name;
- if (name.startsWith("onComplete")) {
- console.log(name);
- }
if (maybe_func.length) {
let funcAddr = parseInt(maybe_func[0]);
let state = this.parseState(maybe_func[1]);
@@ -180,9 +175,6 @@ class MapProcessor extends LogReader {
}
createMap(id, time) {
- if (id == 0x1821257d1761) {
- console.log(id);
- }
let map = new V8Map(id, time);
this.timeline_.push(map);
return map;
diff --git a/deps/v8/tools/node/node_common.py b/deps/v8/tools/node/node_common.py
index de2e98d909..172e9df812 100755
--- a/deps/v8/tools/node/node_common.py
+++ b/deps/v8/tools/node/node_common.py
@@ -8,6 +8,7 @@ import pipes
import shutil
import stat
import subprocess
+import sys
DEPOT_TOOLS_URL = \
"https://chromium.googlesource.com/chromium/tools/depot_tools.git"
@@ -27,6 +28,10 @@ def EnsureDepotTools(v8_path, fetch_if_not_exist):
subprocess.check_call("git clone {} {}".format(
pipes.quote(DEPOT_TOOLS_URL),
pipes.quote(depot_tools)), shell=True)
+ # Using check_output to hide warning messages.
+ subprocess.check_output(
+ [sys.executable, gclient_path, "metrics", "--opt-out"],
+ cwd=depot_tools)
return depot_tools
return None
depot_tools = _Get(v8_path)
diff --git a/deps/v8/tools/predictable_wrapper.py b/deps/v8/tools/predictable_wrapper.py
index c357c13b41..2299a5195e 100644
--- a/deps/v8/tools/predictable_wrapper.py
+++ b/deps/v8/tools/predictable_wrapper.py
@@ -17,10 +17,14 @@ compared. Differences are reported as errors.
import sys
from testrunner.local import command
+from testrunner.local import utils
MAX_TRIES = 3
TIMEOUT = 120
+# Predictable mode works only when run on the host OS.
+command.setup(utils.GuessOS())
+
def main(args):
def allocation_str(stdout):
for line in reversed((stdout or '').splitlines()):
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js
index cddadaaf53..74b4b3bf66 100644
--- a/deps/v8/tools/profile.js
+++ b/deps/v8/tools/profile.js
@@ -1002,33 +1002,10 @@ JsonProfile.prototype.addSourcePositions = function(
};
};
-function unescapeString(s) {
- s = s.split("\\");
- for (var i = 1; i < s.length; i++) {
- if (s[i] === "") {
- // Double backslash.
- s[i] = "\\";
- } else if (i > 0 && s[i].startsWith("x")) {
- // Escaped Ascii character.
- s[i] = String.fromCharCode(parseInt(s[i].substring(1, 3), 16)) +
- s[i].substring(3);
- } else if (i > 0 && s[i].startsWith("u")) {
- // Escaped unicode character.
- s[i] = String.fromCharCode(parseInt(s[i].substring(1, 5), 16)) +
- s[i].substring(5);
- } else {
- if (i > 0 && s[i - 1] !== "\\") {
- printErr("Malformed source string");
- }
- }
- }
- return s.join("");
-}
-
JsonProfile.prototype.addScriptSource = function(script, url, source) {
this.scripts_[script] = {
- name : unescapeString(url),
- source : unescapeString(source)
+ name : url,
+ source : source
};
};
diff --git a/deps/v8/tools/profview/index.html b/deps/v8/tools/profview/index.html
index 32f7c7b70f..957b6d04d6 100644
--- a/deps/v8/tools/profview/index.html
+++ b/deps/v8/tools/profview/index.html
@@ -108,7 +108,7 @@ found in the LICENSE file. -->
<br>
<br>
<br>
-Copyright the V8 Authors - Last change to this page: 2017/02/15
+Copyright the V8 Authors - Last change to this page: 2018/08/13
</p>
</body>
diff --git a/deps/v8/tools/profview/profview.css b/deps/v8/tools/profview/profview.css
index 106bfe2885..eb77ef14d8 100644
--- a/deps/v8/tools/profview/profview.css
+++ b/deps/v8/tools/profview/profview.css
@@ -2,6 +2,11 @@ table.calltree {
width : 100%;
}
+td {
+ padding-top: 0.1em;
+ padding-bottom: 0.1em;
+}
+
.numeric {
width : 12ex;
}
@@ -14,27 +19,25 @@ body {
font-family: 'Roboto', sans-serif;
}
-div.code-type-chip {
- display : inline-block;
- padding : 0.0em;
+.tree-row-arrow {
+ margin-right: 0.2em;
+ text-align: right;
}
-span.code-type-chip {
+.code-type-chip {
border-radius : 1em;
- display : inline-block;
- padding : 0.1em;
+ padding : 0.2em;
background-color : #4040c0;
color: #ffffff;
font-size : small;
box-shadow: 0 2px 5px 0 rgba(0, 0, 0, 0.16), 0 2px 10px 0 rgba(0, 0, 0, 0.12);
}
-span.code-type-chip-space {
- width : 0.5ex;
- display : inline-block;
+.tree-row-name {
+ margin-left: 0.2em;
}
-span.codeid-link {
+.codeid-link {
text-decoration: underline;
cursor: pointer;
}
diff --git a/deps/v8/tools/profview/profview.js b/deps/v8/tools/profview/profview.js
index d480cd4a77..e976b00be3 100644
--- a/deps/v8/tools/profview/profview.js
+++ b/deps/v8/tools/profview/profview.js
@@ -8,28 +8,27 @@ function $(id) {
return document.getElementById(id);
}
-let components = [];
-
+let components;
function createViews() {
- components.push(new CallTreeView());
- components.push(new TimelineView());
- components.push(new HelpView());
- components.push(new SummaryView());
- components.push(new ModeBarView());
-
- main.setMode("summary");
+ components = [
+ new CallTreeView(),
+ new TimelineView(),
+ new HelpView(),
+ new SummaryView(),
+ new ModeBarView(),
+ ];
}
function emptyState() {
return {
file : null,
- mode : "none",
+ mode : null,
currentCodeId : null,
start : 0,
end : Infinity,
- timeLine : {
- width : 100,
- height : 100
+ timelineSize : {
+ width : 0,
+ height : 0
},
callTree : {
attribution : "js-exclude-bc",
@@ -120,22 +119,12 @@ let main = {
}
},
- setTimeLineDimensions(width, height) {
- if (width !== main.currentState.timeLine.width ||
- height !== main.currentState.timeLine.height) {
- let timeLine = Object.assign({}, main.currentState.timeLine);
- timeLine.width = width;
- timeLine.height = height;
- main.currentState = Object.assign({}, main.currentState);
- main.currentState.timeLine = timeLine;
- main.delayRender();
- }
- },
-
setFile(file) {
if (file !== main.currentState.file) {
- main.currentState = Object.assign({}, main.currentState);
+ let lastMode = main.currentState.mode || "summary";
+ main.currentState = emptyState();
main.currentState.file = file;
+ main.setMode(lastMode);
main.delayRender();
}
},
@@ -149,9 +138,7 @@ let main = {
},
onResize() {
- main.setTimeLineDimensions(
- Math.round(window.innerWidth - 20),
- Math.round(window.innerHeight / 5));
+ main.delayRender();
},
onLoad() {
@@ -160,9 +147,7 @@ let main = {
if (f) {
let reader = new FileReader();
reader.onload = function(event) {
- let profData = JSON.parse(event.target.result);
- main.setViewInterval(0, Infinity);
- main.setFile(profData);
+ main.setFile(JSON.parse(event.target.result));
};
reader.onerror = function(event) {
console.error(
@@ -176,7 +161,6 @@ let main = {
$("fileinput").addEventListener(
"change", loadHandler, false);
createViews();
- main.onResize();
},
delayRender() {
@@ -188,50 +172,51 @@ let main = {
}
};
-let bucketDescriptors =
+const CATEGORY_COLOR = "#f5f5f5";
+const bucketDescriptors =
[ { kinds : [ "JSOPT" ],
- color : "#00ff00",
- backgroundColor : "#c0ffc0",
+ color : "#64dd17",
+ backgroundColor : "#80e27e",
text : "JS Optimized" },
{ kinds : [ "JSUNOPT", "BC" ],
- color : "#ffb000",
- backgroundColor : "#ffe0c0",
+ color : "#dd2c00",
+ backgroundColor : "#ff9e80",
text : "JS Unoptimized" },
{ kinds : [ "IC" ],
- color : "#ffff00",
- backgroundColor : "#ffffc0",
+ color : "#ff6d00",
+ backgroundColor : "#ffab40",
text : "IC" },
{ kinds : [ "STUB", "BUILTIN", "REGEXP" ],
- color : "#ffb0b0",
- backgroundColor : "#fff0f0",
+ color : "#ffd600",
+ backgroundColor : "#ffea00",
text : "Other generated" },
{ kinds : [ "CPP", "LIB" ],
- color : "#0000ff",
- backgroundColor : "#c0c0ff",
+ color : "#304ffe",
+ backgroundColor : "#6ab7ff",
text : "C++" },
{ kinds : [ "CPPEXT" ],
- color : "#8080ff",
- backgroundColor : "#e0e0ff",
+ color : "#003c8f",
+ backgroundColor : "#c0cfff",
text : "C++/external" },
{ kinds : [ "CPPPARSE" ],
- color : "#b890f7",
- backgroundColor : "#ebdeff",
+ color : "#aa00ff",
+ backgroundColor : "#ffb2ff",
text : "C++/Parser" },
{ kinds : [ "CPPCOMPBC" ],
- color : "#52b0ce",
- backgroundColor : "#a5c8d4",
+ color : "#43a047",
+ backgroundColor : "#88c399",
text : "C++/Bytecode compiler" },
{ kinds : [ "CPPCOMP" ],
- color : "#00ffff",
- backgroundColor : "#c0ffff",
+ color : "#00e5ff",
+ backgroundColor : "#6effff",
text : "C++/Compiler" },
{ kinds : [ "CPPGC" ],
- color : "#ff00ff",
- backgroundColor : "#ffc0ff",
+ color : "#6200ea",
+ backgroundColor : "#e1bee7",
text : "C++/GC" },
{ kinds : [ "UNKNOWN" ],
- color : "#f0f0f0",
- backgroundColor : "#e0e0e0",
+ color : "#bdbdbd",
+ backgroundColor : "#efefef",
text : "Unknown" }
];
@@ -260,13 +245,13 @@ function codeTypeToText(type) {
case "UNKNOWN":
return "Unknown";
case "CPPPARSE":
- return "C++ (parser)";
+ return "C++ Parser";
case "CPPCOMPBC":
- return "C++ (bytecode compiler)";
+      return "C++ Bytecode Compiler";
case "CPPCOMP":
- return "C++ (compiler)";
+ return "C++ Compiler";
case "CPPGC":
- return "C++";
+ return "C++ GC";
case "CPPEXT":
return "C++ External";
case "CPP":
@@ -291,27 +276,15 @@ function codeTypeToText(type) {
console.error("Unknown type: " + type);
}
-function createTypeDiv(type) {
+function createTypeNode(type) {
if (type === "CAT") {
return document.createTextNode("");
}
- let div = document.createElement("div");
- div.classList.add("code-type-chip");
-
let span = document.createElement("span");
span.classList.add("code-type-chip");
span.textContent = codeTypeToText(type);
- div.appendChild(span);
-
- span = document.createElement("span");
- span.classList.add("code-type-chip-space");
- div.appendChild(span);
-
- return div;
-}
-function isBytecodeHandler(kind) {
- return kind === "BytecodeHandler";
+ return span;
}
function filterFromFilterId(id) {
@@ -322,31 +295,42 @@ function filterFromFilterId(id) {
return (type, kind) => type !== 'CODE';
case "js-exclude-bc":
return (type, kind) =>
- type !== 'CODE' || !isBytecodeHandler(kind);
+ type !== 'CODE' || kind !== "BytecodeHandler";
}
}
-function createTableExpander(indent) {
+function createIndentNode(indent) {
let div = document.createElement("div");
- div.style.width = (indent + 0.5) + "em";
div.style.display = "inline-block";
- div.style.textAlign = "right";
+ div.style.width = (indent + 0.5) + "em";
return div;
}
+function createArrowNode() {
+ let span = document.createElement("span");
+ span.classList.add("tree-row-arrow");
+ return span;
+}
+
function createFunctionNode(name, codeId) {
- if (codeId === -1) {
- return document.createTextNode(name);
- }
let nameElement = document.createElement("span");
- nameElement.classList.add("codeid-link");
- nameElement.onclick = function() {
- main.setCurrentCode(codeId);
- };
nameElement.appendChild(document.createTextNode(name));
+ nameElement.classList.add("tree-row-name");
+ if (codeId !== -1) {
+ nameElement.classList.add("codeid-link");
+ nameElement.onclick = (event) => {
+ main.setCurrentCode(codeId);
+ // Prevent the click from bubbling to the row and causing it to
+ // collapse/expand.
+ event.stopPropagation();
+ };
+ }
return nameElement;
}
+const COLLAPSED_ARROW = "\u25B6";
+const EXPANDED_ARROW = "\u25BC";
+
class CallTreeView {
constructor() {
this.element = $("calltree");
@@ -400,22 +384,19 @@ class CallTreeView {
}
expandTree(tree, indent) {
- let that = this;
let index = 0;
let id = "R/";
let row = tree.row;
- let expander = tree.expander;
if (row) {
index = row.rowIndex;
id = row.id;
- // Make sure we collapse the children when the row is clicked
- // again.
- expander.textContent = "\u25BE";
- let expandHandler = expander.onclick;
- expander.onclick = () => {
- that.collapseRow(tree, expander, expandHandler);
+ tree.arrow.textContent = EXPANDED_ARROW;
+ // Collapse the children when the row is clicked again.
+ let expandHandler = row.onclick;
+ row.onclick = () => {
+ this.collapseRow(tree, expandHandler);
}
}
@@ -439,7 +420,9 @@ class CallTreeView {
let row = this.rows.insertRow(index);
row.id = id + i + "/";
- if (node.type !== "CAT") {
+ if (node.type === "CAT") {
+ row.style.backgroundColor = CATEGORY_COLOR;
+ } else {
row.style.backgroundColor = bucketFromKind(node.type).backgroundColor;
}
@@ -460,9 +443,10 @@ class CallTreeView {
// Create the name cell.
let nameCell = row.insertCell();
- let expander = createTableExpander(indent + 1);
- nameCell.appendChild(expander);
- nameCell.appendChild(createTypeDiv(node.type));
+ nameCell.appendChild(createIndentNode(indent + 1));
+ let arrow = createArrowNode();
+ nameCell.appendChild(arrow);
+ nameCell.appendChild(createTypeNode(node.type));
nameCell.appendChild(createFunctionNode(node.name, node.codeId));
// Inclusive ticks cell.
@@ -476,18 +460,18 @@ class CallTreeView {
c.style.textAlign = "right";
}
if (node.children.length > 0) {
- expander.textContent = "\u25B8";
- expander.onclick = () => { that.expandTree(node, indent + 1); };
+ arrow.textContent = COLLAPSED_ARROW;
+ row.onclick = () => { this.expandTree(node, indent + 1); };
}
node.row = row;
- node.expander = expander;
+ node.arrow = arrow;
index++;
}
}
- collapseRow(tree, expander, expandHandler) {
+ collapseRow(tree, expandHandler) {
let row = tree.row;
let id = row.id;
let index = row.rowIndex;
@@ -496,8 +480,8 @@ class CallTreeView {
this.rows.deleteRow(index);
}
- expander.textContent = "\u25B8";
- expander.onclick = expandHandler;
+ tree.arrow.textContent = COLLAPSED_ARROW;
+ row.onclick = expandHandler;
}
fillSelects(mode, calltree) {
@@ -809,10 +793,12 @@ class TimelineView {
return;
}
- this.currentState = newState;
+ let width = Math.round(window.innerWidth - 20);
+ let height = Math.round(window.innerHeight / 5);
+
if (oldState) {
- if (newState.timeLine.width === oldState.timeLine.width &&
- newState.timeLine.height === oldState.timeLine.height &&
+ if (width === oldState.timelineSize.width &&
+ height === oldState.timelineSize.height &&
newState.file === oldState.file &&
newState.currentCodeId === oldState.currentCodeId &&
newState.start === oldState.start &&
@@ -821,21 +807,27 @@ class TimelineView {
return;
}
}
+ this.currentState = newState;
+ this.currentState.timelineSize.width = width;
+ this.currentState.timelineSize.height = height;
this.element.style.display = "inherit";
+ let file = this.currentState.file;
+
+ const minPixelsPerBucket = 10;
+ const minTicksPerBucket = 8;
+ let maxBuckets = Math.round(file.ticks.length / minTicksPerBucket);
+ let bucketCount = Math.min(
+ Math.round(width / minPixelsPerBucket), maxBuckets);
+
// Make sure the canvas has the right dimensions.
- let width = this.currentState.timeLine.width;
- let height = this.currentState.timeLine.height;
this.canvas.width = width;
this.canvas.height = height;
// Make space for the selection text.
height -= this.imageOffset;
- let file = this.currentState.file;
- if (!file) return;
-
let currentCodeId = this.currentState.currentCodeId;
let firstTime = file.ticks[0].tm;
@@ -846,13 +838,6 @@ class TimelineView {
this.selectionStart = (start - firstTime) / (lastTime - firstTime) * width;
this.selectionEnd = (end - firstTime) / (lastTime - firstTime) * width;
- let tickCount = file.ticks.length;
-
- let minBucketPixels = 10;
- let minBucketSamples = 30;
- let bucketCount = Math.min(width / minBucketPixels,
- tickCount / minBucketSamples);
-
let stackProcessor = new CategorySampler(file, bucketCount);
generateTree(file, 0, Infinity, stackProcessor);
let codeIdProcessor = new FunctionTimelineProcessor(
@@ -873,28 +858,36 @@ class TimelineView {
let sum = 0;
let bucketData = [];
let total = buckets[i].total;
- for (let j = 0; j < bucketDescriptors.length; j++) {
- let desc = bucketDescriptors[j];
- for (let k = 0; k < desc.kinds.length; k++) {
- sum += buckets[i][desc.kinds[k]];
+ if (total > 0) {
+ for (let j = 0; j < bucketDescriptors.length; j++) {
+ let desc = bucketDescriptors[j];
+ for (let k = 0; k < desc.kinds.length; k++) {
+ sum += buckets[i][desc.kinds[k]];
+ }
+ bucketData.push(Math.round(graphHeight * sum / total));
+ }
+ } else {
+ // No ticks fell into this bucket. Fill with "Unknown."
+ for (let j = 0; j < bucketDescriptors.length; j++) {
+ let desc = bucketDescriptors[j];
+ bucketData.push(desc.text === "Unknown" ? graphHeight : 0);
}
- bucketData.push(Math.round(graphHeight * sum / total));
}
bucketsGraph.push(bucketData);
}
// Draw the category graph into the buffer.
- let bucketWidth = width / bucketsGraph.length;
+ let bucketWidth = width / (bucketsGraph.length - 1);
let ctx = buffer.getContext('2d');
for (let i = 0; i < bucketsGraph.length - 1; i++) {
let bucketData = bucketsGraph[i];
let nextBucketData = bucketsGraph[i + 1];
+ let x1 = Math.round(i * bucketWidth);
+ let x2 = Math.round((i + 1) * bucketWidth);
for (let j = 0; j < bucketData.length; j++) {
- let x1 = Math.round(i * bucketWidth);
- let x2 = Math.round((i + 1) * bucketWidth);
ctx.beginPath();
- ctx.moveTo(x1, j && bucketData[j - 1]);
- ctx.lineTo(x2, j && nextBucketData[j - 1]);
+ ctx.moveTo(x1, j > 0 ? bucketData[j - 1] : 0);
+ ctx.lineTo(x2, j > 0 ? nextBucketData[j - 1] : 0);
ctx.lineTo(x2, nextBucketData[j]);
ctx.lineTo(x1, bucketData[j]);
ctx.closePath();
@@ -1114,22 +1107,22 @@ class SummaryView {
return row;
}
- function makeCollapsible(row, expander) {
- expander.textContent = "\u25BE";
- let expandHandler = expander.onclick;
- expander.onclick = () => {
+ function makeCollapsible(row, arrow) {
+ arrow.textContent = EXPANDED_ARROW;
+ let expandHandler = row.onclick;
+ row.onclick = () => {
let id = row.id;
let index = row.rowIndex + 1;
while (index < rows.rows.length &&
rows.rows[index].id.startsWith(id)) {
rows.deleteRow(index);
}
- expander.textContent = "\u25B8";
- expander.onclick = expandHandler;
+ arrow.textContent = COLLAPSED_ARROW;
+ row.onclick = expandHandler;
}
}
- function expandDeoptInstances(row, expander, instances, indent, kind) {
+ function expandDeoptInstances(row, arrow, instances, indent, kind) {
let index = row.rowIndex;
for (let i = 0; i < instances.length; i++) {
let childRow = rows.insertRow(index + 1);
@@ -1145,18 +1138,19 @@ class SummaryView {
document.createTextNode("Reason: " + deopt.reason));
reasonCell.style.textIndent = indent + "em";
}
- makeCollapsible(row, expander);
+ makeCollapsible(row, arrow);
}
- function expandDeoptFunctionList(row, expander, list, indent, kind) {
+ function expandDeoptFunctionList(row, arrow, list, indent, kind) {
let index = row.rowIndex;
for (let i = 0; i < list.length; i++) {
let childRow = rows.insertRow(index + 1);
childRow.id = row.id + i + "/";
let textCell = childRow.insertCell(-1);
- let expander = createTableExpander(indent);
- textCell.appendChild(expander);
+ textCell.appendChild(createIndentNode(indent));
+ let childArrow = createArrowNode();
+ textCell.appendChild(childArrow);
textCell.appendChild(
createFunctionNode(list[i].f.name, list[i].f.codes[0]));
@@ -1164,16 +1158,16 @@ class SummaryView {
numberCell.textContent = list[i].instances.length;
numberCell.style.textIndent = indent + "em";
- expander.textContent = "\u25B8";
- expander.onclick = () => {
+ childArrow.textContent = COLLAPSED_ARROW;
+ childRow.onclick = () => {
expandDeoptInstances(
- childRow, expander, list[i].instances, indent + 1);
+ childRow, childArrow, list[i].instances, indent + 1);
};
}
- makeCollapsible(row, expander);
+ makeCollapsible(row, arrow);
}
- function expandOptimizedFunctionList(row, expander, list, indent, kind) {
+ function expandOptimizedFunctionList(row, arrow, list, indent, kind) {
let index = row.rowIndex;
for (let i = 0; i < list.length; i++) {
let childRow = rows.insertRow(index + 1);
@@ -1188,17 +1182,19 @@ class SummaryView {
numberCell.textContent = list[i].instances.length;
numberCell.style.textIndent = indent + "em";
}
- makeCollapsible(row, expander);
+ makeCollapsible(row, arrow);
}
function addExpandableRow(text, list, indent, kind) {
let row = rows.insertRow(-1);
row.id = "opt-table/" + kind + "/";
+ row.style.backgroundColor = CATEGORY_COLOR;
let textCell = row.insertCell(-1);
- let expander = createTableExpander(indent);
- textCell.appendChild(expander);
+ textCell.appendChild(createIndentNode(indent));
+ let arrow = createArrowNode();
+ textCell.appendChild(arrow);
textCell.appendChild(document.createTextNode(text));
let numberCell = row.insertCell(-1);
@@ -1208,16 +1204,16 @@ class SummaryView {
}
if (list.count > 0) {
- expander.textContent = "\u25B8";
+ arrow.textContent = COLLAPSED_ARROW;
if (kind === "opt") {
- expander.onclick = () => {
+ row.onclick = () => {
expandOptimizedFunctionList(
- row, expander, list.functions, indent + 1, kind);
+ row, arrow, list.functions, indent + 1, kind);
};
} else {
- expander.onclick = () => {
+ row.onclick = () => {
expandDeoptFunctionList(
- row, expander, list.functions, indent + 1, kind);
+ row, arrow, list.functions, indent + 1, kind);
};
}
}
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index caee70308c..e5c2114b1a 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -221,7 +221,8 @@ class LandBranch(Step):
else:
self.GitUpload(author=self._options.author,
force=True,
- bypass_hooks=True)
+ bypass_hooks=True,
+ private=True)
cmd = "cl land --bypass-hooks -f"
if self._options.dry_run:
print "Dry run. Command:\ngit %s" % cmd
diff --git a/deps/v8/tools/release/filter_build_files.py b/deps/v8/tools/release/filter_build_files.py
index 9cc6607108..7d3f22138a 100755
--- a/deps/v8/tools/release/filter_build_files.py
+++ b/deps/v8/tools/release/filter_build_files.py
@@ -35,7 +35,7 @@ SUPPLEMENTARY_FILES = [
LIBRARY_FILES = {
'android': ['*.a', '*.so'],
'linux': ['*.a', '*.so'],
- 'mac': ['*.a', '*.so', '*.dylib'],
+ 'mac': ['*.a', '*.so'],
'win': ['*.lib', '*.dll'],
}
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index f3ac16058c..9deaee891b 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -206,8 +206,8 @@ class GitRecipesMixin(object):
self.Git(MakeArgs(args), **kwargs)
def GitUpload(self, reviewer="", author="", force=False, cq=False,
- cq_dry_run=False, bypass_hooks=False, cc="", tbr_reviewer="",
- **kwargs):
+ cq_dry_run=False, bypass_hooks=False, cc="", private=False,
+ tbr_reviewer="", **kwargs):
args = ["cl upload --send-mail"]
if author:
args += ["--email", Quoted(author)]
@@ -226,6 +226,8 @@ class GitRecipesMixin(object):
if cc:
args += ["--cc", Quoted(cc)]
args += ["--gerrit"]
+ if private:
+ args += ["--private"]
# TODO(machenbach): Check output in forced mode. Verify that all required
# base files were uploaded, if not retry.
self.Git(MakeArgs(args), pipe=False, **kwargs)
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 86326bfda2..565b2b7c8f 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -934,7 +934,7 @@ TBR=reviewer@chromium.org"""
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git cl upload --send-mail --email \"author@chromium.org\" "
- "-f --bypass-hooks --gerrit", ""),
+ "-f --bypass-hooks --gerrit --private", ""),
Cmd("git cl land --bypass-hooks -f", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index e0a9fc3b59..310bd8a008 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -56,7 +56,7 @@ A suite without "tests" is considered a performance test itself.
Full example (suite with one runner):
{
"path": ["."],
- "owner": ["username@chromium.org"],
+ "owners": ["username@chromium.org"],
"flags": ["--expose-gc"],
"test_flags": ["5"],
"archs": ["ia32", "x64"],
@@ -76,7 +76,7 @@ Full example (suite with one runner):
Full example (suite with several runners):
{
"path": ["."],
- "owner": ["username@chromium.org", "otherowner@google.com"],
+ "owners": ["username@chromium.org", "otherowner@google.com"],
"flags": ["--expose-gc"],
"archs": ["ia32", "x64"],
"run_count": 5,
@@ -109,6 +109,7 @@ import re
import subprocess
import sys
+from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
@@ -126,25 +127,6 @@ RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
-def LoadAndroidBuildTools(path): # pragma: no cover
- assert os.path.exists(path)
- sys.path.insert(0, path)
-
- import devil_chromium
- from devil.android import device_errors # pylint: disable=import-error
- from devil.android import device_utils # pylint: disable=import-error
- from devil.android.sdk import adb_wrapper # pylint: disable=import-error
- from devil.android.perf import cache_control # pylint: disable=import-error
- from devil.android.perf import perf_control # pylint: disable=import-error
- global adb_wrapper
- global cache_control
- global device_errors
- global device_utils
- global perf_control
-
- devil_chromium.Initialize()
-
-
def GeometricMean(values):
"""Returns the geometric mean of a list of values.
@@ -257,8 +239,7 @@ def RunResultsProcessor(results_processor, stdout, count):
stderr=subprocess.PIPE,
)
result, _ = p.communicate(input=stdout)
- print ">>> Processed stdout (#%d):" % count
- print result
+ logging.info(">>> Processed stdout (#%d):\n%s", count, result)
return result
@@ -504,7 +485,7 @@ class RunnableConfig(GraphConfig):
# TODO(machenbach): This requires +.exe if run on windows.
extra_flags = extra_flags or []
if self.binary != 'd8' and '--prof' in extra_flags:
- print "Profiler supported only on a benchmark run with d8"
+ logging.info("Profiler supported only on a benchmark run with d8")
if self.process_size:
cmd_prefix = ["/usr/bin/time", "--format=MaxMemory: %MKB"] + cmd_prefix
@@ -630,10 +611,19 @@ class Platform(object):
self.shell_dir = options.shell_dir
self.shell_dir_secondary = options.shell_dir_secondary
self.extra_flags = options.extra_flags.split()
+ self.options = options
+
+ @staticmethod
+ def ReadBuildConfig(options):
+ config_path = os.path.join(options.shell_dir, 'v8_build_config.json')
+ if not os.path.isfile(config_path):
+ return {}
+ with open(config_path) as f:
+ return json.load(f)
@staticmethod
def GetPlatform(options):
- if options.android_build_tools:
+ if Platform.ReadBuildConfig(options).get('is_android', False):
return AndroidPlatform(options)
else:
return DesktopPlatform(options)
@@ -664,6 +654,9 @@ class DesktopPlatform(Platform):
super(DesktopPlatform, self).__init__(options)
self.command_prefix = []
+ # Setup command class to OS specific version.
+ command.setup(utils.GuessOS())
+
if options.prioritize or options.affinitize != None:
self.command_prefix = ["schedtool"]
if options.prioritize:
@@ -695,26 +688,24 @@ class DesktopPlatform(Platform):
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
try:
output = cmd.execute()
- except OSError as e: # pragma: no cover
- print title % "OSError"
- print e
+ except OSError: # pragma: no cover
+ logging.exception(title % "OSError")
return ""
- print title % "Stdout"
- print output.stdout
+ logging.info(title % "Stdout" + "\n%s", output.stdout)
if output.stderr: # pragma: no cover
# Print stderr for debugging.
- print title % "Stderr"
- print output.stderr
+ logging.info(title % "Stderr" + "\n%s", output.stderr)
if output.timed_out:
- print ">>> Test timed out after %ss." % runnable.timeout
+ logging.warning(">>> Test timed out after %ss.", runnable.timeout)
if '--prof' in self.extra_flags:
os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
if os_prefix:
tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
subprocess.check_call(tick_tools + " --only-summary", shell=True)
else: # pragma: no cover
- print "Profiler option currently supported on Linux and Mac OS."
+ logging.warning(
+ "Profiler option currently supported on Linux and Mac OS.")
# time outputs to stderr
if runnable.process_size:
@@ -723,95 +714,17 @@ class DesktopPlatform(Platform):
class AndroidPlatform(Platform): # pragma: no cover
- DEVICE_DIR = "/data/local/tmp/v8/"
def __init__(self, options):
super(AndroidPlatform, self).__init__(options)
- LoadAndroidBuildTools(options.android_build_tools)
-
- if not options.device:
- # Detect attached device if not specified.
- devices = adb_wrapper.AdbWrapper.Devices()
- assert devices and len(devices) == 1, (
- "None or multiple devices detected. Please specify the device on "
- "the command-line with --device")
- options.device = str(devices[0])
- self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
- self.device = device_utils.DeviceUtils(self.adb_wrapper)
+ self.driver = android.android_driver(options.device)
def PreExecution(self):
- perf = perf_control.PerfControl(self.device)
- perf.SetHighPerfMode()
-
- # Remember what we have already pushed to the device.
- self.pushed = set()
+ self.driver.set_high_perf_mode()
def PostExecution(self):
- perf = perf_control.PerfControl(self.device)
- perf.SetDefaultPerfMode()
- self.device.RemovePath(
- AndroidPlatform.DEVICE_DIR, force=True, recursive=True)
-
- def _PushFile(self, host_dir, file_name, target_rel=".",
- skip_if_missing=False):
- file_on_host = os.path.join(host_dir, file_name)
- file_on_device_tmp = os.path.join(
- AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
- file_on_device = os.path.join(
- AndroidPlatform.DEVICE_DIR, target_rel, file_name)
- folder_on_device = os.path.dirname(file_on_device)
-
- # Only attempt to push files that exist.
- if not os.path.exists(file_on_host):
- if not skip_if_missing:
- logging.critical('Missing file on host: %s' % file_on_host)
- return
-
- # Only push files not yet pushed in one execution.
- if file_on_host in self.pushed:
- return
- else:
- self.pushed.add(file_on_host)
-
- # Work-around for "text file busy" errors. Push the files to a temporary
- # location and then copy them with a shell command.
- output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
- # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
- # Errors look like this: "failed to copy ... ".
- if output and not re.search('^[0-9]', output.splitlines()[-1]):
- logging.critical('PUSH FAILED: ' + output)
- self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
- self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))
-
- def _PushExecutable(self, shell_dir, target_dir, binary):
- self._PushFile(shell_dir, binary, target_dir)
-
- # Push external startup data. Backwards compatible for revisions where
- # these files didn't exist.
- self._PushFile(
- shell_dir,
- "natives_blob.bin",
- target_dir,
- skip_if_missing=True,
- )
- self._PushFile(
- shell_dir,
- "snapshot_blob.bin",
- target_dir,
- skip_if_missing=True,
- )
- self._PushFile(
- shell_dir,
- "snapshot_blob_trusted.bin",
- target_dir,
- skip_if_missing=True,
- )
- self._PushFile(
- shell_dir,
- "icudtl.dat",
- target_dir,
- skip_if_missing=True,
- )
+ self.driver.set_default_perf_mode()
+ self.driver.tear_down()
def PreTests(self, node, path):
if isinstance(node, RunnableConfig):
@@ -824,25 +737,21 @@ class AndroidPlatform(Platform): # pragma: no cover
bench_rel = "."
bench_abs = suite_dir
- self._PushExecutable(self.shell_dir, "bin", node.binary)
+ self.driver.push_executable(self.shell_dir, "bin", node.binary)
if self.shell_dir_secondary:
- self._PushExecutable(
+ self.driver.push_executable(
self.shell_dir_secondary, "bin_secondary", node.binary)
if isinstance(node, RunnableConfig):
- self._PushFile(bench_abs, node.main, bench_rel)
+ self.driver.push_file(bench_abs, node.main, bench_rel)
for resource in node.resources:
- self._PushFile(bench_abs, resource, bench_rel)
+ self.driver.push_file(bench_abs, resource, bench_rel)
def _Run(self, runnable, count, secondary=False):
suffix = ' - secondary' if secondary else ''
target_dir = "bin_secondary" if secondary else "bin"
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
- cache = cache_control.CacheControl(self.device)
- cache.DropRamCaches()
- binary_on_device = os.path.join(
- AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
- cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)
+ self.driver.drop_ram_caches()
# Relative path to benchmark directory.
if runnable.path:
@@ -850,19 +759,29 @@ class AndroidPlatform(Platform): # pragma: no cover
else:
bench_rel = "."
+ logcat_file = None
+ if self.options.dump_logcats_to:
+ runnable_name = '-'.join(runnable.graphs)
+ logcat_file = os.path.join(
+ self.options.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
+ runnable_name, count + 1, '-secondary' if secondary else ''))
+ logging.debug('Dumping logcat into %s', logcat_file)
+
try:
- output = self.device.RunShellCommand(
- cmd,
- cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
- check_return=True,
+ stdout = self.driver.run(
+ target_dir=target_dir,
+ binary=runnable.binary,
+ args=runnable.GetCommandFlags(self.extra_flags),
+ rel_path=bench_rel,
timeout=runnable.timeout,
- retries=0,
+ logcat_file=logcat_file,
)
- stdout = "\n".join(output)
- print title % "Stdout"
- print stdout
- except device_errors.CommandTimeoutError:
- print ">>> Test timed out after %ss." % runnable.timeout
+ logging.info(title % "Stdout" + "\n%s", stdout)
+ except android.CommandFailedException as e:
+ logging.info(title % "Stdout" + "\n%s", e.output)
+ raise
+ except android.TimeoutException:
+ logging.warning(">>> Test timed out after %ss.", runnable.timeout)
stdout = ""
if runnable.process_size:
return stdout + "MaxMemory: Unsupported"
@@ -895,19 +814,19 @@ class CustomMachineConfiguration:
try:
with open("/proc/sys/kernel/randomize_va_space", "r") as f:
return int(f.readline().strip())
- except Exception as e:
- print "Failed to get current ASLR settings."
- raise e
+ except Exception:
+ logging.exception("Failed to get current ASLR settings.")
+ raise
@staticmethod
def SetASLR(value):
try:
with open("/proc/sys/kernel/randomize_va_space", "w") as f:
f.write(str(value))
- except Exception as e:
- print "Failed to update ASLR to %s." % value
- print "Are we running under sudo?"
- raise e
+ except Exception:
+ logging.exception(
+ "Failed to update ASLR to %s. Are we running under sudo?", value)
+ raise
new_value = CustomMachineConfiguration.GetASLR()
if value != new_value:
@@ -922,9 +841,9 @@ class CustomMachineConfiguration:
if len(r) == 1:
return range(r[0], r[0] + 1)
return range(r[0], r[1] + 1)
- except Exception as e:
- print "Failed to retrieve number of CPUs."
- raise e
+ except Exception:
+ logging.exception("Failed to retrieve number of CPUs.")
+ raise
@staticmethod
def GetCPUPathForId(cpu_index):
@@ -948,10 +867,10 @@ class CustomMachineConfiguration:
elif ret != val:
raise Exception("CPU cores have differing governor settings")
return ret
- except Exception as e:
- print "Failed to get the current CPU governor."
- print "Is the CPU governor disabled? Check BIOS."
- raise e
+ except Exception:
+ logging.exception("Failed to get the current CPU governor. Is the CPU "
+ "governor disabled? Check BIOS.")
+ raise
@staticmethod
def SetCPUGovernor(value):
@@ -962,10 +881,10 @@ class CustomMachineConfiguration:
with open(cpu_device, "w") as f:
f.write(value)
- except Exception as e:
- print "Failed to change CPU governor to %s." % value
- print "Are we running under sudo?"
- raise e
+ except Exception:
+ logging.exception("Failed to change CPU governor to %s. Are we "
+ "running under sudo?", value)
+ raise
cur_value = CustomMachineConfiguration.GetCPUGovernor()
if cur_value != value:
@@ -973,17 +892,15 @@ class CustomMachineConfiguration:
% cur_value )
def Main(args):
- logging.getLogger().setLevel(logging.INFO)
parser = optparse.OptionParser()
- parser.add_option("--android-build-tools",
- help="Path to chromium's build/android. Specifying this "
- "option will run tests using android platform.")
+ parser.add_option("--android-build-tools", help="Deprecated.")
parser.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="x64")
parser.add_option("--buildbot",
- help="Adapt to path structure used on buildbots",
+                    help="Adapt to path structure used on buildbots and add "
+ "timestamps/level to all logged status messages",
default=False, action="store_true")
parser.add_option("--device",
help="The device ID to run Android tests on. If not given "
@@ -1034,9 +951,21 @@ def Main(args):
"--filter=JSTests/TypedArrays/ will run only TypedArray "
"benchmarks from the JSTests suite.",
default="")
+ parser.add_option("--run-count-multiplier", default=1, type="int",
+                    help="Multiplier used to increase the number of times each test "
+ "is retried.")
+ parser.add_option("--dump-logcats-to",
+ help="Writes logcat output from each test into specified "
+ "directory. Only supported for android targets.")
(options, args) = parser.parse_args(args)
+ if options.buildbot:
+ logging.basicConfig(
+ level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
+ else:
+ logging.basicConfig(level=logging.INFO, format="%(message)s")
+
if len(args) == 0: # pragma: no cover
parser.print_help()
return 1
@@ -1045,17 +974,13 @@ def Main(args):
options.arch = ARCH_GUESS
if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
- print "Unknown architecture %s" % options.arch
- return 1
-
- if options.device and not options.android_build_tools: # pragma: no cover
- print "Specifying a device requires Android build tools."
+ logging.error("Unknown architecture %s", options.arch)
return 1
if (options.json_test_results_secondary and
not options.outdir_secondary): # pragma: no cover
- print("For writing secondary json test results, a secondary outdir patch "
- "must be specified.")
+    logging.error("For writing secondary json test results, a secondary outdir "
+                  "path must be specified.")
return 1
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
@@ -1070,10 +995,10 @@ def Main(args):
default_binary_name = "d8"
else:
if not os.path.isfile(options.binary_override_path):
- print "binary-override-path must be a file name"
+ logging.error("binary-override-path must be a file name")
return 1
if options.outdir_secondary:
- print "specify either binary-override-path or outdir-secondary"
+ logging.error("specify either binary-override-path or outdir-secondary")
return 1
options.shell_dir = os.path.abspath(
os.path.dirname(options.binary_override_path))
@@ -1132,11 +1057,12 @@ def Main(args):
if (not runnable_name.startswith(options.filter) and
runnable_name + "/" != options.filter):
continue
- print ">>> Running suite: %s" % runnable_name
+ logging.info(">>> Running suite: %s", runnable_name)
def Runner():
"""Output generator that reruns several times."""
- for i in xrange(0, max(1, runnable.run_count)):
+ total_runs = runnable.run_count * options.run_count_multiplier
+ for i in xrange(0, max(1, total_runs)):
# TODO(machenbach): Allow timeout per arch like with run_count per
# arch.
yield platform.Run(runnable, i)
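
For orientation, here is a minimal sketch (not part of the patch) of the platform-selection behaviour the run_perf.py hunk above introduces: the Android platform is now chosen from the generated `v8_build_config.json` next to the shell binary, rather than from the removed `--android-build-tools` flag. The file name and the `is_android` key come from the hunk itself; the output-directory path in the example is illustrative.

```python
# Minimal sketch of the new platform selection in run_perf.py (illustrative
# paths; the real code is Platform.ReadBuildConfig/GetPlatform above).
import json
import os

def read_build_config(shell_dir):
  config_path = os.path.join(shell_dir, 'v8_build_config.json')
  if not os.path.isfile(config_path):
    return {}  # older build outputs without the file fall back to desktop
  with open(config_path) as f:
    return json.load(f)

def is_android_build(shell_dir):
  return read_build_config(shell_dir).get('is_android', False)

# Example: a GN Android build writes {"is_android": true, ...} into its output
# directory, so is_android_build('out/android.release') would return True.
```
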
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index d7d2373f2d..481cc10134 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -19,6 +19,7 @@ sys.path.insert(
os.path.dirname(os.path.abspath(__file__))))
+from testrunner.local import command
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
@@ -171,11 +172,12 @@ class BuildConfig(object):
else:
self.arch = build_config['v8_target_cpu']
- self.is_debug = build_config['is_debug']
self.asan = build_config['is_asan']
self.cfi_vptr = build_config['is_cfi']
self.dcheck_always_on = build_config['dcheck_always_on']
self.gcov_coverage = build_config['is_gcov_coverage']
+ self.is_android = build_config['is_android']
+ self.is_debug = build_config['is_debug']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
self.no_snap = not build_config['v8_use_snapshot']
@@ -221,6 +223,7 @@ class BaseTestRunner(object):
self.build_config = None
self.mode_name = None
self.mode_options = None
+ self.target_os = None
def execute(self, sys_args=None):
if sys_args is None: # pragma: no cover
@@ -234,6 +237,7 @@ class BaseTestRunner(object):
print ' '.join(sys.argv)
self._load_build_config(options)
+ command.setup(self.target_os)
try:
self._process_default_options(options)
@@ -256,6 +260,8 @@ class BaseTestRunner(object):
return utils.EXIT_CODE_INTERNAL_ERROR
except KeyboardInterrupt:
return utils.EXIT_CODE_INTERRUPTED
+ finally:
+ command.tear_down()
def _create_parser(self):
parser = optparse.OptionParser()
@@ -369,6 +375,13 @@ class BaseTestRunner(object):
print '>>> Autodetected:'
print self.build_config
+ # Represents the OS where tests are run on. Same as host OS except for
+ # Android, which is determined by build output.
+ if self.build_config.is_android:
+ self.target_os = 'android'
+ else:
+ self.target_os = utils.GuessOS()
+
# Returns possible build paths in order:
# gn
# outdir
@@ -463,7 +476,11 @@ class BaseTestRunner(object):
'build directory (%s) instead.' % self.outdir)
if options.j == 0:
- options.j = multiprocessing.cpu_count()
+ if self.build_config.is_android:
+ # Adb isn't happy about multi-processed file pushing.
+ options.j = 1
+ else:
+ options.j = multiprocessing.cpu_count()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
@@ -630,7 +647,7 @@ class BaseTestRunner(object):
"simd_mips": simd_mips,
"simulator": utils.UseSimulator(self.build_config.arch),
"simulator_run": False,
- "system": utils.GuessOS(),
+ "system": self.target_os,
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
diff --git a/deps/v8/tools/testrunner/local/android.py b/deps/v8/tools/testrunner/local/android.py
new file mode 100644
index 0000000000..fb25bb5a17
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/android.py
@@ -0,0 +1,208 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Wrapper around the Android device abstraction from src/build/android.
+"""
+
+import logging
+import os
+import re
+import sys
+
+
+BASE_DIR = os.path.normpath(
+ os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+ANDROID_DIR = os.path.join(BASE_DIR, 'build', 'android')
+DEVICE_DIR = '/data/local/tmp/v8/'
+
+
+class TimeoutException(Exception):
+ def __init__(self, timeout):
+ self.timeout = timeout
+
+
+class CommandFailedException(Exception):
+ def __init__(self, status, output):
+ self.status = status
+ self.output = output
+
+
+class _Driver(object):
+ """Helper class to execute shell commands on an Android device."""
+ def __init__(self, device=None):
+ assert os.path.exists(ANDROID_DIR)
+ sys.path.insert(0, ANDROID_DIR)
+
+ # We import the dependencies only on demand, so that this file can be
+ # imported unconditionally.
+ import devil_chromium
+ from devil.android import device_errors # pylint: disable=import-error
+ from devil.android import device_utils # pylint: disable=import-error
+ from devil.android.perf import cache_control # pylint: disable=import-error
+ from devil.android.perf import perf_control # pylint: disable=import-error
+ from devil.android.sdk import adb_wrapper # pylint: disable=import-error
+ global cache_control
+ global device_errors
+ global perf_control
+
+ devil_chromium.Initialize()
+
+ if not device:
+ # Detect attached device if not specified.
+ devices = adb_wrapper.AdbWrapper.Devices()
+ assert devices, 'No devices detected'
+ assert len(devices) == 1, 'Multiple devices detected.'
+ device = str(devices[0])
+ self.adb_wrapper = adb_wrapper.AdbWrapper(device)
+ self.device = device_utils.DeviceUtils(self.adb_wrapper)
+
+ # This remembers what we have already pushed to the device.
+ self.pushed = set()
+
+ def tear_down(self):
+ """Clean up files after running all tests."""
+ self.device.RemovePath(DEVICE_DIR, force=True, recursive=True)
+
+ def push_file(self, host_dir, file_name, target_rel='.',
+ skip_if_missing=False):
+ """Push a single file to the device (cached).
+
+ Args:
+ host_dir: Absolute parent directory of the file to push.
+ file_name: Name of the file to push.
+ target_rel: Parent directory of the target location on the device
+ (relative to the device's base dir for testing).
+      skip_if_missing: Keeps silent about missing files when set. Otherwise
+          logs an error.
+ """
+ file_on_host = os.path.join(host_dir, file_name)
+
+ # Only push files not yet pushed in one execution.
+ if file_on_host in self.pushed:
+ return
+
+ file_on_device_tmp = os.path.join(DEVICE_DIR, '_tmp_', file_name)
+ file_on_device = os.path.join(DEVICE_DIR, target_rel, file_name)
+ folder_on_device = os.path.dirname(file_on_device)
+
+ # Only attempt to push files that exist.
+ if not os.path.exists(file_on_host):
+ if not skip_if_missing:
+ logging.critical('Missing file on host: %s' % file_on_host)
+ return
+
+ # Work-around for 'text file busy' errors. Push the files to a temporary
+ # location and then copy them with a shell command.
+ output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
+ # Success looks like this: '3035 KB/s (12512056 bytes in 4.025s)'.
+ # Errors look like this: 'failed to copy ... '.
+ if output and not re.search('^[0-9]', output.splitlines()[-1]):
+ logging.critical('PUSH FAILED: ' + output)
+ self.adb_wrapper.Shell('mkdir -p %s' % folder_on_device)
+ self.adb_wrapper.Shell('cp %s %s' % (file_on_device_tmp, file_on_device))
+ self.pushed.add(file_on_host)
+
+ def push_executable(self, shell_dir, target_dir, binary):
+ """Push files required to run a V8 executable.
+
+ Args:
+ shell_dir: Absolute parent directory of the executable on the host.
+ target_dir: Parent directory of the executable on the device (relative to
+          the device's base dir for testing).
+ binary: Name of the binary to push.
+ """
+ self.push_file(shell_dir, binary, target_dir)
+
+ # Push external startup data. Backwards compatible for revisions where
+    # these files didn't exist, or for bots that don't produce these files.
+ self.push_file(
+ shell_dir,
+ 'natives_blob.bin',
+ target_dir,
+ skip_if_missing=True,
+ )
+ self.push_file(
+ shell_dir,
+ 'snapshot_blob.bin',
+ target_dir,
+ skip_if_missing=True,
+ )
+ self.push_file(
+ shell_dir,
+ 'snapshot_blob_trusted.bin',
+ target_dir,
+ skip_if_missing=True,
+ )
+ self.push_file(
+ shell_dir,
+ 'icudtl.dat',
+ target_dir,
+ skip_if_missing=True,
+ )
+
+ def run(self, target_dir, binary, args, rel_path, timeout, env=None,
+ logcat_file=False):
+ """Execute a command on the device's shell.
+
+ Args:
+ target_dir: Parent directory of the executable on the device (relative to
+          the device's base dir for testing).
+ binary: Name of the binary.
+ args: List of arguments to pass to the binary.
+ rel_path: Relative path on device to use as CWD.
+ timeout: Timeout in seconds.
+ env: The environment variables with which the command should be run.
+ logcat_file: File into which to stream adb logcat log.
+ """
+ binary_on_device = os.path.join(DEVICE_DIR, target_dir, binary)
+ cmd = [binary_on_device] + args
+ def run_inner():
+ try:
+ output = self.device.RunShellCommand(
+ cmd,
+ cwd=os.path.join(DEVICE_DIR, rel_path),
+ check_return=True,
+ env=env,
+ timeout=timeout,
+ retries=0,
+ )
+ return '\n'.join(output)
+ except device_errors.AdbCommandFailedError as e:
+ raise CommandFailedException(e.status, e.output)
+ except device_errors.CommandTimeoutError:
+ raise TimeoutException(timeout)
+
+
+ if logcat_file:
+ with self.device.GetLogcatMonitor(output_file=logcat_file) as logmon:
+ result = run_inner()
+ logmon.Close()
+ return result
+ else:
+ return run_inner()
+
+  def drop_ram_caches(self):
+    """Drop RAM caches on the device."""
+ cache = cache_control.CacheControl(self.device)
+ cache.DropRamCaches()
+
+ def set_high_perf_mode(self):
+ """Set device into high performance mode."""
+ perf = perf_control.PerfControl(self.device)
+ perf.SetHighPerfMode()
+
+ def set_default_perf_mode(self):
+ """Set device into default performance mode."""
+ perf = perf_control.PerfControl(self.device)
+ perf.SetDefaultPerfMode()
+
+
+_ANDROID_DRIVER = None
+def android_driver(device=None):
+ """Singleton access method to the driver class."""
+ global _ANDROID_DRIVER
+ if not _ANDROID_DRIVER:
+ _ANDROID_DRIVER = _Driver(device)
+ return _ANDROID_DRIVER
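
A hedged usage sketch of the new driver API added above, mirroring how run_perf.py in this same patch drives it. The output directory, benchmark path and timeout are illustrative, and the snippet assumes a checkout where build/android and devil are available, as the module itself requires.

```python
# Usage sketch of testrunner.local.android (illustrative paths/values only).
from testrunner.local import android

driver = android.android_driver()  # auto-detects a single attached device
driver.set_high_perf_mode()
try:
  driver.push_executable('out/android_arm64.release', 'bin', 'd8')
  driver.push_file('/abs/path/to/benchmarks', 'run.js', '.')
  driver.drop_ram_caches()
  stdout = driver.run(
      target_dir='bin',
      binary='d8',
      args=['run.js'],
      rel_path='.',
      timeout=120,
  )
except android.CommandFailedException as e:
  stdout = e.output  # the binary ran but exited non-zero
except android.TimeoutException:
  stdout = ''
finally:
  driver.set_default_perf_mode()
  driver.tear_down()  # removes /data/local/tmp/v8/ from the device
```
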
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index adc9c2e452..302d568e87 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -4,16 +4,22 @@
import os
+import re
import signal
import subprocess
import sys
import threading
import time
+from ..local.android import (
+ android_driver, CommandFailedException, TimeoutException)
from ..local import utils
from ..objects import output
+BASE_DIR = os.path.normpath(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
+
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
@@ -33,7 +39,18 @@ class AbortException(Exception):
class BaseCommand(object):
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
- verbose=False):
+ verbose=False, resources_func=None):
+ """Initialize the command.
+
+ Args:
+ shell: The name of the executable (e.g. d8).
+ args: List of args to pass to the executable.
+ cmd_prefix: Prefix of command (e.g. a wrapper script).
+ timeout: Timeout in seconds.
+ env: Environment dict for execution.
+ verbose: Print additional output.
+ resources_func: Callable, returning all test files needed by this command.
+ """
assert(timeout > 0)
self.shell = shell
@@ -43,11 +60,11 @@ class BaseCommand(object):
self.env = env or {}
self.verbose = verbose
- def execute(self, **additional_popen_kwargs):
+ def execute(self):
if self.verbose:
print '# %s' % self
- process = self._start_process(**additional_popen_kwargs)
+ process = self._start_process()
# Variable to communicate with the signal handler.
abort_occured = [False]
@@ -79,14 +96,13 @@ class BaseCommand(object):
duration
)
- def _start_process(self, **additional_popen_kwargs):
+ def _start_process(self):
try:
return subprocess.Popen(
args=self._get_popen_args(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self._get_env(),
- **additional_popen_kwargs
)
except Exception as e:
sys.stderr.write('Error executing: %s\n' % self)
@@ -187,8 +203,85 @@ class WindowsCommand(BaseCommand):
sys.stdout.flush()
-# Set the Command class to the OS-specific version.
-if utils.IsWindows():
- Command = WindowsCommand
-else:
- Command = PosixCommand
+class AndroidCommand(BaseCommand):
+ def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
+ verbose=False, resources_func=None):
+ """Initialize the command and all files that need to be pushed to the
+ Android device.
+ """
+ self.shell_name = os.path.basename(shell)
+ self.shell_dir = os.path.dirname(shell)
+ self.files_to_push = resources_func()
+
+ # Make all paths in arguments relative and also prepare files from arguments
+ # for pushing to the device.
+ rel_args = []
+ find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
+ for arg in (args or []):
+ match = find_path_re.match(arg)
+ if match:
+ self.files_to_push.append(match.group(1))
+ rel_args.append(
+ re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
+
+ super(AndroidCommand, self).__init__(
+ shell, args=rel_args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
+ verbose=verbose)
+
+ def execute(self, **additional_popen_kwargs):
+ """Execute the command on the device.
+
+ This pushes all required files to the device and then runs the command.
+ """
+ if self.verbose:
+ print '# %s' % self
+
+ android_driver().push_executable(self.shell_dir, 'bin', self.shell_name)
+
+ for abs_file in self.files_to_push:
+ abs_dir = os.path.dirname(abs_file)
+ file_name = os.path.basename(abs_file)
+ rel_dir = os.path.relpath(abs_dir, BASE_DIR)
+ android_driver().push_file(abs_dir, file_name, rel_dir)
+
+ start_time = time.time()
+ return_code = 0
+ timed_out = False
+ try:
+ stdout = android_driver().run(
+ 'bin', self.shell_name, self.args, '.', self.timeout, self.env)
+ except CommandFailedException as e:
+ return_code = e.status
+ stdout = e.output
+ except TimeoutException as e:
+ return_code = 1
+ timed_out = True
+ # Sadly the Android driver doesn't provide output on timeout.
+ stdout = ''
+
+ duration = time.time() - start_time
+ return output.Output(
+ return_code,
+ timed_out,
+ stdout,
+ '', # No stderr available.
+ -1, # No pid available.
+ duration,
+ )
+
+
+Command = None
+def setup(target_os):
+ """Set the Command class to the OS-specific version."""
+ global Command
+ if target_os == 'android':
+ Command = AndroidCommand
+ elif target_os == 'windows':
+ Command = WindowsCommand
+ else:
+ Command = PosixCommand
+
+def tear_down():
+ """Clean up after using commands."""
+ if Command == AndroidCommand:
+ android_driver().tear_down()
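
With this change the command module no longer picks the Command class at import time; callers are expected to bracket test execution with `setup()` and `tear_down()`, as the base_runner.py hunk above does. A minimal lifecycle sketch follows; the d8 path is illustrative, and note that `AndroidCommand` additionally expects a `resources_func` callable when constructed (see the testcase.py hunk below).

```python
# Minimal lifecycle sketch for testrunner.local.command after this change
# (illustrative binary path; 'android' would select AndroidCommand instead).
from testrunner.local import command, utils

command.setup(utils.GuessOS())  # installs PosixCommand or WindowsCommand
try:
  cmd = command.Command('out/x64.release/d8', args=['--version'], timeout=60)
  result = cmd.execute()
  print(result.stdout)
finally:
  command.tear_down()  # only does device cleanup when AndroidCommand is active
```
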
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index ecfbf008a2..4742e84caf 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -55,7 +55,7 @@ for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "big", "little",
+for var in ["debug", "release", "big", "little", "android",
"android_arm", "android_arm64", "android_ia32", "android_x64",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 39d4fbd2a7..7416590dd0 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -240,7 +240,8 @@ class TestCase(object):
args=params,
env=env,
timeout=timeout,
- verbose=self._test_config.verbose
+ verbose=self._test_config.verbose,
+ resources_func=self._get_resources,
)
def _parse_source_flags(self, source=None):
@@ -260,6 +261,14 @@ class TestCase(object):
def _get_source_path(self):
return None
+ def _get_resources(self):
+ """Returns a list of absolute paths with additional files needed by the
+ test case.
+
+ Used to push additional files to Android devices.
+ """
+ return []
+
@property
def output_proc(self):
if self.expected_outcomes is outproc.OUTCOMES_PASS:
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index d1953dda99..39efb60fe9 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -85,6 +85,11 @@ class PassOutProc(BaseOutProc):
return OUTCOMES_PASS
+class NegPassOutProc(Negative, PassOutProc):
+ """Output processor optimized for negative tests expected to PASS"""
+ pass
+
+
class OutProc(BaseOutProc):
"""Output processor optimized for positive tests with expected outcomes
different than a single PASS.
@@ -113,6 +118,7 @@ class OutProc(BaseOutProc):
# TODO(majeski): Override __reduce__ to make it deserialize as one instance.
DEFAULT = PassOutProc()
+DEFAULT_NEGATIVE = NegPassOutProc()
class ExpectedOutProc(OutProc):
diff --git a/deps/v8/tools/testrunner/testproc/filter.py b/deps/v8/tools/testrunner/testproc/filter.py
index 5081997751..e2a5e972a9 100644
--- a/deps/v8/tools/testrunner/testproc/filter.py
+++ b/deps/v8/tools/testrunner/testproc/filter.py
@@ -59,25 +59,25 @@ class NameFilterProc(base.TestProcFilter):
super(NameFilterProc, self).__init__()
self._globs = defaultdict(list)
+ self._exact_matches = defaultdict(dict)
for a in args:
argpath = a.split('/')
suitename = argpath[0]
path = '/'.join(argpath[1:]) or '*'
- self._globs[suitename].append(path)
+ if '*' in path:
+ self._globs[suitename].append(path)
+ else:
+ self._exact_matches[suitename][path] = True
for s, globs in self._globs.iteritems():
if not globs or '*' in globs:
- self._globs[s] = []
+ self._globs[s] = ['*']
def _filter(self, test):
- globs = self._globs.get(test.suite.name)
- if globs is None:
- return True
-
- if not globs:
- return False
-
+ globs = self._globs.get(test.suite.name, [])
for g in globs:
+ if g == '*': return False
if fnmatch.fnmatch(test.path, g):
return False
- return True
+ exact_matches = self._exact_matches.get(test.suite.name, {})
+ return test.path not in exact_matches
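
The filter rewrite above splits user-supplied test names into glob patterns and exact paths, so the common case of naming a single test becomes a dictionary lookup rather than an `fnmatch` scan over every argument. A standalone sketch of the same idea, independent of the test-runner classes (suite and test names are examples):

```python
# Standalone illustration of the exact-match vs. glob split introduced above.
import fnmatch
from collections import defaultdict

globs = defaultdict(list)   # suite name -> list of glob patterns
exact = defaultdict(dict)   # suite name -> {test path: True}

def add_arg(arg):
  suite, _, path = arg.partition('/')
  path = path or '*'
  if '*' in path:
    globs[suite].append(path)
  else:
    exact[suite][path] = True

def is_filtered_out(suite, path):
  """Mirrors _filter above: False means the test is kept."""
  for g in globs.get(suite, []):
    if g == '*' or fnmatch.fnmatch(path, g):
      return False
  return path not in exact.get(suite, {})

add_arg('mjsunit/array-*')            # glob: still scanned with fnmatch
add_arg('cctest/test-heap/Simple')    # exact: a plain dictionary lookup

print(is_filtered_out('mjsunit', 'array-sort'))         # False, kept by the glob
print(is_filtered_out('cctest', 'test-heap/Simple'))    # False, kept by exact match
print(is_filtered_out('cctest', 'test-api/Something'))  # True, filtered out
```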
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index b5efa3f69c..31acd3d7be 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -160,7 +160,6 @@ function TickProcessor(
this.stateFilter_ = stateFilter;
this.runtimeTimerFilter_ = runtimeTimerFilter;
this.sourceMap = sourceMap;
- this.deserializedEntriesNames_ = [];
var ticks = this.ticks_ =
{ total: 0, unaccounted: 0, excluded: 0, gc: 0 };
@@ -299,7 +298,6 @@ TickProcessor.prototype.processSharedLibrary = function(
TickProcessor.prototype.processCodeCreation = function(
type, kind, timestamp, start, size, name, maybe_func) {
- name = this.deserializedEntriesNames_[start] || name;
if (maybe_func.length) {
var funcAddr = parseInt(maybe_func[0]);
var state = parseState(maybe_func[1]);
diff --git a/deps/v8/tools/torque/vim-torque/README.md b/deps/v8/tools/torque/vim-torque/README.md
new file mode 100644
index 0000000000..fbdef0fd0d
--- /dev/null
+++ b/deps/v8/tools/torque/vim-torque/README.md
@@ -0,0 +1,33 @@
+# V8 Torque syntax support for vim
+
+This plugin adds syntax highlighting support for the V8 Torque domain-specific
+language.
+
+## Installation
+
+Installation depends on your favorite plugin manager.
+
+**Pathogen:**
+
+Run
+
+```sh
+ln -s $V8/tools/torque/vim-torque ~/.vim/bundle/vim-torque
+# or ~/.config/nvim/bundle/vim-torque for Neovim
+```
+
+**Vundle:**
+
+Add this line to your `.vimrc` or `~/.config/nvim/init.vim`.
+
+```vim
+Plugin 'file:///path/to/v8/tools/torque/vim-torque'
+```
+
+**vim-plug:**
+
+Add this line to your `.vimrc` or `~/.config/nvim/init.vim`.
+
+```vim
+Plug '~/path/to/v8/tools/torque/vim-torque'
+```
diff --git a/deps/v8/tools/torque/vim-torque/ftdetect/torque.vim b/deps/v8/tools/torque/vim-torque/ftdetect/torque.vim
new file mode 100644
index 0000000000..ead2c5ef35
--- /dev/null
+++ b/deps/v8/tools/torque/vim-torque/ftdetect/torque.vim
@@ -0,0 +1 @@
+au BufRead,BufNewFile *.tq set filetype=torque
diff --git a/deps/v8/tools/torque/vim-torque/syntax/torque.vim b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
new file mode 100644
index 0000000000..17713c7213
--- /dev/null
+++ b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -0,0 +1,84 @@
+" Copyright 2018 the V8 project authors. All rights reserved.
+" Use of this source code is governed by a BSD-style license that can be
+" found in the LICENSE file.
+
+if !exists("main_syntax")
+ " quit when a syntax file was already loaded
+ if exists("b:current_syntax")
+ finish
+ endif
+ let main_syntax = 'torque'
+elseif exists("b:current_syntax") && b:current_syntax == "torque"
+ finish
+endif
+
+let s:cpo_save = &cpo
+set cpo&vim
+
+syn match torqueLineComment "\/\/.*" contains=@Spell
+syn region torqueComment start="/\*" end="\*/" contains=@Spell
+syn region torqueStringS start=+'+ skip=+\\\\\|\\'+ end=+'\|$+
+
+syn keyword torqueAssert assert check debug unreachable
+syn keyword torqueAtom True False Undefined Hole Null
+syn keyword torqueBoolean true false
+syn keyword torqueBranch break continue goto
+syn keyword torqueConditional if else typeswitch otherwise
+syn match torqueConstant /\v<[A-Z][A-Z0-9_]+>/
+syn match torqueConstant /\v<k[A-Z][A-Za-z0-9]*>/
+syn keyword torqueFunction macro builtin runtime
+syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast
+syn keyword torqueLabel case
+syn keyword torqueMatching try label
+syn keyword torqueModifier extern javascript constexpr
+syn match torqueNumber /\v<[0-9]+(\.[0-9]*)?>/
+syn match torqueNumber /\v<0x[0-9a-fA-F]+>/
+syn keyword torqueOperator operator
+syn keyword torqueRel extends generates labels
+syn keyword torqueRepeat while for of
+syn keyword torqueStatement return tail
+syn keyword torqueStructure module struct type
+syn keyword torqueVariable const let
+
+syn match torqueType /\v(\<)@<=([A-Za-z][0-9A-Za-z_]*)(>)@=/
+syn match torqueType /\v(:\s*(constexpr\s*)?)@<=([A-Za-z][0-9A-Za-z_]*)/
+" Include some common types also
+syn keyword torqueType Arguments void never
+syn keyword torqueType Tagged Smi HeapObject Object
+syn keyword torqueType int32 uint32 int64 intptr uintptr float32 float64
+syn keyword torqueType bool string
+syn keyword torqueType int31 RawPtr AbstractCode Code JSReceiver Context String
+syn keyword torqueType Oddball HeapNumber Number BigInt Numeric Boolean JSProxy
+syn keyword torqueType JSObject JSArray JSFunction JSBoundFunction Callable Map
+
+hi def link torqueAssert Statement
+hi def link torqueAtom Constant
+hi def link torqueBoolean Boolean
+hi def link torqueBranch Conditional
+hi def link torqueComment Comment
+hi def link torqueConditional Conditional
+hi def link torqueConstant Constant
+hi def link torqueFunction Function
+hi def link torqueKeyword Keyword
+hi def link torqueLabel Label
+hi def link torqueLineComment Comment
+hi def link torqueMatching Exception
+hi def link torqueModifier StorageClass
+hi def link torqueNumber Number
+hi def link torqueOperator Operator
+hi def link torqueRel StorageClass
+hi def link torqueRepeat Repeat
+hi def link torqueStatement Statement
+hi def link torqueStringS String
+hi def link torqueStructure Structure
+hi def link torqueType Type
+hi def link torqueVariable Identifier
+
+let b:current_syntax = "torque"
+if main_syntax == 'torque'
+ unlet main_syntax
+endif
+let &cpo = s:cpo_save
+unlet s:cpo_save
+
+" vim: set ts=8:
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index 483dfe7199..58035efcc0 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -16,14 +16,15 @@ BOTS = {
'--linux64_haswell': 'v8_linux64_haswell_perf_try',
'--nexus5': 'v8_nexus5_perf_try',
'--nexus7': 'v8_nexus7_perf_try',
- '--nexus9': 'v8_nexus9_perf_try',
'--nexus10': 'v8_nexus10_perf_try',
+ '--pixel2': 'v8_pixel2_perf_try',
}
# This list will contain builder names that should be triggered on an internal
# swarming bucket instead of internal Buildbot master.
SWARMING_BOTS = [
'v8_linux64_perf_try',
+ 'v8_pixel2_perf_try',
]
DEFAULT_BOTS = [
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 07dd515406..6ba18e0b9e 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -6,8 +6,7 @@
from collections import namedtuple
import coverage
import json
-from mock import DEFAULT
-from mock import MagicMock
+from mock import MagicMock, patch
import os
from os import path, sys
import platform
@@ -27,6 +26,7 @@ TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
V8_JSON = {
"path": ["."],
+ "owners": ["username@chromium.org"],
"binary": "d7",
"flags": ["--flag"],
"main": "run.js",
@@ -40,6 +40,7 @@ V8_JSON = {
V8_NESTED_SUITES_JSON = {
"path": ["."],
+ "owners": ["username@chromium.org"],
"flags": ["--flag"],
"run_count": 1,
"units": "score",
@@ -76,6 +77,7 @@ V8_NESTED_SUITES_JSON = {
V8_GENERIC_JSON = {
"path": ["."],
+ "owners": ["username@chromium.org"],
"binary": "cc",
"flags": ["--flag"],
"generic": True,
@@ -429,9 +431,9 @@ class PerfTest(unittest.TestCase):
platform.Run = MagicMock(
return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
run_perf.AndroidPlatform = MagicMock(return_value=platform)
- self.assertEquals(
- 0, self._CallMain("--android-build-tools", "/some/dir",
- "--arch", "arm"))
+ with patch.object(run_perf.Platform, 'ReadBuildConfig',
+ MagicMock(return_value={'is_android': True})):
+ self.assertEquals(0, self._CallMain("--arch", "arm"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
diff --git a/deps/v8/tools/unittests/testdata/test1.json b/deps/v8/tools/unittests/testdata/test1.json
index 7fa1faac3b..939d6e2612 100644
--- a/deps/v8/tools/unittests/testdata/test1.json
+++ b/deps/v8/tools/unittests/testdata/test1.json
@@ -1,5 +1,6 @@
{
"path": ["."],
+ "owners": ["username@chromium.org"],
"flags": [],
"main": "run.js",
"run_count": 2,
diff --git a/deps/v8/tools/unittests/testdata/test2.json b/deps/v8/tools/unittests/testdata/test2.json
index 79fed2652f..632c4e5c6e 100644
--- a/deps/v8/tools/unittests/testdata/test2.json
+++ b/deps/v8/tools/unittests/testdata/test2.json
@@ -1,5 +1,6 @@
{
"path": ["."],
+ "owners": ["username@chromium.org"],
"flags": [],
"main": "run.js",
"run_count": 2,
diff --git a/deps/v8/tools/unittests/testdata/test3.json b/deps/v8/tools/unittests/testdata/test3.json
index 1b7ef96aae..3e871de28a 100644
--- a/deps/v8/tools/unittests/testdata/test3.json
+++ b/deps/v8/tools/unittests/testdata/test3.json
@@ -1,5 +1,6 @@
{
"path": ["."],
+ "owners": ["username@chromium.org"],
"flags": [],
"run_count": 1,
"results_processor": "results_processor.py",
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index c5e3ee35f1..eb9f7bafdc 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -1,6 +1,7 @@
{
"current_cpu": "x64",
"dcheck_always_on": false,
+ "is_android": false,
"is_asan": false,
"is_cfi": false,
"is_component_build": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index c5e3ee35f1..eb9f7bafdc 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -1,6 +1,7 @@
{
"current_cpu": "x64",
"dcheck_always_on": false,
+ "is_android": false,
"is_asan": false,
"is_cfi": false,
"is_component_build": false,
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 56a74c208c..16927d85b3 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -1,4 +1,4 @@
-# Copyright 2017 the V8 project authors. All rights reserved.
+# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
@@ -123,8 +123,7 @@ INSTANCE_TYPES = {
219: "STORE_HANDLER_TYPE",
220: "UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE",
221: "UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE",
- 222: "WEAK_CELL_TYPE",
- 223: "WEAK_ARRAY_LIST_TYPE",
+ 222: "WEAK_ARRAY_LIST_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -159,199 +158,200 @@ INSTANCE_TYPES = {
1081: "JS_WEAK_SET_TYPE",
1082: "JS_TYPED_ARRAY_TYPE",
1083: "JS_DATA_VIEW_TYPE",
- 1084: "JS_INTL_LOCALE_TYPE",
- 1085: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
- 1086: "WASM_GLOBAL_TYPE",
- 1087: "WASM_INSTANCE_TYPE",
- 1088: "WASM_MEMORY_TYPE",
- 1089: "WASM_MODULE_TYPE",
- 1090: "WASM_TABLE_TYPE",
- 1091: "JS_BOUND_FUNCTION_TYPE",
- 1092: "JS_FUNCTION_TYPE",
+ 1084: "JS_INTL_COLLATOR_TYPE",
+ 1085: "JS_INTL_LIST_FORMAT_TYPE",
+ 1086: "JS_INTL_LOCALE_TYPE",
+ 1087: "JS_INTL_PLURAL_RULES_TYPE",
+ 1088: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
+ 1089: "WASM_GLOBAL_TYPE",
+ 1090: "WASM_INSTANCE_TYPE",
+ 1091: "WASM_MEMORY_TYPE",
+ 1092: "WASM_MODULE_TYPE",
+ 1093: "WASM_TABLE_TYPE",
+ 1094: "JS_BOUND_FUNCTION_TYPE",
+ 1095: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
("RO_SPACE", 0x02201): (138, "FreeSpaceMap"),
- ("RO_SPACE", 0x02259): (132, "MetaMap"),
- ("RO_SPACE", 0x022e1): (131, "NullMap"),
- ("RO_SPACE", 0x02359): (205, "DescriptorArrayMap"),
- ("RO_SPACE", 0x023c1): (182, "FixedArrayMap"),
- ("RO_SPACE", 0x02429): (222, "WeakCellMap"),
- ("RO_SPACE", 0x024d1): (152, "OnePointerFillerMap"),
- ("RO_SPACE", 0x02539): (152, "TwoPointerFillerMap"),
- ("RO_SPACE", 0x025d1): (131, "UninitializedMap"),
- ("RO_SPACE", 0x02661): (8, "OneByteInternalizedStringMap"),
- ("RO_SPACE", 0x02721): (131, "UndefinedMap"),
- ("RO_SPACE", 0x02799): (129, "HeapNumberMap"),
- ("RO_SPACE", 0x02831): (131, "TheHoleMap"),
- ("RO_SPACE", 0x028f9): (131, "BooleanMap"),
- ("RO_SPACE", 0x02a09): (136, "ByteArrayMap"),
- ("RO_SPACE", 0x02a71): (182, "FixedCOWArrayMap"),
- ("RO_SPACE", 0x02ad9): (184, "HashTableMap"),
- ("RO_SPACE", 0x02b41): (128, "SymbolMap"),
- ("RO_SPACE", 0x02ba9): (72, "OneByteStringMap"),
- ("RO_SPACE", 0x02c11): (193, "ScopeInfoMap"),
- ("RO_SPACE", 0x02c79): (216, "SharedFunctionInfoMap"),
- ("RO_SPACE", 0x02ce1): (133, "CodeMap"),
- ("RO_SPACE", 0x02d49): (199, "FunctionContextMap"),
- ("RO_SPACE", 0x02db1): (208, "CellMap"),
- ("RO_SPACE", 0x02e19): (215, "GlobalPropertyCellMap"),
- ("RO_SPACE", 0x02e81): (135, "ForeignMap"),
- ("RO_SPACE", 0x02ee9): (206, "TransitionArrayMap"),
- ("RO_SPACE", 0x02f51): (211, "FeedbackVectorMap"),
- ("RO_SPACE", 0x02ff9): (131, "ArgumentsMarkerMap"),
- ("RO_SPACE", 0x030b9): (131, "ExceptionMap"),
- ("RO_SPACE", 0x03179): (131, "TerminationExceptionMap"),
- ("RO_SPACE", 0x03241): (131, "OptimizedOutMap"),
- ("RO_SPACE", 0x03301): (131, "StaleRegisterMap"),
- ("RO_SPACE", 0x03391): (201, "NativeContextMap"),
- ("RO_SPACE", 0x033f9): (200, "ModuleContextMap"),
- ("RO_SPACE", 0x03461): (198, "EvalContextMap"),
- ("RO_SPACE", 0x034c9): (202, "ScriptContextMap"),
- ("RO_SPACE", 0x03531): (195, "BlockContextMap"),
- ("RO_SPACE", 0x03599): (196, "CatchContextMap"),
- ("RO_SPACE", 0x03601): (203, "WithContextMap"),
- ("RO_SPACE", 0x03669): (197, "DebugEvaluateContextMap"),
- ("RO_SPACE", 0x036d1): (194, "ScriptContextTableMap"),
- ("RO_SPACE", 0x03739): (151, "FeedbackMetadataArrayMap"),
- ("RO_SPACE", 0x037a1): (182, "ArrayListMap"),
- ("RO_SPACE", 0x03809): (130, "BigIntMap"),
- ("RO_SPACE", 0x03871): (183, "ObjectBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x038d9): (137, "BytecodeArrayMap"),
- ("RO_SPACE", 0x03941): (209, "CodeDataContainerMap"),
- ("RO_SPACE", 0x039a9): (150, "FixedDoubleArrayMap"),
- ("RO_SPACE", 0x03a11): (188, "GlobalDictionaryMap"),
- ("RO_SPACE", 0x03a79): (210, "ManyClosuresCellMap"),
- ("RO_SPACE", 0x03ae1): (182, "ModuleInfoMap"),
- ("RO_SPACE", 0x03b49): (134, "MutableHeapNumberMap"),
- ("RO_SPACE", 0x03bb1): (187, "NameDictionaryMap"),
- ("RO_SPACE", 0x03c19): (210, "NoClosuresCellMap"),
- ("RO_SPACE", 0x03c81): (189, "NumberDictionaryMap"),
- ("RO_SPACE", 0x03ce9): (210, "OneClosureCellMap"),
- ("RO_SPACE", 0x03d51): (185, "OrderedHashMapMap"),
- ("RO_SPACE", 0x03db9): (186, "OrderedHashSetMap"),
- ("RO_SPACE", 0x03e21): (213, "PreParsedScopeDataMap"),
- ("RO_SPACE", 0x03e89): (214, "PropertyArrayMap"),
- ("RO_SPACE", 0x03ef1): (207, "SideEffectCallHandlerInfoMap"),
- ("RO_SPACE", 0x03f59): (207, "SideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x03fc1): (207, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("RO_SPACE", 0x04029): (190, "SimpleNumberDictionaryMap"),
- ("RO_SPACE", 0x04091): (182, "SloppyArgumentsElementsMap"),
- ("RO_SPACE", 0x040f9): (217, "SmallOrderedHashMapMap"),
- ("RO_SPACE", 0x04161): (218, "SmallOrderedHashSetMap"),
- ("RO_SPACE", 0x041c9): (191, "StringTableMap"),
- ("RO_SPACE", 0x04231): (220, "UncompiledDataWithoutPreParsedScopeMap"),
- ("RO_SPACE", 0x04299): (221, "UncompiledDataWithPreParsedScopeMap"),
- ("RO_SPACE", 0x04301): (204, "WeakFixedArrayMap"),
- ("RO_SPACE", 0x04369): (223, "WeakArrayListMap"),
- ("RO_SPACE", 0x043d1): (192, "EphemeronHashTableMap"),
- ("RO_SPACE", 0x04439): (106, "NativeSourceStringMap"),
- ("RO_SPACE", 0x044a1): (64, "StringMap"),
- ("RO_SPACE", 0x04509): (73, "ConsOneByteStringMap"),
- ("RO_SPACE", 0x04571): (65, "ConsStringMap"),
- ("RO_SPACE", 0x045d9): (77, "ThinOneByteStringMap"),
- ("RO_SPACE", 0x04641): (69, "ThinStringMap"),
- ("RO_SPACE", 0x046a9): (67, "SlicedStringMap"),
- ("RO_SPACE", 0x04711): (75, "SlicedOneByteStringMap"),
- ("RO_SPACE", 0x04779): (66, "ExternalStringMap"),
- ("RO_SPACE", 0x047e1): (82, "ExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04849): (74, "ExternalOneByteStringMap"),
- ("RO_SPACE", 0x048b1): (98, "ShortExternalStringMap"),
- ("RO_SPACE", 0x04919): (114, "ShortExternalStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04981): (0, "InternalizedStringMap"),
- ("RO_SPACE", 0x049e9): (2, "ExternalInternalizedStringMap"),
- ("RO_SPACE", 0x04a51): (18, "ExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04ab9): (10, "ExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x04b21): (34, "ShortExternalInternalizedStringMap"),
- ("RO_SPACE", 0x04b89): (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- ("RO_SPACE", 0x04bf1): (42, "ShortExternalOneByteInternalizedStringMap"),
- ("RO_SPACE", 0x04c59): (106, "ShortExternalOneByteStringMap"),
- ("RO_SPACE", 0x04cc1): (140, "FixedUint8ArrayMap"),
- ("RO_SPACE", 0x04d29): (139, "FixedInt8ArrayMap"),
- ("RO_SPACE", 0x04d91): (142, "FixedUint16ArrayMap"),
- ("RO_SPACE", 0x04df9): (141, "FixedInt16ArrayMap"),
- ("RO_SPACE", 0x04e61): (144, "FixedUint32ArrayMap"),
- ("RO_SPACE", 0x04ec9): (143, "FixedInt32ArrayMap"),
- ("RO_SPACE", 0x04f31): (145, "FixedFloat32ArrayMap"),
- ("RO_SPACE", 0x04f99): (146, "FixedFloat64ArrayMap"),
- ("RO_SPACE", 0x05001): (147, "FixedUint8ClampedArrayMap"),
- ("RO_SPACE", 0x05069): (149, "FixedBigUint64ArrayMap"),
- ("RO_SPACE", 0x050d1): (148, "FixedBigInt64ArrayMap"),
- ("RO_SPACE", 0x05139): (131, "SelfReferenceMarkerMap"),
- ("RO_SPACE", 0x051b9): (171, "Tuple2Map"),
- ("RO_SPACE", 0x05509): (161, "InterceptorInfoMap"),
- ("RO_SPACE", 0x05629): (169, "ScriptMap"),
- ("RO_SPACE", 0x0a021): (154, "AccessorInfoMap"),
- ("RO_SPACE", 0x0a089): (153, "AccessCheckInfoMap"),
- ("RO_SPACE", 0x0a0f1): (155, "AccessorPairMap"),
- ("RO_SPACE", 0x0a159): (156, "AliasedArgumentsEntryMap"),
- ("RO_SPACE", 0x0a1c1): (157, "AllocationMementoMap"),
- ("RO_SPACE", 0x0a229): (158, "AsyncGeneratorRequestMap"),
- ("RO_SPACE", 0x0a291): (159, "DebugInfoMap"),
- ("RO_SPACE", 0x0a2f9): (160, "FunctionTemplateInfoMap"),
- ("RO_SPACE", 0x0a361): (162, "InterpreterDataMap"),
- ("RO_SPACE", 0x0a3c9): (163, "ModuleInfoEntryMap"),
- ("RO_SPACE", 0x0a431): (164, "ModuleMap"),
- ("RO_SPACE", 0x0a499): (165, "ObjectTemplateInfoMap"),
- ("RO_SPACE", 0x0a501): (166, "PromiseCapabilityMap"),
- ("RO_SPACE", 0x0a569): (167, "PromiseReactionMap"),
- ("RO_SPACE", 0x0a5d1): (168, "PrototypeInfoMap"),
- ("RO_SPACE", 0x0a639): (170, "StackFrameInfoMap"),
- ("RO_SPACE", 0x0a6a1): (172, "Tuple3Map"),
- ("RO_SPACE", 0x0a709): (173, "ArrayBoilerplateDescriptionMap"),
- ("RO_SPACE", 0x0a771): (174, "WasmDebugInfoMap"),
- ("RO_SPACE", 0x0a7d9): (175, "WasmExportedFunctionDataMap"),
- ("RO_SPACE", 0x0a841): (176, "CallableTaskMap"),
- ("RO_SPACE", 0x0a8a9): (177, "CallbackTaskMap"),
- ("RO_SPACE", 0x0a911): (178, "PromiseFulfillReactionJobTaskMap"),
- ("RO_SPACE", 0x0a979): (179, "PromiseRejectReactionJobTaskMap"),
- ("RO_SPACE", 0x0a9e1): (180, "PromiseResolveThenableJobTaskMap"),
- ("RO_SPACE", 0x0aa49): (181, "AllocationSiteMap"),
- ("RO_SPACE", 0x0aab1): (181, "AllocationSiteMap"),
+ ("RO_SPACE", 0x02251): (132, "MetaMap"),
+ ("RO_SPACE", 0x022d1): (131, "NullMap"),
+ ("RO_SPACE", 0x02341): (205, "DescriptorArrayMap"),
+ ("RO_SPACE", 0x023a1): (204, "WeakFixedArrayMap"),
+ ("RO_SPACE", 0x023f1): (152, "OnePointerFillerMap"),
+ ("RO_SPACE", 0x02441): (152, "TwoPointerFillerMap"),
+ ("RO_SPACE", 0x024c1): (131, "UninitializedMap"),
+ ("RO_SPACE", 0x02539): (8, "OneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x025e1): (131, "UndefinedMap"),
+ ("RO_SPACE", 0x02641): (129, "HeapNumberMap"),
+ ("RO_SPACE", 0x026c1): (131, "TheHoleMap"),
+ ("RO_SPACE", 0x02771): (131, "BooleanMap"),
+ ("RO_SPACE", 0x02869): (136, "ByteArrayMap"),
+ ("RO_SPACE", 0x028b9): (182, "FixedArrayMap"),
+ ("RO_SPACE", 0x02909): (182, "FixedCOWArrayMap"),
+ ("RO_SPACE", 0x02959): (184, "HashTableMap"),
+ ("RO_SPACE", 0x029a9): (128, "SymbolMap"),
+ ("RO_SPACE", 0x029f9): (72, "OneByteStringMap"),
+ ("RO_SPACE", 0x02a49): (193, "ScopeInfoMap"),
+ ("RO_SPACE", 0x02a99): (216, "SharedFunctionInfoMap"),
+ ("RO_SPACE", 0x02ae9): (133, "CodeMap"),
+ ("RO_SPACE", 0x02b39): (199, "FunctionContextMap"),
+ ("RO_SPACE", 0x02b89): (208, "CellMap"),
+ ("RO_SPACE", 0x02bd9): (215, "GlobalPropertyCellMap"),
+ ("RO_SPACE", 0x02c29): (135, "ForeignMap"),
+ ("RO_SPACE", 0x02c79): (206, "TransitionArrayMap"),
+ ("RO_SPACE", 0x02cc9): (211, "FeedbackVectorMap"),
+ ("RO_SPACE", 0x02d69): (131, "ArgumentsMarkerMap"),
+ ("RO_SPACE", 0x02e11): (131, "ExceptionMap"),
+ ("RO_SPACE", 0x02eb9): (131, "TerminationExceptionMap"),
+ ("RO_SPACE", 0x02f69): (131, "OptimizedOutMap"),
+ ("RO_SPACE", 0x03011): (131, "StaleRegisterMap"),
+ ("RO_SPACE", 0x03089): (201, "NativeContextMap"),
+ ("RO_SPACE", 0x030d9): (200, "ModuleContextMap"),
+ ("RO_SPACE", 0x03129): (198, "EvalContextMap"),
+ ("RO_SPACE", 0x03179): (202, "ScriptContextMap"),
+ ("RO_SPACE", 0x031c9): (195, "BlockContextMap"),
+ ("RO_SPACE", 0x03219): (196, "CatchContextMap"),
+ ("RO_SPACE", 0x03269): (203, "WithContextMap"),
+ ("RO_SPACE", 0x032b9): (197, "DebugEvaluateContextMap"),
+ ("RO_SPACE", 0x03309): (194, "ScriptContextTableMap"),
+ ("RO_SPACE", 0x03359): (151, "FeedbackMetadataArrayMap"),
+ ("RO_SPACE", 0x033a9): (182, "ArrayListMap"),
+ ("RO_SPACE", 0x033f9): (130, "BigIntMap"),
+ ("RO_SPACE", 0x03449): (183, "ObjectBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x03499): (137, "BytecodeArrayMap"),
+ ("RO_SPACE", 0x034e9): (209, "CodeDataContainerMap"),
+ ("RO_SPACE", 0x03539): (150, "FixedDoubleArrayMap"),
+ ("RO_SPACE", 0x03589): (188, "GlobalDictionaryMap"),
+ ("RO_SPACE", 0x035d9): (210, "ManyClosuresCellMap"),
+ ("RO_SPACE", 0x03629): (182, "ModuleInfoMap"),
+ ("RO_SPACE", 0x03679): (134, "MutableHeapNumberMap"),
+ ("RO_SPACE", 0x036c9): (187, "NameDictionaryMap"),
+ ("RO_SPACE", 0x03719): (210, "NoClosuresCellMap"),
+ ("RO_SPACE", 0x03769): (189, "NumberDictionaryMap"),
+ ("RO_SPACE", 0x037b9): (210, "OneClosureCellMap"),
+ ("RO_SPACE", 0x03809): (185, "OrderedHashMapMap"),
+ ("RO_SPACE", 0x03859): (186, "OrderedHashSetMap"),
+ ("RO_SPACE", 0x038a9): (213, "PreParsedScopeDataMap"),
+ ("RO_SPACE", 0x038f9): (214, "PropertyArrayMap"),
+ ("RO_SPACE", 0x03949): (207, "SideEffectCallHandlerInfoMap"),
+ ("RO_SPACE", 0x03999): (207, "SideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x039e9): (207, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("RO_SPACE", 0x03a39): (190, "SimpleNumberDictionaryMap"),
+ ("RO_SPACE", 0x03a89): (182, "SloppyArgumentsElementsMap"),
+ ("RO_SPACE", 0x03ad9): (217, "SmallOrderedHashMapMap"),
+ ("RO_SPACE", 0x03b29): (218, "SmallOrderedHashSetMap"),
+ ("RO_SPACE", 0x03b79): (191, "StringTableMap"),
+ ("RO_SPACE", 0x03bc9): (220, "UncompiledDataWithoutPreParsedScopeMap"),
+ ("RO_SPACE", 0x03c19): (221, "UncompiledDataWithPreParsedScopeMap"),
+ ("RO_SPACE", 0x03c69): (222, "WeakArrayListMap"),
+ ("RO_SPACE", 0x03cb9): (192, "EphemeronHashTableMap"),
+ ("RO_SPACE", 0x03d09): (106, "NativeSourceStringMap"),
+ ("RO_SPACE", 0x03d59): (64, "StringMap"),
+ ("RO_SPACE", 0x03da9): (73, "ConsOneByteStringMap"),
+ ("RO_SPACE", 0x03df9): (65, "ConsStringMap"),
+ ("RO_SPACE", 0x03e49): (77, "ThinOneByteStringMap"),
+ ("RO_SPACE", 0x03e99): (69, "ThinStringMap"),
+ ("RO_SPACE", 0x03ee9): (67, "SlicedStringMap"),
+ ("RO_SPACE", 0x03f39): (75, "SlicedOneByteStringMap"),
+ ("RO_SPACE", 0x03f89): (66, "ExternalStringMap"),
+ ("RO_SPACE", 0x03fd9): (82, "ExternalStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x04029): (74, "ExternalOneByteStringMap"),
+ ("RO_SPACE", 0x04079): (98, "ShortExternalStringMap"),
+ ("RO_SPACE", 0x040c9): (114, "ShortExternalStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x04119): (0, "InternalizedStringMap"),
+ ("RO_SPACE", 0x04169): (2, "ExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x041b9): (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x04209): (10, "ExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x04259): (34, "ShortExternalInternalizedStringMap"),
+ ("RO_SPACE", 0x042a9): (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ ("RO_SPACE", 0x042f9): (42, "ShortExternalOneByteInternalizedStringMap"),
+ ("RO_SPACE", 0x04349): (106, "ShortExternalOneByteStringMap"),
+ ("RO_SPACE", 0x04399): (140, "FixedUint8ArrayMap"),
+ ("RO_SPACE", 0x043e9): (139, "FixedInt8ArrayMap"),
+ ("RO_SPACE", 0x04439): (142, "FixedUint16ArrayMap"),
+ ("RO_SPACE", 0x04489): (141, "FixedInt16ArrayMap"),
+ ("RO_SPACE", 0x044d9): (144, "FixedUint32ArrayMap"),
+ ("RO_SPACE", 0x04529): (143, "FixedInt32ArrayMap"),
+ ("RO_SPACE", 0x04579): (145, "FixedFloat32ArrayMap"),
+ ("RO_SPACE", 0x045c9): (146, "FixedFloat64ArrayMap"),
+ ("RO_SPACE", 0x04619): (147, "FixedUint8ClampedArrayMap"),
+ ("RO_SPACE", 0x04669): (149, "FixedBigUint64ArrayMap"),
+ ("RO_SPACE", 0x046b9): (148, "FixedBigInt64ArrayMap"),
+ ("RO_SPACE", 0x04709): (131, "SelfReferenceMarkerMap"),
+ ("RO_SPACE", 0x04771): (171, "Tuple2Map"),
+ ("RO_SPACE", 0x04811): (173, "ArrayBoilerplateDescriptionMap"),
+ ("RO_SPACE", 0x04b01): (161, "InterceptorInfoMap"),
+ ("RO_SPACE", 0x04bf9): (169, "ScriptMap"),
+ ("RO_SPACE", 0x09aa1): (154, "AccessorInfoMap"),
+ ("RO_SPACE", 0x09af1): (153, "AccessCheckInfoMap"),
+ ("RO_SPACE", 0x09b41): (155, "AccessorPairMap"),
+ ("RO_SPACE", 0x09b91): (156, "AliasedArgumentsEntryMap"),
+ ("RO_SPACE", 0x09be1): (157, "AllocationMementoMap"),
+ ("RO_SPACE", 0x09c31): (158, "AsyncGeneratorRequestMap"),
+ ("RO_SPACE", 0x09c81): (159, "DebugInfoMap"),
+ ("RO_SPACE", 0x09cd1): (160, "FunctionTemplateInfoMap"),
+ ("RO_SPACE", 0x09d21): (162, "InterpreterDataMap"),
+ ("RO_SPACE", 0x09d71): (163, "ModuleInfoEntryMap"),
+ ("RO_SPACE", 0x09dc1): (164, "ModuleMap"),
+ ("RO_SPACE", 0x09e11): (165, "ObjectTemplateInfoMap"),
+ ("RO_SPACE", 0x09e61): (166, "PromiseCapabilityMap"),
+ ("RO_SPACE", 0x09eb1): (167, "PromiseReactionMap"),
+ ("RO_SPACE", 0x09f01): (168, "PrototypeInfoMap"),
+ ("RO_SPACE", 0x09f51): (170, "StackFrameInfoMap"),
+ ("RO_SPACE", 0x09fa1): (172, "Tuple3Map"),
+ ("RO_SPACE", 0x09ff1): (174, "WasmDebugInfoMap"),
+ ("RO_SPACE", 0x0a041): (175, "WasmExportedFunctionDataMap"),
+ ("RO_SPACE", 0x0a091): (176, "CallableTaskMap"),
+ ("RO_SPACE", 0x0a0e1): (177, "CallbackTaskMap"),
+ ("RO_SPACE", 0x0a131): (178, "PromiseFulfillReactionJobTaskMap"),
+ ("RO_SPACE", 0x0a181): (179, "PromiseRejectReactionJobTaskMap"),
+ ("RO_SPACE", 0x0a1d1): (180, "PromiseResolveThenableJobTaskMap"),
+ ("RO_SPACE", 0x0a221): (181, "AllocationSiteMap"),
+ ("RO_SPACE", 0x0a271): (181, "AllocationSiteMap"),
("MAP_SPACE", 0x02201): (1057, "ExternalMap"),
- ("MAP_SPACE", 0x02259): (1072, "JSMessageObjectMap"),
+ ("MAP_SPACE", 0x02251): (1072, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("RO_SPACE", 0x022b1): "NullValue",
- ("RO_SPACE", 0x02339): "EmptyDescriptorArray",
- ("RO_SPACE", 0x023b1): "EmptyFixedArray",
- ("RO_SPACE", 0x025a1): "UninitializedValue",
- ("RO_SPACE", 0x026f1): "UndefinedValue",
- ("RO_SPACE", 0x02789): "NanValue",
- ("RO_SPACE", 0x02801): "TheHoleValue",
- ("RO_SPACE", 0x028b9): "HoleNanValue",
- ("RO_SPACE", 0x028c9): "TrueValue",
- ("RO_SPACE", 0x029a1): "FalseValue",
- ("RO_SPACE", 0x029f1): "empty_string",
- ("RO_SPACE", 0x02fb9): "EmptyScopeInfo",
- ("RO_SPACE", 0x02fc9): "ArgumentsMarker",
- ("RO_SPACE", 0x03089): "Exception",
- ("RO_SPACE", 0x03149): "TerminationException",
- ("RO_SPACE", 0x03211): "OptimizedOut",
- ("RO_SPACE", 0x032d1): "StaleRegister",
- ("RO_SPACE", 0x05231): "EmptyByteArray",
- ("RO_SPACE", 0x05259): "EmptyFixedUint8Array",
- ("RO_SPACE", 0x05279): "EmptyFixedInt8Array",
- ("RO_SPACE", 0x05299): "EmptyFixedUint16Array",
- ("RO_SPACE", 0x052b9): "EmptyFixedInt16Array",
- ("RO_SPACE", 0x052d9): "EmptyFixedUint32Array",
- ("RO_SPACE", 0x052f9): "EmptyFixedInt32Array",
- ("RO_SPACE", 0x05319): "EmptyFixedFloat32Array",
- ("RO_SPACE", 0x05339): "EmptyFixedFloat64Array",
- ("RO_SPACE", 0x05359): "EmptyFixedUint8ClampedArray",
- ("RO_SPACE", 0x053b9): "EmptySloppyArgumentsElements",
- ("RO_SPACE", 0x053d9): "EmptySlowElementDictionary",
- ("RO_SPACE", 0x05421): "EmptyOrderedHashMap",
- ("RO_SPACE", 0x05449): "EmptyOrderedHashSet",
- ("RO_SPACE", 0x05481): "EmptyPropertyCell",
- ("RO_SPACE", 0x054a9): "EmptyWeakCell",
- ("RO_SPACE", 0x05599): "InfinityValue",
- ("RO_SPACE", 0x055a9): "MinusZeroValue",
- ("RO_SPACE", 0x055b9): "MinusInfinityValue",
- ("RO_SPACE", 0x055c9): "SelfReferenceMarker",
+ ("RO_SPACE", 0x022a1): "NullValue",
+ ("RO_SPACE", 0x02321): "EmptyDescriptorArray",
+ ("RO_SPACE", 0x02491): "UninitializedValue",
+ ("RO_SPACE", 0x025b1): "UndefinedValue",
+ ("RO_SPACE", 0x02631): "NanValue",
+ ("RO_SPACE", 0x02691): "TheHoleValue",
+ ("RO_SPACE", 0x02731): "HoleNanValue",
+ ("RO_SPACE", 0x02741): "TrueValue",
+ ("RO_SPACE", 0x02801): "FalseValue",
+ ("RO_SPACE", 0x02851): "empty_string",
+ ("RO_SPACE", 0x02d19): "EmptyScopeInfo",
+ ("RO_SPACE", 0x02d29): "EmptyFixedArray",
+ ("RO_SPACE", 0x02d39): "ArgumentsMarker",
+ ("RO_SPACE", 0x02de1): "Exception",
+ ("RO_SPACE", 0x02e89): "TerminationException",
+ ("RO_SPACE", 0x02f39): "OptimizedOut",
+ ("RO_SPACE", 0x02fe1): "StaleRegister",
+ ("RO_SPACE", 0x047d1): "EmptyByteArray",
+ ("RO_SPACE", 0x04861): "EmptyFixedUint8Array",
+ ("RO_SPACE", 0x04881): "EmptyFixedInt8Array",
+ ("RO_SPACE", 0x048a1): "EmptyFixedUint16Array",
+ ("RO_SPACE", 0x048c1): "EmptyFixedInt16Array",
+ ("RO_SPACE", 0x048e1): "EmptyFixedUint32Array",
+ ("RO_SPACE", 0x04901): "EmptyFixedInt32Array",
+ ("RO_SPACE", 0x04921): "EmptyFixedFloat32Array",
+ ("RO_SPACE", 0x04941): "EmptyFixedFloat64Array",
+ ("RO_SPACE", 0x04961): "EmptyFixedUint8ClampedArray",
+ ("RO_SPACE", 0x049c1): "EmptySloppyArgumentsElements",
+ ("RO_SPACE", 0x049e1): "EmptySlowElementDictionary",
+ ("RO_SPACE", 0x04a29): "EmptyOrderedHashMap",
+ ("RO_SPACE", 0x04a51): "EmptyOrderedHashSet",
+ ("RO_SPACE", 0x04a89): "EmptyPropertyCell",
+ ("RO_SPACE", 0x04b69): "InfinityValue",
+ ("RO_SPACE", 0x04b79): "MinusZeroValue",
+ ("RO_SPACE", 0x04b89): "MinusInfinityValue",
+ ("RO_SPACE", 0x04b99): "SelfReferenceMarker",
("OLD_SPACE", 0x02211): "EmptyScript",
("OLD_SPACE", 0x02291): "ManyClosuresCell",
("OLD_SPACE", 0x022b1): "NoElementsProtector",
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index e295060ab7..654e19f7e9 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -6,6 +6,5 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly...
-The bartender starts to shake the bottles...............................
-.
+The autoroller bought a round of Himbeerbrause. Suddenly....
+The bartender starts to shake the bottles...